[
  {
    "path": ".clang-format",
    "content": "# Use the Google style in this project.\nBasedOnStyle: Google\n"
  },
  {
    "path": ".clang-tidy",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# clang-tidy checks\n# https://clang.llvm.org/extra/clang-tidy/checks/list.html\n\nChecks: >\n  -*,\n  modernize-*,\n  bugprone-*,\n  concurrency-*,\n  misc-*,\n  readability-*,\n  performance-*,\n  portability-*,\n  google-*,\n  -modernize-use-trailing-return-type,\n  -modernize-avoid-c-arrays,\n  -modernize-make-unique,\n  -modernize-use-using,\n  -modernize-use-equals-delete,\n  -modernize-use-nodiscard,\n  -modernize-use-transparent-functors,\n  -modernize-deprecated-headers,\n  -bugprone-lambda-function-name,\n  -bugprone-narrowing-conversions,\n  -bugprone-branch-clone,\n  -bugprone-reserved-identifier,\n  -concurrency-mt-unsafe,\n  -misc-non-private-member-variables-in-classes,\n  -misc-unused-parameters,\n  -misc-lambda-function-name,\n  -misc-misplaced-widening-cast,\n  -misc-no-recursion,\n  -readability-simplify-boolean-expr,\n  -readability-container-size-empty,\n  -readability-convert-member-functions-to-static,\n  -readability-implicit-bool-conversion,\n  -readability-make-member-function-const,\n  -readability-magic-numbers,\n  -readability-use-anyofallof,\n  -readability-function-cognitive-complexity,\n  -google-readability-avoid-underscore-in-googletest-name,\n  -google-default-arguments,\n  -google-runtime-int,\n  -google-runtime-references,\n  -google-readability-casting,\n  -google-readability-todo,\n  
-google-explicit-constructor\n\n# Enable these when required:\n  # clang-analyzer-*,\n  # clang-analyzer-deadcode.DeadStores,\n  # clang-analyzer-optin.performance.Padding,\n  # clang-analyzer-optin.cplusplus.VirtualCall,\n\n# Only report diagnostics from headers matching this regex.\nHeaderFilterRegex: '((?!build/)src|test/unit|test/drivers|test/function|test/mock)/*'\nFormatStyle: file\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature.md",
    "content": "---\nname: 需求建议 | Feature Request\nabout: 需求建议描述 | Use this template for raising a feature request\ntitle: ''\nlabels: type:feature\nassignees: ''\n\n---\n\n请确认提交的需求，而不是问题或求助。  \nPlease make sure that this is a feature request.\n\n**运行环境信息 | System information** (请提供足够详细的信息 | Please provide as much relevant information as possible)\n\n- 操作系统信息 | Operation System information\n- GPU或加速卡信息 | GPU or NPU information\n- 使用的推理引擎 | Inference engine information\n- 编程语言 | Programming language （C++, Python, Java）\n- 是否愿意贡献此需求 | Are you willing to contribute it（Yes/No）\n\n**需求应用场景 | Describe the feature**\n\n**是否修改当前API接口，怎么修改 | Will this change the current api? How?**\n\n**特性受益用户 | Benefit users**\n\n**建议的方案 | Suggest solution**\n\n**其他补充信息 | Other info**\n\n---\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/issue.md",
    "content": "---\nname: \"问题报告 | Bug Issue\"\nabout: \"问题报告说明 | Use this template for reporting a bug\"\nlabels: type:bug\n\n---\n\n在提交BUG之前，请阅读帮助文档的FAQ，或在现有issue中搜索是否有类似问题。  \nBefore submitting a bug, please read the FAQ of the help documentation, or search for similar issues in existing issues.\n\n**运行环境信息 | System information** (请提供足够详细的信息 | Please provide as much relevant information as possible)\n\n- 操作系统信息 | Operation System information\n- 设备信息 | Device information\n- ModelBox版本 | ModelBox version\n- GPU或加速卡信息 | GPU or NPU information\n- 使用的推理引擎 | Inference engine information\n- 编程语言 | Programming language （C++, Python, Java）\n\n**描述问题 | Describe the current behavior:**\n\n**期望的行为 | Describe the expected behavior:**\n\n**重现步骤描述 | Standalone code to reproduce the issue:**\n\n提供具体重现问题的步骤，如果可能，提供相关的截图信息，日志信息。  \nProvide a reproducible test case that is the bare minimum necessary to replicate the problem.\n\n**日志信息 | Logs**\n\n收集ModelBox的运行日志，路径为/var/log/modelbox\nPlease Provide modleobx logs, log path /var/log/modelbox\n\n**其他信息 | Other Info.**\n"
  },
  {
    "path": ".github/workflows/gitee-mirror.yml",
    "content": "name: Mirror-To-Gitee\n\non:\n  workflow_dispatch:\n  schedule:\n    - cron: '45 17 * * *'\n\nconcurrency:\n  group: git-mirror\n\njobs:\n  git-mirror:\n    runs-on: ubuntu-latest\n    if: github.repository == 'modelbox-ai/modelbox'\n    steps:\n      - uses: wearerequired/git-mirror-action@v1\n        env:\n          SSH_PRIVATE_KEY: ${{ secrets.GITEE_SSH_PRIVATE_KEY }}\n        with:\n          source-repo: 'https://github.com/modelbox-ai/modelbox.git'\n          destination-repo: 'git@gitee.com:modelbox/modelbox.git'\n"
  },
  {
    "path": ".github/workflows/manual-build-with-image.yml",
    "content": "name: Build With Specific Images\n\non:\n  workflow_dispatch:\n    inputs:\n      images_name:\n        type: choice\n        description: Images Lists\n        options: \n        - modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64\n        - modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64\n        - modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64\n        - modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64\n        - modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64\n        - modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64\n        - modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64\n        - modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64\n        - modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-openeuler-x86_64\n        - modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64\n        - modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64\n        - modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64\n\nenv:\n  BUILD_TYPE: Release\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    container:\n      image: ${{ github.event.inputs.images_name }}\n    steps:\n    - name: Checkout\n      uses: actions/checkout@v3\n    - name: Configure CMake\n      run: |\n        mkdir build\n        cd build\n        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n    - name: Build\n      working-directory: build\n      run: make package -j8\n    - name: Release check\n      run: ./docker/artifact_check.sh\n      shell: bash\n    - name: Test\n      working-directory: build\n      run: |\n        dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}') || true\n        rpm -e $(rpm -qa|grep modelbox) || true\n        make build-test -j8\n        unset LD_LIBRARY_PATH\n        make unittest\n"
  },
  {
    "path": ".github/workflows/manual-build-with-rockchip-images.yml",
    "content": "name: Build With rockchip images\non:\n  workflow_dispatch:\n    inputs:\n      images_rk_name:\n        type: choice\n        description: Rockchip Images Lists\n        options: \n        - 356x\n        - 3588\n      version:\n        description: 'new image tag(e.g. v1.1.0)'\n        required: true\n        default: 'latest'\n\njobs:\n  complie_rockchip_rknnrt_build_ubuntu_image:\n    runs-on: [rockchip-unit, Linux, ARM64]\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_VERSION: ${{ steps.env.outputs.IMAGE_VERSION }}\n    name: Build on ${{ matrix.distro }} ${{ matrix.arch }}\n\n    strategy:\n      matrix:\n        include:\n          - arch: aarch64\n            distro: ubuntu20.04\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"::set-output name=IMAGE_NAME_DEV::modelbox/modelbox-build-rockchip-rknnrt-${{ github.event.inputs.images_rk_name }}-ubuntu-aarch64\"\n          echo \"::set-output name=IMAGE_VERSION::${{ github.event.inputs.version }}\"\n          echo \"the docker name is ${{ steps.env.outputs.IMAGE_NAME_DEV }}.\"\n          echo \"the docker version is ${{ steps.env.outputs.IMAGE_VERSION }}.\"\n      - name: Checkout\n        uses: actions/checkout@v3.1.0\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v2.0.0\n      - name: Login to DockerHub\n        uses: docker/login-action@v2.0.0\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download for rockchip package\n        run: |\n          echo \"the arch is ${{ runner.arch }}.\"\n          chmod +x ./docker/prepare_for_rockchip.sh\n          ./docker/prepare_for_rockchip.sh ${{ github.event.inputs.images_rk_name }}\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@v3.1.1\n        with:\n          push: true\n          context: .\n          file: 
docker/Dockerfile.rknnrt.build.ubuntu\n          tags: |\n            ${{ steps.env.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ steps.env.outputs.IMAGE_NAME_DEV }}:${{ steps.env.outputs.IMAGE_VERSION }}"
  },
  {
    "path": ".github/workflows/merge-request-ascend.yml",
    "content": "name: Merge Request Ascend\n\non:\n  pull_request:\n    branches: \n      - main\n\nconcurrency: \n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\nenv:\n  BUILD_TYPE: Release\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    container:\n      image: modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64\n    steps:\n    - uses: actions/checkout@v3\n    - run: apt update\n    - name: Set up JDK 11\n      uses: actions/setup-java@v1\n      with:\n        java-version: 11\n        maven-version: '3.6.2'\n        cache: 'maven'\n    - name: Set up Maven\n      uses: stCarolas/setup-maven@v4.4\n      with:\n        maven-version: 3.8.2\n    - uses: actions/cache@v1\n      with:\n        path: /root/.m2/repository\n        key: ${{ runner.os }}-maven-${{ hashFiles('src/java/pom.xml') }}\n        restore-keys: |\n          ${{ runner.os }}-maven-\n    - name: ccache\n      uses: hendrikmuhs/ccache-action@v1.2\n      with:\n        key: ubuntu-latest-x86_64\n        max-size: 512M\n    - name: Remove old modelbox\n      continue-on-error: true\n      run: |\n        dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n\n    - name: Configure CMake\n      run: |\n        mkdir build\n        cd build\n        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_WEBUI=off -DCLANG_TIDY=on -DCLANG_TIDY_AS_ERROR=on -DWITH_JAVA=on\n\n    - name: Build\n      working-directory: build\n      run: |\n        make package -j8\n\n    - name: Test\n      working-directory: build\n      run: |\n        make build-test -j8\n        unset LD_LIBRARY_PATH\n        make unittest\n"
  },
  {
    "path": ".github/workflows/merge-request-cuda.yml",
    "content": "name: Merge Request CUDA\n\non:\n  pull_request:\n    branches: \n      - main\n\nconcurrency: \n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\nenv:\n  BUILD_TYPE: Release\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    container:\n      image: modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64\n\n    steps:\n    - uses: actions/checkout@v3\n    - run: apt update\n    - name: Set up JDK 11\n      uses: actions/setup-java@v1\n      with:\n        java-version: 11\n        maven-version: '3.6.2'\n        cache: 'maven'\n    - name: Set up Maven\n      uses: stCarolas/setup-maven@v4.4\n      with:\n        maven-version: 3.8.2\n    - uses: actions/cache@v1\n      with:\n        path: /root/.m2/repository\n        key: ${{ runner.os }}-maven-${{ hashFiles('src/java/pom.xml') }}\n        restore-keys: |\n          ${{ runner.os }}-maven-\n    - name: ccache\n      uses: hendrikmuhs/ccache-action@v1.2\n      with:\n        key: ubuntu-latest-x86_64\n        max-size: 512M\n    - name: Remove old modelbox\n      continue-on-error: true\n      run: |\n        dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n\n    - name: Configure CMake\n      run: |\n        mkdir build\n        cd build\n        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_WEBUI=off -DCLANG_TIDY=on -DCLANG_TIDY_AS_ERROR=on -DWITH_JAVA=on\n\n    - name: Build\n      working-directory: build\n      run: |\n        make package -j8\n\n    - name: Test\n      working-directory: build\n      run: |\n        make build-test -j8\n        unset LD_LIBRARY_PATH\n        make unittest\n"
  },
  {
    "path": ".github/workflows/merge-request-rockchip.yml",
    "content": "name: Merge Request RockChip\non:\n  pull_request:\n    branches:\n      - main\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\nenv:\n  BUILD_TYPE: Release\n  \njobs:\n  build:\n    runs-on: [rockchip-build, Linux, ARM64]\n\n    steps:\n      - run: echo \"the build begin\"\n      - name: Checkout\n        uses: actions/checkout@v3\n      \n      - name: ccache\n        uses: hendrikmuhs/ccache-action@v1.2\n        with:\n          key: ubuntu-latest-arm64\n          max-size: 1024M\n\n      - name: Remove old modelbox\n        continue-on-error: true\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n\n      - name: Configure CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_WEBUI=off -DCLANG_TIDY=on -DCLANG_TIDY_AS_ERROR=on\n\n      - name: build\n        working-directory: build\n        run: |\n          make package -j8\n\n      - name: show result\n        working-directory: build\n        run: |\n          ls -l src/drivers/devices/rockchip/core\n          ls -l release\n\n      - name: Finish\n        run: echo \"the build finish\"\n"
  },
  {
    "path": ".github/workflows/publish-modelbox-images.yml",
    "content": "name: Publish modelbox images\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: 'new image tag(e.g. v1.1.0)'\n        required: true\n        default: 'latest'\n\nenv:\n  BUILD_TYPE: Release\n  IMAGE_VERSION: ${{ github.event.inputs.version }}\n\njobs:\n  compile_cuda112_trt_ubuntu:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TRT_VERSION: ${{ steps.env.outputs.TRT_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=11-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=11.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=11.2.152-1\" >> $GITHUB_OUTPUT\n          echo \"TRT_VERSION=8.4.2.4\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=11.2.2\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=11.2 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda112_trt_ubuntu\n          path: artifact\n\n  build_cuda112_trt_ubuntu_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_trt_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_trt_ubuntu\n          path: .\n      - name: Download for dev package\n        run: ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.ubuntu\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            
NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_trt_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda112_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda112_trt_ubuntu_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_trt_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_trt_ubuntu\n          path: .\n      - name: Download for run package\n        run: ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.ubuntu\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_trt_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_trt_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_trt_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda112_trt_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  
test_cuda112_trt_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda112_trt_ubuntu,build_cuda112_trt_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_cuda112_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_cuda112_tf_ubuntu:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      TF_VERSION: ${{ steps.env.outputs.TF_VERSION }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=11-2\" >> $GITHUB_OUTPUT\n          echo \"TF_VERSION=2.6.0\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=11.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=11.2.152-1\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=11.2.2\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=11.2 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=450,driver<451\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          
echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda112_tf_ubuntu\n          path: artifact\n          \n  build_cuda112_tf_ubuntu_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_tf_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_tf_ubuntu\n          path: .\n      - name: Download for dev package\n        run: ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.ubuntu\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_tf_ubuntu.outputs.CUDA_VER }}\n            TF_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.TF_VERSION }}\n            CUDA_VERSION=${{ 
needs.compile_cuda112_tf_ubuntu.outputs.CUDA_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_tf_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_tf_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda112_tf_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda112_tf_ubuntu_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_tf_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_tf_ubuntu\n          path: .\n      - name: Download for run package\n        run: ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.ubuntu\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_tf_ubuntu.outputs.CUDA_VER }}\n            TF_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.TF_VERSION }}\n            CUDA_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.CUDA_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_tf_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ 
needs.compile_cuda112_tf_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_tf_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda112_tf_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda112_tf_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda112_tf_ubuntu,build_cuda112_tf_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_cuda112_tf_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_cuda102_trt_ubuntu:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TRT_VERSION: ${{ steps.env.outputs.TRT_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=10-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=10.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=10.2.89-1\" >> $GITHUB_OUTPUT\n          echo \"TRT_VERSION=7.1.3.4\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=10.2.89\" >> $GITHUB_OUTPUT\n  
        echo \"NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda102_trt_ubuntu\n          path: artifact\n\n  build_cuda102_trt_ubuntu_develop_image:\n    runs-on: ubuntu-22.04\n    needs: compile_cuda102_trt_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_trt_ubuntu\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i 's/python3.8/python3.7/g' docker/Dockerfile.cuda.develop.ubuntu\n          sed -i 's/ubuntu2004/ubuntu1804/g' docker/Dockerfile.cuda.develop.ubuntu\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build 
and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.ubuntu\n          build-args: |\n            BASE_IMAGE=ubuntu:18.04\n            CUDA_VER=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_trt_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda102_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda102_trt_ubuntu_runtime_image:\n    runs-on: ubuntu-22.04\n    needs: compile_cuda102_trt_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_trt_ubuntu\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i 's/python3.8/python3.7/g' docker/Dockerfile.cuda.runtime.ubuntu\n          sed -i 's/libduktape205/libduktape202/g' docker/Dockerfile.cuda.runtime.ubuntu\n          sed -i 's/ubuntu2004/ubuntu1804/g' docker/Dockerfile.cuda.runtime.ubuntu\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - 
name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.ubuntu\n          build-args: |\n            BASE_IMAGE=ubuntu:18.04\n            CUDA_VER=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_trt_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_trt_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_trt_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda102_trt_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda102_trt_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda102_trt_ubuntu,build_cuda102_trt_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_cuda102_trt_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_cuda102_torch_ubuntu:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TORCH_VERSION: ${{ steps.env.outputs.TORCH_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=10-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=10.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=10.2.89-1\" >> $GITHUB_OUTPUT\n          echo \"TORCH_VERSION=1.9.1\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=10.2.89\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda102_torch_ubuntu\n          path: artifact\n\n  build_cuda102_torch_ubuntu_develop_image:\n    runs-on: ubuntu-22.04\n    needs: compile_cuda102_torch_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_torch_ubuntu\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i 's/python3.8/python3.7/g' docker/Dockerfile.cuda.develop.ubuntu\n          sed -i 's/ubuntu2004/ubuntu1804/g' docker/Dockerfile.cuda.develop.ubuntu\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.ubuntu\n          build-args: |\n            BASE_IMAGE=ubuntu:18.04\n            CUDA_VER=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_VERSION }}\n            TORCH_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.TORCH_VERSION 
}}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_torch_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_torch_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda102_torch_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda102_torch_ubuntu_runtime_image:\n    runs-on: ubuntu-22.04\n    needs: compile_cuda102_torch_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_torch_ubuntu\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i 's/python3.8/python3.7/g' docker/Dockerfile.cuda.runtime.ubuntu\n          sed -i 's/libduktape205/libduktape202/g' docker/Dockerfile.cuda.runtime.ubuntu\n          sed -i 's/ubuntu2004/ubuntu1804/g' docker/Dockerfile.cuda.runtime.ubuntu\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.ubuntu\n          build-args: |\n            BASE_IMAGE=ubuntu:18.04\n            CUDA_VER=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_VERSION }}\n            TORCH_VERSION=${{ 
needs.compile_cuda102_torch_ubuntu.outputs.TORCH_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_torch_ubuntu.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_torch_ubuntu.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_torch_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda102_torch_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda102_torch_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda102_torch_ubuntu,build_cuda102_torch_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_cuda102_torch_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_x86d_ubuntu:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_x86d_ubuntu\n          path: artifact\n\n  build_x86d_ubuntu_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_x86d_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d_ubuntu\n          path: .\n      - name: Download for dev package\n        run: ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.develop.ubuntu\n          build-args: dtype=310\n          tags: |\n            ${{ needs.compile_x86d_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_x86d_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_x86d_ubuntu_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_x86d_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: 
docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d_ubuntu\n          path: .\n      - name: Download for run package\n        run: ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.runtime.ubuntu\n          build-args: dtype=310\n          tags: |\n            ${{ needs.compile_x86d_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_x86d_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_x86d_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_x86d_ubuntu,build_x86d_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_x86d_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_armd_ubuntu:\n    runs-on: ubuntu\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-aarch64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-ubuntu-aarch64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          sed -i -e '107,116'H -e '116'G thirdparty/CMake/local-package.in\n          sed -i '118,120s@APIGW_Cpp@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '121s@APIGW-cpp-sdk@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '122s@APIGW-cpp-sdk-1.0.2@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -n '118,127p' thirdparty/CMake/local-package.in\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on -DLOCAL_PACKAGE_PATH=/opt/thirdparty/source\n      - name: Build\n        working-directory: build\n        run: |\n          npm cache clean --force\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_armd_ubuntu\n          path: artifact\n\n  build_armd_ubuntu_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd_ubuntu\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_dev.sh\n          ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=310 \\\n                 --file docker/Dockerfile.ascend.develop.ubuntu \\\n                 --tag ${{ needs.compile_armd_ubuntu.outputs.IMAGE_NAME_DEV }}:latest \\\n                 
--tag ${{ needs.compile_armd_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  build_armd_ubuntu_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd_ubuntu\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_run.sh\n          ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=310 \\\n                 --file docker/Dockerfile.ascend.runtime.ubuntu \\\n                 --tag ${{ needs.compile_armd_ubuntu.outputs.IMAGE_NAME_RUN }}:latest \\\n                 --tag ${{ needs.compile_armd_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  test_armd_ubuntu:\n    runs-on: ubuntu\n    needs: compile_armd_ubuntu\n    steps:\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}') || true\n          export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n          make build-test -j8\n          make unittest\n\n  compile_x86d310p_ubuntu:\n    runs-on: 
ubuntu-latest\n    container: modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_x86d310p_ubuntu\n          path: artifact\n\n  build_x86d310p_ubuntu_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_x86d310p_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d310p_ubuntu\n          path: .\n      - name: Download for dev package\n        run: ./docker/prepare_for_dev.sh\n        
shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.develop.ubuntu\n          build-args: dtype=710\n          tags: |\n            ${{ needs.compile_x86d310p_ubuntu.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_x86d310p_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_x86d310p_ubuntu_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_x86d310p_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d310p_ubuntu\n          path: .\n      - name: Download for run package\n        run: ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.runtime.ubuntu\n          build-args: dtype=710\n          tags: |\n            ${{ needs.compile_x86d310p_ubuntu.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_x86d310p_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_x86d310p_ubuntu:\n    runs-on: ubuntu-latest\n    needs: [compile_x86d310p_ubuntu,build_x86d310p_ubuntu_develop_image]\n    container:\n      image: ${{ needs.compile_x86d310p_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          
cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}')\n          make build-test -j8\n          make unittest\n\n  compile_armd310p_ubuntu:\n    runs-on: ubuntu-d310p\n    needs: compile_armd_ubuntu\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-aarch64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-aarch64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          sed -i -e '107,116'H -e '116'G thirdparty/CMake/local-package.in\n          sed -i '118,120s@APIGW_Cpp@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '121s@APIGW-cpp-sdk@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '122s@APIGW-cpp-sdk-1.0.2@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -n '118,127p' thirdparty/CMake/local-package.in\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on -DLOCAL_PACKAGE_PATH=/opt/thirdparty/source\n      - name: Build\n        working-directory: build\n        run: |\n          npm cache clean --force\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_armd310p_ubuntu\n          path: artifact\n\n  build_armd310p_ubuntu_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd310p_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd310p_ubuntu\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_dev.sh\n          ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=710 \\\n                 --file docker/Dockerfile.ascend.develop.ubuntu \\\n                 --tag ${{ needs.compile_armd310p_ubuntu.outputs.IMAGE_NAME_DEV }}:latest 
\\\n                 --tag ${{ needs.compile_armd310p_ubuntu.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  build_armd310p_ubuntu_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd310p_ubuntu\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd310p_ubuntu\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_run.sh\n          ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=710 \\\n                 --file docker/Dockerfile.ascend.runtime.ubuntu \\\n                 --tag ${{ needs.compile_armd310p_ubuntu.outputs.IMAGE_NAME_RUN }}:latest \\\n                 --tag ${{ needs.compile_armd310p_ubuntu.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  test_armd310p_ubuntu:\n    runs-on: ubuntu-d310p\n    needs: compile_armd310p_ubuntu\n    steps:\n      - name: Test\n        working-directory: build\n        run: |\n          dpkg -r $(dpkg -l|grep  modelbox|awk '{print $2}') || true\n          export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n          make build-test -j8\n          make 
unittest\n\n  compile_cuda112_trt_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TRT_VERSION: ${{ steps.env.outputs.TRT_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=11-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=11.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=11.2.152-1\" >> $GITHUB_OUTPUT\n          echo \"TRT_VERSION=8.4.2.4\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=11.2.2\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=11.2 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda112_trt_openeuler\n          path: artifact\n\n  build_cuda112_trt_openeuler_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_trt_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_trt_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_CUDART_VERSION }}\n            
NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_trt_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_trt_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda112_trt_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda112_trt_openeuler_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_trt_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_trt_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_trt_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ 
needs.compile_cuda112_trt_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_trt_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda112_trt_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda112_trt_openeuler:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda112_trt_openeuler,build_cuda112_trt_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_cuda112_trt_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_cuda112_tf_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      TF_VERSION: ${{ steps.env.outputs.TF_VERSION }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=11-2\" >> $GITHUB_OUTPUT\n          echo \"TF_VERSION=2.6.0\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=11.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=11.2.152-1\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=11.2.2\" >> 
$GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=11.2 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=450,driver<451\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda112_tf_openeuler\n          path: artifact\n          \n  build_cuda112_tf_openeuler_develop_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_tf_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_tf_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and 
Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_VER }}\n            TF_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.TF_VERSION }}\n            CUDA_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_tf_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_tf_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda112_tf_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda112_tf_openeuler_runtime_image:\n    runs-on: ubuntu-20.04\n    needs: compile_cuda112_tf_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda112_tf_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.openeuler\n          
build-args: |\n            CUDA_VER=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_VER }}\n            TF_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.TF_VERSION }}\n            CUDA_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda112_tf_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda112_tf_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda112_tf_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda112_tf_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda112_tf_openeuler:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda112_tf_openeuler,build_cuda112_tf_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_cuda112_tf_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_cuda102_trt_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TRT_VERSION: ${{ steps.env.outputs.TRT_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=10-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=10.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=10.2.89-1\" >> $GITHUB_OUTPUT\n          echo \"TRT_VERSION=7.1.3.4\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_CUDA_VERSION=10.2.89\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda102_trt_openeuler\n          path: artifact\n\n  build_cuda102_trt_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_cuda102_trt_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_trt_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_CUDART_VERSION }}\n            
NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_trt_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_trt_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda102_trt_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda102_trt_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_cuda102_trt_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_trt_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.runtime.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_VERSION }}\n            TRT_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.TRT_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_trt_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ 
needs.compile_cuda102_trt_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_trt_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda102_trt_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n  \n  test_cuda102_trt_openeuler:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda102_trt_openeuler,build_cuda102_trt_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_cuda102_trt_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_cuda102_torch_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-openeuler-x86_64\n    outputs:\n      CUDA_VER: ${{ steps.env.outputs.CUDA_VER }}\n      CUDA_VERSION: ${{ steps.env.outputs.CUDA_VERSION }}\n      CUDA_CUDART_VERSION: ${{ steps.env.outputs.CUDA_CUDART_VERSION }}\n      TORCH_VERSION: ${{ steps.env.outputs.TORCH_VERSION }}\n      NVIDIA_CUDA_VERSION: ${{ steps.env.outputs.NVIDIA_CUDA_VERSION }}\n      NVIDIA_REQUIRE_CUDA: ${{ steps.env.outputs.NVIDIA_REQUIRE_CUDA }}\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"CUDA_VER=10-2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_VERSION=10.2\" >> $GITHUB_OUTPUT\n          echo \"CUDA_CUDART_VERSION=10.2.89-1\" >> $GITHUB_OUTPUT\n          echo \"TORCH_VERSION=1.9.1\" >> $GITHUB_OUTPUT\n          echo 
\"NVIDIA_CUDA_VERSION=10.2.89\" >> $GITHUB_OUTPUT\n          echo \"NVIDIA_REQUIRE_CUDA=cuda>=10.2 brand=tesla,driver>=418,driver<419\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-libtorch_1.9.1-cuda_10.2-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_cuda102_torch_openeuler\n          path: artifact\n\n  build_cuda102_torch_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_cuda102_torch_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_torch_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and 
Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.cuda.develop.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_VERSION }}\n            TORCH_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.TORCH_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_torch_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_torch_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_cuda102_torch_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_cuda102_torch_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_cuda102_torch_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_cuda102_torch_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: 
docker/Dockerfile.cuda.runtime.openeuler\n          build-args: |\n            CUDA_VER=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_VER }}\n            CUDA_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_VERSION }}\n            TORCH_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.TORCH_VERSION }}\n            CUDA_CUDART_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.CUDA_CUDART_VERSION }}\n            NVIDIA_CUDA_VERSION=${{ needs.compile_cuda102_torch_openeuler.outputs.NVIDIA_CUDA_VERSION }}\n            NVIDIA_REQUIRE_CUDA=${{ needs.compile_cuda102_torch_openeuler.outputs.NVIDIA_REQUIRE_CUDA }}\n          tags: |\n            ${{ needs.compile_cuda102_torch_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_cuda102_torch_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_cuda102_torch_openeuler:\n    runs-on: ubuntu-latest\n    needs: [compile_cuda102_torch_openeuler,build_cuda102_torch_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_cuda102_torch_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_x86d_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_x86d_openeuler\n          path: artifact\n\n  build_x86d_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_x86d_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ 
secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.develop.openeuler\n          build-args: dtype=310\n          tags: |\n            ${{ needs.compile_x86d_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_x86d_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_x86d_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_x86d_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.runtime.openeuler\n          build-args: dtype=310\n          tags: |\n            ${{ needs.compile_x86d_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ 
needs.compile_x86d_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_x86d_openeuler:\n    runs-on: ubuntu-latest\n    needs: [compile_x86d_openeuler,build_x86d_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_x86d_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_armd_openeuler:\n    runs-on: openeuler\n    needs: compile_armd310p_ubuntu\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-aarch64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-openeuler-aarch64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          sed -i -e '107,116'H -e '116'G thirdparty/CMake/local-package.in\n          sed -i '118,120s@APIGW_Cpp@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '121s@APIGW-cpp-sdk@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '122s@APIGW-cpp-sdk-1.0.2@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -n '118,127p' thirdparty/CMake/local-package.in\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on -DLOCAL_PACKAGE_PATH=/opt/thirdparty/source\n      - name: Build\n        working-directory: build\n        run: |\n          npm cache clean --force\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_armd_openeuler\n          path: artifact\n\n  build_armd_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_dev.sh\n          ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=310 \\\n                 --file docker/Dockerfile.ascend.develop.openeuler \\\n               
  --tag ${{ needs.compile_armd_openeuler.outputs.IMAGE_NAME_DEV }}:latest \\\n                 --tag ${{ needs.compile_armd_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  build_armd_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_run.sh\n          ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=310 \\\n                 --file docker/Dockerfile.ascend.runtime.openeuler \\\n                 --tag ${{ needs.compile_armd_openeuler.outputs.IMAGE_NAME_RUN }}:latest \\\n                 --tag ${{ needs.compile_armd_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  test_armd_openeuler:\n    runs-on: openeuler\n    needs: compile_armd_openeuler\n    steps:\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox) || 
true\n          make build-test -j8\n          make unittest\n\n  compile_x86d310p_openeuler:\n    runs-on: ubuntu-latest\n    container: modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Build\n        working-directory: build\n        run: |\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_x86d310p_openeuler\n          path: artifact\n\n  build_x86d310p_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_x86d310p_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: 
modelbox_x86d310p_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          bash docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.develop.openeuler\n          build-args: dtype=710\n          tags: |\n            ${{ needs.compile_x86d310p_openeuler.outputs.IMAGE_NAME_DEV }}:latest\n            ${{ needs.compile_x86d310p_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }}\n\n  build_x86d310p_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_x86d310p_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_x86d310p_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          bash docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        uses: docker/build-push-action@master\n        with:\n          push: true\n          context: .\n          file: docker/Dockerfile.ascend.runtime.openeuler\n          build-args: dtype=710\n          tags: |\n            ${{ needs.compile_x86d310p_openeuler.outputs.IMAGE_NAME_RUN }}:latest\n            ${{ needs.compile_x86d310p_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }}\n\n  test_x86d310p_openeuler:\n    runs-on: ubuntu-latest\n    
needs: [compile_x86d310p_openeuler,build_x86d310p_openeuler_develop_image]\n    container:\n      image: ${{ needs.compile_x86d310p_openeuler.outputs.IMAGE_NAME_DEV }}:${{ github.event.inputs.version }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          mkdir build\n          cd build\n          cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on\n      - name: Test\n        working-directory: build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox)\n          make build-test -j8\n          make unittest\n\n  compile_armd310p_openeuler:\n    runs-on: openeuler-d310p\n    needs: compile_armd_openeuler\n    outputs:\n      IMAGE_NAME_DEV: ${{ steps.env.outputs.IMAGE_NAME_DEV }}\n      IMAGE_NAME_RUN: ${{ steps.env.outputs.IMAGE_NAME_RUN }}\n    steps:\n      - name: Set-env\n        id: env\n        run: |\n          echo \"IMAGE_NAME_DEV=modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-aarch64\" >> $GITHUB_OUTPUT\n          echo \"IMAGE_NAME_RUN=modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-aarch64\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: CMake\n        run: |\n          sed -i -e '107,116'H -e '116'G thirdparty/CMake/local-package.in\n          sed -i '118,120s@APIGW_Cpp@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '121s@APIGW-cpp-sdk@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -i '122s@APIGW-cpp-sdk-1.0.2@modelbox-webui@g' thirdparty/CMake/local-package.in\n          sed -n '118,127p' thirdparty/CMake/local-package.in\n          mkdir build\n          cd build\n          cmake .. 
-DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_ALL_DEMO=on -DWITH_WEBUI=on -DLOCAL_PACKAGE_PATH=/opt/thirdparty/source\n      - name: Build\n        working-directory: build\n        run: |\n          npm cache clean --force\n          make package -j8\n      - name: Release check\n        run: ./docker/artifact_check.sh\n        shell: bash\n      - name: Prepare Artifact\n        run: |\n          mkdir artifact\n          cp -af build/release artifact/\n          ls -lh artifact\n      - name: Upload Artifact\n        uses: actions/upload-artifact@main\n        with:\n          name: modelbox_armd310p_openeuler\n          path: artifact\n\n  build_armd310p_openeuler_develop_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd310p_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd310p_openeuler\n          path: .\n      - name: Download for dev package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_dev.sh\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_dev.sh\n          ./docker/prepare_for_dev.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=710 \\\n                 --file docker/Dockerfile.ascend.develop.openeuler 
\\\n                 --tag ${{ needs.compile_armd310p_openeuler.outputs.IMAGE_NAME_DEV }}:latest \\\n                 --tag ${{ needs.compile_armd310p_openeuler.outputs.IMAGE_NAME_DEV }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  build_armd310p_openeuler_runtime_image:\n    runs-on: ubuntu-latest\n    needs: compile_armd310p_openeuler\n    steps:\n      - name: Checkout\n        uses: actions/checkout@main\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@master\n      - name: Login to DockerHub\n        uses: docker/login-action@master\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      - name: Download Artifact\n        uses: actions/download-artifact@main\n        with:\n          name: modelbox_armd310p_openeuler\n          path: .\n      - name: Download for run package\n        run: |\n          sed -i '3s/OS_NAME=.*/OS_NAME=openEuler/' docker/prepare_for_run.sh\n          sed -i '5s/PLATFROM=.*/PLATFROM=aarch64/' docker/prepare_for_run.sh\n          ./docker/prepare_for_run.sh\n        shell: bash\n      - name: Build and Push\n        run: |\n          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes\n          docker buildx create --name cibuilder --driver docker-container --use\n          docker buildx ls\n          docker buildx inspect --bootstrap\n          docker buildx build --platform linux/arm64 \\\n                 --build-arg dtype=710 \\\n                 --file docker/Dockerfile.ascend.runtime.openeuler \\\n                 --tag ${{ needs.compile_armd310p_openeuler.outputs.IMAGE_NAME_RUN }}:latest \\\n                 --tag ${{ needs.compile_armd310p_openeuler.outputs.IMAGE_NAME_RUN }}:${{ env.IMAGE_VERSION }} \\\n                 --push .\n\n  test_armd310p_openeuler:\n    runs-on: openeuler-d310p\n    needs: compile_armd310p_openeuler\n    steps:\n      - name: Test\n        working-directory: 
build\n        run: |\n          rpm -e $(rpm -qa|grep modelbox) || true\n          make build-test -j8\n          make unittest\n"
  },
  {
    "path": ".github/workflows/unit-test-daily-on-device.yml",
    "content": "name: Daily Unit Test On Device\n\non:\n  workflow_dispatch:\n  schedule:\n    - cron: '0 18 * * *'  \n\nenv:\n  BUILD_TYPE: Debug\n\njobs:\n  build:\n    runs-on: ${{ matrix.devices }}\n    if: github.repository == 'modelbox-ai/modelbox'\n    strategy:\n      max-parallel: 1\n      matrix:\n          devices: \n            - tensorrt\n            - pytorch\n            - tensorflow\n            - ubuntu-d310p\n\n    steps:\n    - uses: actions/checkout@v3\n    - name: Configure CMake\n      run: |\n        mkdir build\n        cd build\n        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCLANG_TIDY=on -DCLANG_TIDY_AS_ERROR=on\n\n    - name: Build\n      working-directory: build\n      run: |\n        make package -j4\n\n    - name: Test\n      working-directory: build\n      run: |\n        dpkg -r $(dpkg -l|grep modelbox|awk '{print $2}') || true\n        [ \"$(arch)\" == \"aarch64\" ] && export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n        make build-test -j4\n        unset LD_LIBRARY_PATH\n        make unittest\n      shell: bash\n"
  },
  {
    "path": ".github/workflows/unit-test-pull-requests-on-device.yml",
    "content": "name: Pull-Requests Unit Test On Device\n\non:\n  workflow_dispatch:\n    inputs:\n      PullRequestId:\n        description: Pull Request ID\n        default: \"\"\n        required: true\n      maxParallel:\n        type: choice\n        description: Max Parallel\n        default: 4\n        options: \n        - 1\n        - 2\n        - 4\n        - 6\n        - 8\n      WithWebUI:\n        type: choice\n        description: build with webui or not\n        options:\n        - OFF\n        - ON\n      WithClangTidy:\n        type: choice\n        description: build with clang-tidy or not\n        options:\n        - OFF\n        - ON\n\nenv:\n  BUILD_TYPE: Debug\n\njobs:\n  build:\n    runs-on: ${{ matrix.devices }}\n    strategy:\n      max-parallel: 2\n      matrix:\n          devices: \n            - tensorrt\n            - pytorch\n            - tensorflow\n            - ubuntu-d310p\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        ref: pull/${{github.event.inputs.PullRequestId}}/head\n    - name: Configure CMake\n      run: |\n        mkdir build\n        cd build\n        cmake .. -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DWITH_WEBUI=${{github.event.inputs.WithWebUI}} -DCLANG_TIDY=${{github.event.inputs.WithClangTidy}} -DCLANG_TIDY_AS_ERROR=on\n\n    - name: Build\n      working-directory: build\n      run: |\n        make package -j${{github.event.inputs.maxParallel}}\n\n    - name: Test\n      working-directory: build\n      run: |\n        dpkg -r $(dpkg -l|grep modelbox|awk '{print $2}') || true\n        export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n        make build-test -j${{github.event.inputs.maxParallel}}\n        unset LD_LIBRARY_PATH\n        make unittest\n"
  },
  {
    "path": ".gitignore",
    "content": "\n# IDE product files\n.idea\n*.iml\n*.ipr\n*.iws\n*.bin\n*.exe\n*.pyc\n\nout\ngen\ntarget\n.vscode\nbuild\n.settings\n.classpath\n.project\n\n#clangd\n.cache\ncompile_commands.json\n"
  },
  {
    "path": "CMake/Clang-tidy.cmake",
    "content": "\nfind_program(CLANG_TIDY_BIN clang-tidy)\n\nif(NOT CLANG_TIDY_BIN)\n    message(STATUS \"No clang-tidy found, skip lint\")\n    return()\nendif()\n\nif (NOT CLANG_TIDY_BIN)\n    message(STATUS \"clang-tidy disabled, skip lint\")\n    return()\nendif()\n\nif (NOT CLANG_TIDY)\n    message(STATUS \"disable clang-tidy\")\n    return()\nendif()\n\nif (CLANG_TIDY_FIX)\n    set(CLANG_TIDY_FLAG \"${CLANG_TIDY_FLAG};-fix;\")\nendif()\n\nif (CLANG_TIDY_AS_ERROR)\n    set(CLANG_TIDY_FLAG \"${CLANG_TIDY_FLAG};-warnings-as-errors=*;\")\nendif()\n\nmessage(STATUS \"enable clang-tidy lint\")\nset(CMAKE_CXX_CLANG_TIDY \n   ${CMAKE_CURRENT_LIST_DIR}/clang-tidy-warp;${CLANG_TIDY_FLAG})\n"
  },
  {
    "path": "CMake/FindACL.cmake",
    "content": "set(HITS_DDK_PATH $ENV{DDK_PATH})\nfind_path(ACL_INCLUDE\n  NAMES acl/acl.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR} ${HITS_DDK_PATH}/include\n        /usr/local/Ascend/ascend-toolkit/latest/fwkacllib/include\n)\nmark_as_advanced(ACL_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nset(ACL_LIBRARY_NAME ascendcl)\nlist(APPEND DDK_LIB_PATH ${HITS_DDK_PATH}/lib64)\nlist(APPEND DDK_LIB_PATH /usr/local/Ascend/ascend-toolkit/latest/fwkacllib/lib64)\n\nfind_library(ACL_LIBRARY NAMES ascendcl HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${DDK_LIB_PATH})\nfind_library(ACL_DVPP_LIBRARY NAMES acl_dvpp HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${DDK_LIB_PATH})\nfind_library(ACL_CBLAS_LIBRARY NAMES acl_cblas HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${DDK_LIB_PATH})\nfind_library(ACL_RT_LIBRARY NAMES runtime HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${DDK_LIB_PATH})\n\nmark_as_advanced(ACL_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(ACL\n                                  REQUIRED_VARS ACL_LIBRARY ACL_DVPP_LIBRARY ACL_CBLAS_LIBRARY ACL_RT_LIBRARY\n                                  ACL_INCLUDE\n                                  VERSION_VAR ACL_VERSION_STRING)\n\nif(ACL_FOUND)\n  set(ACL_LIBRARIES ${ACL_LIBRARY} ${ACL_DVPP_LIBRARY} ${ACL_CBLAS_LIBRARY} ${ACL_RT_LIBRARY})\n  set(ACL_INCLUDE_DIR ${ACL_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindCPPREST.cmake",
    "content": "find_path(CPPREST_INCLUDE NAMES cpprest/http_client.h \n    HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n)\nmark_as_advanced(CPPREST_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(CPPREST_LIBRARY NAMES\n    cpprest\n    cpprestlib\n    libcpprest_imp\n    cpprestlib_static\n    libcpprest\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR}\n)\nmark_as_advanced(CPPREST_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPREST\n                                  REQUIRED_VARS CPPREST_LIBRARY CPPREST_INCLUDE\n                                  VERSION_VAR CPPREST_VERSION_STRING)\n\nif(CPPREST_FOUND)\n  set(CPPREST_LIBRARIES ${CPPREST_LIBRARY})\n  set(CPPREST_INCLUDE_DIR ${CPPREST_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindCUDACUDA.cmake",
    "content": "if (CUDA_CUDA_LIBRARY) \n  return()\nendif()\n\nfind_library(CUDA_CUDA_LIBRARY\n  NAMES cuda\n  HINTS ${CMAKE_INSTALL_FULL_LIBDIR} /usr/local/cuda/lib64/stubs /usr/local/cuda/lib/stubs \n)\nmark_as_advanced(CUDA_CUDA_LIBRARY)\n"
  },
  {
    "path": "CMake/FindDIS.cmake",
    "content": "find_path(DIS_INCLUDE NAMES dis/dis.h \n    HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n)\nmark_as_advanced(DIS_INCLUDE)\n \nset(HINT_LIBS ${CMAKE_INSTALL_FULL_LIBDIR} /usr/lib64)\n \nfind_library(DIS_LIBRARY NAMES\n    DISSDK\n    HINTS ${HINT_LIBS}\n)\nmark_as_advanced(DIS_LIBRARY)\n\nfind_library(CURL_LIBRARY NAMES\n    curl\n    HINTS ${HINT_LIBS}\n)\nmark_as_advanced(CURL_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(DIS\n                                  REQUIRED_VARS DIS_LIBRARY CURL_LIBRARY DIS_INCLUDE\n                                  VERSION_VAR DIS_VERSION_STRING)\n \nif(DIS_FOUND)\n  set(DIS_LIBRARIES ${DIS_LIBRARY} ${CURL_LIBRARY})\n  set(DIS_INCLUDE_DIR ${DIS_INCLUDE})\n  message(STATUS \"Dis dependency found, ${DIS_LIBRARIES} ${DIS_INCLUDE_DIR}\")\nendif()"
  },
  {
    "path": "CMake/FindDLENGINE.cmake",
    "content": "set(DLENGINE_PATH $ENV{DLENGINE_PATH})\n\nif(NOT DLENGINE_PATH)\n  find_package(PythonInterp QUIET)\n  execute_process(\n    COMMAND ${PYTHON_EXECUTABLE} \"-c\" \"import re, dlengine; print(re.compile('/__init__.py.*').sub('', dlengine.__file__))\"\n    RESULT_VARIABLE DLENGINE_STATUS\n    OUTPUT_VARIABLE DLENGINE_PATH\n    ERROR_QUIET\n    OUTPUT_STRIP_TRAILING_WHITESPACE\n  )\nendif()\n\nfind_path(DLENGINE_INCLUDE\n  NAMES dlengine.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR} ${DLENGINE_PATH}/include\n)\nmark_as_advanced(DLENGINE_INCLUDE)\n\nfind_library(DLENGINE_LIBRARY\n  NAMES dlengine\n  HINTS ${DLENGINE_PATH}\n)\nmark_as_advanced(DLENGINE_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(DLENGINE\n                                  REQUIRED_VARS DLENGINE_PATH DLENGINE_LIBRARY DLENGINE_INCLUDE\n                                  VERSION_VAR DLENGINE_VERSION_STRING)\n\nif(DLENGINE_FOUND)\n  set(DLENGINE_LIBRARIES ${DLENGINE_LIBRARY})\n  set(DLENGINE_INCLUDE_DIR ${DLENGINE_INCLUDE})\n  set(DLENGINE_BACKEND_ZOO_DIR ${DLENGINE_PATH}/backend_zoo)\nendif()\n"
  },
  {
    "path": "CMake/FindDSMI.cmake",
    "content": "set(HITS_DRIVER_PATH $ENV{DRIVER_PATH})\nfind_path(DSMI_INCLUDE\n  NAMES dsmi_common_interface.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n        ${HITS_DRIVER_PATH}/include\n        /usr/local/Ascend/driver/include\n)\nmark_as_advanced(DSMI_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nset(DRIVER_LIB_PATH ${HITS_DRIVER_PATH}/lib64)\nfind_library(DSMI_LIBRARY NAMES\n    drvdsmi_host\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR} \n          ${DRIVER_LIB_PATH}\n          ${DRIVER_LIB_PATH}/driver\n          /usr/local/Ascend/driver/lib64\n          /usr/local/Ascend/driver/lib64/driver\n)\nmark_as_advanced(DSMI_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(DSMI\n                                  REQUIRED_VARS DSMI_LIBRARY DSMI_INCLUDE\n                                  VERSION_VAR DSMI_VERSION_STRING)\n\nif(DSMI_FOUND)\n  set(DSMI_LIBRARIES ${DSMI_LIBRARY})\n  set(DSMI_INCLUDE_DIR ${DSMI_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindDUKTAPE.cmake",
    "content": "find_path(DUKTAPE_INCLUDE NAMES duktape.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n)\nmark_as_advanced(DUKTAPE_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nset(DRIVER_LIB_PATH ${HITS_DRIVER_PATH}/lib64)\nfind_library(DUKTAPE_LIBRARY NAMES\n    duktape\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR}\n)\nmark_as_advanced(DUKTAPE_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(DUKTAPE\n                                  REQUIRED_VARS DUKTAPE_LIBRARY DUKTAPE_INCLUDE\n                                  VERSION_VAR DUKTAPE_VERSION_STRING)\n\nif(DUKTAPE_FOUND)\n  set(DUKTAPE_LIBRARIES ${DUKTAPE_LIBRARY})\n  set(DUKTAPE_INCLUDE_DIR ${DUKTAPE_INCLUDE})\n  message(STATUS \"Duktape dependency found, ${DUKTAPE_LIBRARIES} ${DUKTAPE_INCLUDE_DIR}\")\nendif()"
  },
  {
    "path": "CMake/FindFFMPEG.cmake",
    "content": "find_path(FFMPEG_INCLUDE \n  NAMES libavformat/avformat.h libavcodec/avcodec.h libavutil/avutil.h libswscale/swscale.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n)\nmark_as_advanced(FFMPEG_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nset(FFMPEG_LIBRARY_NAME avutil avcodec avformat)\nfind_library(AVCODEC_LIBRARY NAMES avcodec HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nfind_library(AVUTIL_LIBRARY NAMES avutil HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nfind_library(AVFORMAT_LIBRARY NAMES avformat HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nfind_library(SWSCALE_LIBRARY NAMES swscale HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nset(FFMPEG_LIBRARY ${AVCODEC_LIBRARY}\n                   ${AVUTIL_LIBRARY}\n                   ${AVFORMAT_LIBRARY}\n                   ${SWSCALE_LIBRARY})\nmark_as_advanced(FFMPEG_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(FFMPEG\n                                  REQUIRED_VARS FFMPEG_LIBRARY AVCODEC_LIBRARY AVUTIL_LIBRARY AVFORMAT_LIBRARY SWSCALE_LIBRARY FFMPEG_INCLUDE\n                                  VERSION_VAR FFMPEG_VERSION_STRING)\n\nif(FFMPEG_FOUND)\n  set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARY})\n  set(FFMPEG_INCLUDE_DIR ${FFMPEG_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindFUSE.cmake",
    "content": "find_path(FUSE_INCLUDE \n  NAMES fuse.h\n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n)\nmark_as_advanced(FUSE_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nset(FUSE_LIBRARY_NAME fuse)\nfind_library(FUSE_LIBRARY NAMES fuse HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nset(FUSE_LIBRARY ${FUSE_LIBRARY})\nmark_as_advanced(FUSE_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(FUSE\n                                  REQUIRED_VARS FUSE_LIBRARY FUSE_INCLUDE\n                                  VERSION_VAR FUSE_VERSION_STRING)\n\nif(FUSE_FOUND)\n  set(FUSE_LIBRARIES ${FUSE_LIBRARY})\n  set(FUSE_INCLUDE_DIR ${FUSE_INCLUDE})\nendif()"
  },
  {
    "path": "CMake/FindMINDSPORE_LITE.cmake",
    "content": "set(HITS_MINDSPORE_LITE_PATH $ENV{MINDSPORE_LITE_PATH})\n\nif (NOT WITH_MINDSPORE) \n  message(STATUS \"not build with mindspore-lite, to enable please add -DWITH_MINDSPORE=on\")\n  return()\nendif()\n\nfind_path(MINDSPORE_LITE_DIR NAMES \n  runtime/include/api/context.h \n  runtime/include/api/graph.h \n  runtime/include/api/model.h \n  HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR} ${HITS_MINDSPORE_LITE_PATH} /usr/local/mindspore-lite)\nmark_as_advanced(MINDSPORE_LITE_DIR)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(MINDSPORE_LITE_LIBRARY NAMES\n    mindspore-lite\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${HITS_MINDSPORE_LITE_PATH}/runtime/lib /usr/local/mindspore-lite/runtime/lib\n)\nmark_as_advanced(MINDSPORE_LITE_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(MINDSPORE_LITE\n                                  REQUIRED_VARS MINDSPORE_LITE_LIBRARY MINDSPORE_LITE_DIR\n                                  VERSION_VAR MINDSPORE_VERSION_STRING)\n\nif(MINDSPORE_LITE_FOUND)\n  set(MINDSPORE_LITE_LIBRARIES ${MINDSPORE_LITE_LIBRARY})\n  set(MINDSPORE_LITE_INCLUDE_DIR ${MINDSPORE_LITE_DIR}/runtime)\n  set(MINDSOPRE_LITE_LIB_DIR ${MINDSPORE_LITE_DIR}/runtime/lib)\nendif()\n"
  },
  {
    "path": "CMake/FindNVCUVID.cmake",
    "content": "set(HITS_NVCUVID_PATH /usr/local/Video_Codec_SDK)\nfind_path(NVCUVID_INCLUDE NAMES nvcuvid.h HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR} ${HITS_NVCUVID_PATH}/include)\nmark_as_advanced(NVCUVID_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(NVCUVID_LIBRARY NAMES\n    nvcuvid\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR} ${HITS_NVCUVID_PATH}/lib\n)\nmark_as_advanced(NVCUVID_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(NVCUVID\n                                  REQUIRED_VARS NVCUVID_LIBRARY NVCUVID_INCLUDE\n                                  VERSION_VAR NVCUVID_VERSION_STRING)\n\nif(NVCUVID_FOUND)\n  set(NVCUVID_LIBRARIES ${NVCUVID_LIBRARY})\n  set(NVCUVID_INCLUDE_DIR ${NVCUVID_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindOBS.cmake",
    "content": "find_path(OBS_INCLUDE NAMES eSDKOBS.h HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR})\nmark_as_advanced(OBS_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(OBS_LIBRARY NAMES \n                eSDKOBS \n                HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\nfind_library(OBSAPI_LIBRARY NAMES \n                eSDKLogAPI \n                HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\n\nmark_as_advanced(OBSAPI_LIBRARY OBS_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(OBS\n                                  REQUIRED_VARS OBS_LIBRARY OBSAPI_LIBRARY OBS_INCLUDE\n                                  VERSION_VAR OBS_VERSION_STRING)\n\nif(OBS_FOUND)\n  set(OBS_LIBRARIES ${OBS_LIBRARY} ${OBSAPI_LIBRARY})\n  set(OBS_INCLUDE_DIR ${OBS_INCLUDE})\n  message(STATUS \"OBS dependency found, ${OBS_LIBRARIES} ${OBS_INCLUDE_DIR}\")\nendif()\n"
  },
  {
    "path": "CMake/FindROCKCHIP.cmake",
    "content": "if(DEFINED ENV{ROCKCHIP_PATH})\n    set(HINTS_ROCKCHIP_PATH $ENV{ROCKCHIP_PATH})\n\n    message(STATUS \"DEFINED ${HINTS_ROCKCHIP_PATH}\")\nelse()\n    set(HINTS_ROCKCHIP_PATH \"/opt/rockchip\")\n\n    message(STATUS \"set default search path: /opt/rockchip\")\nendif()\n\nfind_path(ROCKCHIP_RGA_INCLUDE NAMES im2d.h rga.h\n    HINTS ${HINTS_ROCKCHIP_PATH}/rk-rga/include)\nmark_as_advanced(ROCKCHIP_RGA_INCLUDE)\n\nfind_path(ROCKCHIP_MPP_INCLUDE NAMES rk_mpi.h rk_type.h\n    HINTS ${HINTS_ROCKCHIP_PATH}/rkmpp/include/rockchip)\nmark_as_advanced(ROCKCHIP_MPP_INCLUDE)\n\nfind_path(RKNN_INCLUDE NAMES rknn_api.h\n    HINTS ${HINTS_ROCKCHIP_PATH}/rknn/include)\nmark_as_advanced(RKNN_INCLUDE)\n\nfind_path(RKNPU2_INCLUDE NAMES rknn_api.h\n    HINTS ${HINTS_ROCKCHIP_PATH}/rknnrt/include)\nmark_as_advanced(RKNPU2_INCLUDE)\n\nfind_library(RKNN_LIBRARY NAMES rknn_api HINTS ${HINTS_ROCKCHIP_PATH}/rknn/lib)\nmark_as_advanced(RKNN_LIBRARY)\n\nfind_library(RKNPU2_LIBRARY NAMES rknnrt HINTS ${HINTS_ROCKCHIP_PATH}/rknnrt/lib)\nmark_as_advanced(RKNPU2_LIBRARY)\n\nfind_library(RKRGA_LIBRARY NAMES rga HINTS ${HINTS_ROCKCHIP_PATH}/rk-rga/lib)\nmark_as_advanced(RKRGA_LIBRARY)\n\nfind_library(RKMPP_LIBRARY NAMES rockchip_mpp HINTS ${HINTS_ROCKCHIP_PATH}/rkmpp/lib)\nmark_as_advanced(RKMPP_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(ROCKCHIP\n    REQUIRED_VARS ROCKCHIP_RGA_INCLUDE ROCKCHIP_MPP_INCLUDE\n    VERSION_VAR ROCKCHIP_VERSION_STRING)\n\nif(ROCKCHIP_FOUND)\n    set(RKNN_LIBRARIES ${RKNN_LIBRARY})\n    set(RKNN_INCLUDE_DIR ${RKNN_INCLUDE})\n    set(RKNPU2_INCLUDE_DIR ${RKNPU2_INCLUDE})\n    set(RKNPU2_LIBRARIES ${RKNPU2_LIBRARY})\n    set(RKRGA_LIBRARIES ${RKRGA_LIBRARY})\n    set(RKMPP_LIBRARIES ${RKMPP_LIBRARY})\n    set(ROCKCHIP_INCLUDE_DIR ${ROCKCHIP_RGA_INCLUDE} ${ROCKCHIP_MPP_INCLUDE})\n\n    message(STATUS \"rockchip dependency found, ${ROCKCHIP_INCLUDE_DIR}\")\nendif()"
  },
  {
    "path": "CMake/FindTENSORFLOW.cmake",
    "content": "find_path(TENSORFLOW_INCLUDE NAMES tensorflow/c/c_api.h HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR})\nmark_as_advanced(TENSORFLOW_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(TENSORFLOW_LIBRARY NAMES\n    tensorflow\n    HINTS ${CMAKE_INSTALL_FULL_LIBDIR}\n)\nmark_as_advanced(TENSORFLOW_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(TENSORFLOW\n                                  REQUIRED_VARS TENSORFLOW_LIBRARY TENSORFLOW_INCLUDE\n                                  VERSION_VAR TENSORFLOW_VERSION_STRING)\n\nif(TENSORFLOW_FOUND)\n  set(TENSORFLOW_LIBRARIES ${TENSORFLOW_LIBRARY})\n  set(TENSORFLOW_INCLUDE_DIR ${TENSORFLOW_INCLUDE})\nendif()\n"
  },
  {
    "path": "CMake/FindTENSORRT.cmake",
    "content": "set(TRT_ROOT /tensorrt)\nset(HINT_DIRS $ENV{TRT_RELEASE}/include ${CMAKE_INSTALL_FULL_INCLUDEDIR} ${TRT_ROOT}/include)\nset(HINT_LIBS $ENV{TRT_RELEASE}/lib ${CMAKE_INSTALL_FULL_LIBDIR} ${TRT_ROOT}/lib)\nfind_path(TENSORRT_INCLUDE \n    NAMES NvCaffeParser.h\n          NvInfer.h\n          NvInferPlugin.h\n          NvOnnxConfig.h\n          NvOnnxParser.h\n          NvOnnxParserRuntime.h\n          NvUffParser.h\n          NvUtils.h\n          NvInferPluginUtils.h\n          NvInferRuntimeCommon.h\n          NvInferRuntime.h\n          NvInferVersion.h\n    HINTS ${HINT_DIRS})\n\nmark_as_advanced(TENSORRT_INCLUDE)\n\nfind_library(TENSORRT_LIBRARY NAMES nvinfer HINTS ${HINT_LIBS})\nfind_library(TENSORRT_PLUGIN_LIBRARY NAMES nvinfer_plugin HINTS ${HINT_LIBS})\nfind_library(TRT_CAFFEPARSER_LIBRARY NAMES nvcaffe_parser nvparsers HINTS ${HINT_LIBS})\nfind_library(TRT_ONNXPARSER_LIBRARY NAMES nvonnxparser nvonnxparser_runtime HINTS ${HINT_LIBS})\n\nmark_as_advanced(TENSORRT_LIBRARY)\n\nif(TENSORRT_INCLUDE AND EXISTS \"${TENSORRT_INCLUDE}/NvInfer.h\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInfer.h\" TensorRT_MAJOR REGEX \"^#define NV_TENSORRT_MAJOR [0-9]+.*$\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInfer.h\" TensorRT_MINOR REGEX \"^#define NV_TENSORRT_MINOR [0-9]+.*$\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInfer.h\" TensorRT_PATCH REGEX \"^#define NV_TENSORRT_PATCH [0-9]+.*$\")\n\n    string(REGEX REPLACE \"^#define NV_TENSORRT_MAJOR ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_MAJOR \"${TensorRT_MAJOR}\")\n    string(REGEX REPLACE \"^#define NV_TENSORRT_MINOR ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_MINOR \"${TensorRT_MINOR}\")\n    string(REGEX REPLACE \"^#define NV_TENSORRT_PATCH ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_PATCH \"${TensorRT_PATCH}\")\n    \n    set(TENSORRT_VERSION \"${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}\")\n    message(\"find tensorrt version: \" ${TENSORRT_VERSION})\n    
set(TENSORRT_VERSION_STRING \"${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}\")\nendif()\n\nif(TENSORRT_INCLUDE AND EXISTS \"${TENSORRT_INCLUDE}/NvInferVersion.h\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInferVersion.h\" TensorRT_MAJOR REGEX \"^#define NV_TENSORRT_MAJOR [0-9]+.*$\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInferVersion.h\" TensorRT_MINOR REGEX \"^#define NV_TENSORRT_MINOR [0-9]+.*$\")\n    file(STRINGS \"${TENSORRT_INCLUDE}/NvInferVersion.h\" TensorRT_PATCH REGEX \"^#define NV_TENSORRT_PATCH [0-9]+.*$\")\n\n    string(REGEX REPLACE \"^#define NV_TENSORRT_MAJOR ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_MAJOR \"${TensorRT_MAJOR}\")\n    string(REGEX REPLACE \"^#define NV_TENSORRT_MINOR ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_MINOR \"${TensorRT_MINOR}\")\n    string(REGEX REPLACE \"^#define NV_TENSORRT_PATCH ([0-9]+).*$\" \"\\\\1\" TensorRT_VERSION_PATCH \"${TensorRT_PATCH}\")\n    \n    set(TENSORRT_VERSION \"${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}\")\n    message(\"find tensorrt version: \" ${TENSORRT_VERSION})\n    set(TENSORRT_VERSION_STRING \"${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}\")\nendif()\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(TENSORRT\n                                  REQUIRED_VARS \n                                  TENSORRT_LIBRARY \n                                  TENSORRT_INCLUDE \n                                  TRT_CAFFEPARSER_LIBRARY \n                                  TRT_ONNXPARSER_LIBRARY \n                                  TENSORRT_PLUGIN_LIBRARY\n                                  TENSORRT_VERSION_STRING\n                                  VERSION_VAR TENSORRT_VERSION_STRING)\n\nif(TENSORRT_FOUND)\n  set(TENSORRT_LIBRARIES ${TENSORRT_LIBRARY} ${TRT_CAFFEPARSER_LIBRARY} ${TRT_ONNXPARSER_LIBRARY} ${TENSORRT_PLUGIN_LIBRARY})\n  set(TENSORRT_INCLUDE_DIR ${TENSORRT_INCLUDE})\n  
set(TENSORRT_VERSION ${TENSORRT_VERSION_STRING})\nendif()"
  },
  {
    "path": "CMake/FindVCN.cmake",
    "content": "find_path(VCN_INCLUDE \n    NAMES IVS_SDK.h \n        hwsdk.h\n        ivs_error.h\n    HINTS ${CMAKE_INSTALL_FULL_INCLUDEDIR})\nmark_as_advanced(VCN_INCLUDE)\n\n# Look for the library (sorted from most current/relevant entry to least).\nfind_library(VCN_LIBRARY NAMES \n                IVS_SDK \n                HINTS ${CMAKE_INSTALL_FULL_LIBDIR})\n\nmark_as_advanced(VCN_LIBRARY)\n\ninclude(FindPackageHandleStandardArgs)\nFIND_PACKAGE_HANDLE_STANDARD_ARGS(VCN\n                                  REQUIRED_VARS VCN_LIBRARY VCN_INCLUDE\n                                  VERSION_VAR VCN_VERSION_STRING)\n\nif(VCN_FOUND)\n  set(VCN_LIBRARIES ${VCN_LIBRARY})\n  set(VCN_INCLUDE_DIR ${VCN_INCLUDE})\n  message(STATUS \"VCN dependency found, ${VCN_LIBRARIES} ${VCN_INCLUDE_DIR}\")\nendif()\n"
  },
  {
    "path": "CMake/Function.cmake",
    "content": "macro(subdirlist result dir)\n  file(GLOB children RELATIVE ${dir} ${dir}/*)\n  set (file ${ARGN})\n  set(dirs \"\")\n  foreach(child ${children})\n    if(IS_DIRECTORY ${dir}/${child})\n        if(NOT ${file} STREQUAL \"\")\n            if(NOT EXISTS ${dir}/${child}/${file})\n                CONTINUE()\n            endif()\n        endif()\n        set(dirs ${dirs} ${child})\n    endif()\n  endforeach()\n  set(${result} ${dirs})\nendmacro()\n\nfunction (exclude_files_from_dir_in_list result filelist excludedir)\n  foreach (ITR ${filelist})\n    if (\"${ITR}\" MATCHES \"(.*)${excludedir}(.*)\")                   \n      list (REMOVE_ITEM filelist ${ITR})                              \n    endif (\"${ITR}\" MATCHES \"(.*)${excludedir}(.*)\")\n\n  endforeach(ITR)\n  set(${result} ${filelist} PARENT_SCOPE)                          \nendfunction (exclude_files_from_dir_in_list)\n\nfunction (group_source_test_files source test test_pattern filelist)\n  set(list_var \"${ARGN}\")\n  string(REPLACE \".\" \"\\\\.*\" test_pattern ${test_pattern})\n  string(REPLACE \"*\" \".*\" test_pattern ${test_pattern})\n  foreach (ITR ${filelist} ${list_var})\n    if (\"${ITR}\" MATCHES \"(.*)${test_pattern}(.*)\")                   \n      list (APPEND test_list ${ITR})                 \n    else()\n      list (APPEND source_list ${ITR})              \n    endif ()\n  endforeach(ITR) \n  set(${source} ${source_list} PARENT_SCOPE)  \n  set(${test} ${test_list} PARENT_SCOPE)  \nendfunction(group_source_test_files)\n\nfunction (group_files groupone grouptwo pattern filelist)\n  set(list_var \"${ARGN}\")\n  foreach (ITR ${filelist} ${list_var})\n    if (\"${ITR}\" MATCHES \"${pattern}\")                   \n      list (APPEND grouptwo_list ${ITR})                 \n    else()\n      list (APPEND groupone_list ${ITR})              \n    endif ()\n  endforeach(ITR) \n  set(${groupone} ${groupone_list} PARENT_SCOPE)  \n  set(${grouptwo} ${grouptwo_list} PARENT_SCOPE)  
\nendfunction(group_files)\n"
  },
  {
    "path": "CMake/JavaJDK.cmake",
    "content": "if(${CMAKE_VERSION} VERSION_LESS \"3.16.0\") \n    find_library(JAVA_AWT_LIBRARY NAMES\n        jawt\n        HINTS $ENV{JAVA_HOME}\n            $ENV{JAVA_HOME}/lib\n            $ENV{JAVA_HOME}/lib/amd64\n            $ENV{JAVA_HOME}/lib/aarch64\n            $ENV{JDK_HOME}\n            $ENV{JDK_HOME}/lib\n            $ENV{JDK_HOME}/lib/amd64\n            $ENV{JDK_HOME}/lib/aarch64\n    )\n\n    find_library(JAVA_JVM_LIBRARY NAMES\n        jvm\n        HINTS $ENV{JAVA_HOME}/lib/server\n            $ENV{JAVA_HOME}/lib/amd64/server\n            $ENV{JAVA_HOME}/lib/aarch64/server\n            $ENV{JAVA_HOME}/jre/lib/server\n            $ENV{JAVA_HOME}/jre/lib/amd64/server\n            $ENV{JAVA_HOME}/jre/lib/aarch64/server\n            $ENV{JDK_HOME}/lib/server\n            $ENV{JDK_HOME}/lib/amd64/server\n            $ENV{JDK_HOME}/lib/aarch64/server\n    )\n\n    find_path(JAVA_INCLUDE_PATH NAMES \n        jni.h\n        HINTS $ENV{JAVA_HOME}/include\n              $ENV{JDK_HOME}/include\n    )\nendif()"
  },
  {
    "path": "CMake/Options.cmake",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\noption(ENABLE_FRAME_POINTER \"enable frame pointer on 64bit system with flag -fno-omit-frame-pointer, on 32bit system, it is always enabled\" ON)\n\ninclude (CheckFunctionExists)\ncheck_function_exists(dladdr HAVE_DLADDR)\ncheck_function_exists(nanosleep HAVE_NANOSLEEP)\n\noption(STANDALONE \"build standalone modelbox\" OFF)\noption(PYTHONE_DISABLED \"Disable build python\" OFF)\noption(WITH_SECURE_C \"include libsecurec.so\" ON)\noption(WITH_ALL_DEMO \"build with all demo with large model file\" OFF)\noption(TEST_COVERAGE \"build with coverage\" OFF)\noption(WITH_JAVA \"build java support\" OFF)\noption(CLANG_TIDY \"build with clang tidy\" OFF)\noption(CLANG_TIDY_FIX \"do auto fix\" ON)\noption(CLANG_TIDY_AS_ERROR \"make clang-tidy warning as error\" OFF)\noption(USE_CN_MIRROR \"download from cn mirror\" OFF)\noption(WITH_WEBUI \"build modelbox webui\" ON)\noption(WITH_MINDSPORE \"build mindspore\" OFF)\n\nset(CMAKE_CXX_STANDARD 11)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON) \n\n\n# speedup compilation\nfind_program(CCACHE ccache)\nif(CCACHE)\n    set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE})\n    set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE})\n    message(STATUS \"Enable ccache\")\nendif(CCACHE)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -fno-strict-aliasing\")\nset(CMAKE_C_FLAGS 
\"${CMAKE_C_FLAGS} -Wall -fno-strict-aliasing\")\n\nif (TEST_COVERAGE)\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage\")\n    set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage\")\nendif(TEST_COVERAGE)\n\nif(ENABLE_FRAME_POINTER STREQUAL ON)\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer\")\nendif(ENABLE_FRAME_POINTER STREQUAL ON) \n\nadd_definitions(-D__STDC_FORMAT_MACROS)\nadd_definitions(-D_GNU_SOURCE)\n\nif(OS_LINUX)\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wl,--export-dynamic\")\nendif(OS_LINUX)\n\nset(CUDA_NVCC_FLAGS \"-Xcompiler -Wall,-fno-strict-aliasing,${CMAKE_CXX_FLAGS_DEBUG}\" CACHE INTERNAL \"\") \nset(CUDA_PROPAGATE_HOST_FLAGS OFF CACHE INTERNAL \"\")\n"
  },
  {
    "path": "CMake/ProjectEnvVars.cmake",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nif(NOT TEST_WORKING_DIR)\n\tset(TEST_WORKING_DIR ${CMAKE_BINARY_DIR}/test/test-working-dir)\n\tfile(MAKE_DIRECTORY ${TEST_WORKING_DIR})\nendif()\nset(TEST_WORKING_DATA_DIR \"${TEST_WORKING_DIR}/data\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_DATA_DIR})\nset(TEST_WORKING_LIB_DIR \"${TEST_WORKING_DIR}/lib\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_LIB_DIR})\nset(TEST_WORKING_BIN_DIR \"${TEST_WORKING_DIR}/bin\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_BIN_DIR})\nset(TEST_WORKING_DRIVERS_DIR \"${TEST_WORKING_DIR}/drivers\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_DRIVERS_DIR})\nset(TEST_ASSETS ${CMAKE_SOURCE_DIR}/test/assets)\nset(TEST_SOURCE_DIR ${CMAKE_SOURCE_DIR}/test)\nset(TEST_DEMO_DRIVERS_DIR \"${TEST_WORKING_DIR}/demo\")\nfile(MAKE_DIRECTORY ${TEST_DEMO_DRIVERS_DIR})\n\nset(MODELBOX_TOOLS_PATH \"${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/tools\")\nset(MODELBOX_DEMO_DIR \"/opt/modelbox/demo\")\nset(MODELBOX_WWW_DIR \"${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/www\")\n"
  },
  {
    "path": "CMake/SecureCompilerOption.cmake",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nif(NOT CMAKE_BUILD_TYPE STREQUAL \"Debug\")\n  set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -fstack-protector-all -fPIC\")\n  set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fstack-protector-all -fPIC\")\n  \n  set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,relro,-z,now,-z,noexecstack -pie\")\n  set(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,relro,-z,now,-z,noexecstack\")\nelse()\n  set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -fPIC\")\n  set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC\")\n  set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} -pie\")\nendif()\n  "
  },
  {
    "path": "CMake/clang-tidy-warp",
    "content": "#!/bin/bash\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# wrap for clang-tidy, patch command line.\nARGS=$(echo $@ | sed 's/-fno-gnu-unique//g' | sed 's/-rdynamic//g')\nif [ \"$1\" = \"-fix\" ]; then\n    sourcefile=\"$2\"\n    if [ -e \"$sourcefile\" ]; then\n        exec {lockfd}<$sourcefile\n        flock -x $lockfd\n    fi\nfi\nexec clang-tidy ${ARGS}\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox)\n\nif (NOT MODELBOX_VERSION_MAJOR)\n    set(MODELBOX_VERSION_MAJOR 1)\nendif()\n\nif (NOT MODELBOX_VERSION_MINOR)\n    set(MODELBOX_VERSION_MINOR 0)\nendif()\n\nif (NOT MODELBOX_VERSION_PATCH)\n    set(MODELBOX_VERSION_PATCH 0)\nendif()\n\nset(MODELBOX_VERSION_STRING \"${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\")\nset(MODELBOX_VERSION_API 1)\n\nset(CMAKE_PROJECT_VERSION_MAJOR ${MODELBOX_VERSION_MAJOR})\nset(CMAKE_PROJECT_VERSION_MINOR ${MODELBOX_VERSION_MINOR})\nset(CMAKE_PROJECT_VERSION_PATCH ${MODELBOX_VERSION_PATCH})\n\nset(MODELBOX_AUTHOR \"Huawei Technologies Co., Ltd.\")\nset(MODELBOX_AUTHOR_EMAIL \"\")\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nset(CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/CMake\" ${CMAKE_MODULE_PATH})\n\ninclude(Options)\ninclude(SecureCompilerOption)\ninclude(Clang-tidy)\ninclude(Function)\ninclude(FindPkgConfig)\ninclude(CMakeDependentOption)\ninclude(GNUInstallDirs)\ninclude(CPackComponent)\ninclude(ProjectEnvVars)\ninclude(JavaJDK)\n\nif (NOT STANDALONE)\n    PKG_GET_VARIABLE(SYSTEMDSYSTEMUNITDIR systemd systemdsystemunitdir)\nendif()\n\nset(LICENSE_FILE ${CMAKE_CURRENT_LIST_DIR}/LICENSE)\nset(README_FILE 
${CMAKE_CURRENT_LIST_DIR}/README.md)\nset(RELEASE_PACKAGE_DIR ${CMAKE_BINARY_DIR}/release)\nset(CUSTOM_LIBRARY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/libs)\nset(MODELBOX_TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR})\n\nfind_package(CPPREST)\nfind_package(OpenSSL REQUIRED)\nfind_package(Boost COMPONENTS system)\nfind_package(CUDA 10.0)\nfind_package(CUDACUDA)\nfind_package(TENSORRT)\nfind_package(TENSORFLOW)\nfind_package(OBS)\nfind_package(DIS)\nfind_package(VCN)\nfind_package(NVCUVID)\nfind_package(FFMPEG)\nfind_package(ACL)\nfind_package(DSMI)\nfind_package(OpenCV)\nfind_package(DUKTAPE)\nfind_package(MINDSPORE_LITE)\nfind_package(FUSE)\nfind_package(JNI)\nfind_package(Java)\nfind_package(ROCKCHIP)\nfind_package(DLENGINE)\n\nadd_subdirectory(thirdparty)\nadd_subdirectory(src)\nadd_subdirectory(examples)\nadd_subdirectory(test EXCLUDE_FROM_ALL)\nadd_subdirectory(docs)\nadd_subdirectory(package)\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License."
  },
  {
    "path": "NOTICE",
    "content": "ModelBox\nCopyright 2021 Huawei Technologies Co., Ltd\n"
  },
  {
    "path": "README.md",
    "content": "# ModelBox\n\n[![daily building](https://github.com/modelbox-ai/modelbox/actions/workflows/unit-test-daily-on-device.yml/badge.svg)](https://github.com/modelbox-ai/modelbox/actions/workflows/unit-test-daily-on-device.yml)\n\n[English](README_en.md)\n\nModelBox是一个适用于端边云场景的AI推理应用开发框架，提供了基于Pipeline的并行执行流程，能帮助AI应用开发者较快的开发出高效，高性能，以及支持软硬协同优化的AI应用。[详细介绍](http://modelbox-ai.com/modelbox-book/)\n\n## ModelBox特点\n\n1. **易于开发**  \n   AI推理业务可视化编排开发，功能模块化，丰富组件库；c++，python, Java多语言支持。\n\n1. **易于集成**  \n   集成云上对接的组件，云上对接更容易。\n\n1. **高性能，高可靠**  \n   pipeline并发运行，数据计算智能调度，资源管理调度精细化，业务运行更高效。\n\n1. **软硬件异构**  \n   CPU，GPU，NPU多异构硬件支持，资源利用更便捷高效。\n\n1. **全场景**  \n   视频，语音，文本，NLP全场景，专为服务化定制，云上集成更容易，端边云数据无缝交换。\n\n1. **易于维护**  \n   服务运行状态可视化，应用，组件性能实时监控，优化更容易。\n\n## ModelBox解决的问题\n\n目前AI应用开发时，训练完成模型后，需要将多个模型和应用逻辑串联在一起组成AI应用，并上线发布成为服务或应用。在整个过程中，需要面临复杂的应用编程问题：\n  \n|问题|问题说明|\n|--|--|\n|需要开发AI应用的周边功能|比如AI应用编译工程，应用初始化，配置管理接口，日志管理口，应用故障监控等功能。|\n|需要开发AI常见的前后处理|音视频加解码，图像转换处理，推理前处理，后处理YOLO等开发。 |\n|需要开发和云服务对接的周边功能|比如HTTP服务开发，云存储，大数据服务，视频采集服务对接开发。 |\n|需要开发出高性能的推理应用|需要基于多线程，内存池化，显存池化，多GPU加速卡，模型batch批处理，调用硬件卡的API等手段开发应用。|\n|需要开发验证docker镜像|需要开发docker镜像，集成必要的ffmpeg，opencv软件，CUDA, MindSpore，TensorFlow等软件，并做集成测试验证。|\n|多种AI业务，需要共享代码，降低维护工作|需要复用不同组件的代码，包括AI前后处理代码，AI应用管理代码，底层内存，线程管理代码等。|\n|模型开发者，验证模型功能比较复杂|模型开发者完成模型训练后，需要编写python代码验证，之后，再转成生产代码；在高性能，高可靠场景改造工作量大。|\n\nModelBox的目标是解决AI开发者在开发AI应用时的编程复杂度，降低AI应用的开发难度，将复杂的数据处理，并发互斥，多设备协同，组件复用，数据通信，交由ModelBox处理。开发者主要聚焦业务逻辑本身，而不是软件细节。 在提高AI推理开发的效率同时，保证软件的性能，可靠性，安全性等属性。\n\n## 开始使用\n\nModelBox支持两种方式运行，一种是服务化，一种是SDK，开发者可以按照下表选择相关的开发模式。\n\n|开发模式|开发模式适用场景|\n|--|--|\n|服务化|ModelBox为独立的服务，适合云服务，端侧服务的AI推理开发场景，包括了后台服务，运维工具，docker镜像等服务化组件|\n|SDK|ModelBox提供了ModelBox开发库，使用于扩展现有应用支持高性能AI推理，专注AI推理业务，支持c++，Python集成|\n\n在开发AI推理应用时，可以按照[第一个应用](https://modelbox-ai.com/modelbox-book/first-app/mnist.html)的流程开发AI应用。\n"
  },
  {
    "path": "README_en.md",
    "content": "# ModelBox\n\n[![daily building](https://github.com/modelbox-ai/modelbox/actions/workflows/unit-test-daily-on-device.yml/badge.svg)](https://github.com/modelbox-ai/modelbox/actions/workflows/unit-test-daily-on-device.yml)\n\nModelBox is an AI application development framework featuring device-edge-cloud synergy. It provides a parallel execution framework based on pipelines, helping developers quickly develop high-performance AI applications that support software-hardware synergized optimization. [See details](http://modelbox-ai.com/modelbox-book/)\n\n## ModelBox Highlights\n\n1. **Easy to develop**  \n   Simplified orchestration and development of inference applications via a graphical interface, modularized functions, rich component libraries, and multi-language support (C++, Python, Java).\n\n1. **Easy to integrate**  \n   Easy to integrate different components on the cloud.\n\n1. **High performance and reliability**  \n   Parallel pipeline execution, intelligent scheduling of compute capacities, fine-grained resource management and scheduling, and higher efficiency.\n\n1. **Heterogeneous software and hardware**  \n   Support for heterogeneous compute resources, including CPU, GPU and NPU, higher resource utilization.\n\n1. **All-scenario**  \n   Able to process various types of data, such as video, voice, text, and NLP; service-oriented; easy to customize and integrate; and seamless data exchange across the cloud, edge, and devices.\n\n1. **Easy to maintain**  \n   Real-time monitoring of service status and application and component performance, facilitated optimization.\n\n## Tasks Facilitated by ModelBox\n\nWith typical AI application development, after model training, multiple models need to be joined together through coding to form a single application and then released as an online service or application. 
This may involve complex application programming, as described in the table below:\n  \n|Task|Description|\n|--|--|\n|Developing dependent functions for AI applications|AI application compiler project, application initialization, configuration management interface, log management interface, application fault monitoring, and more.|\n|Developing common pre- and postprocessing functions for AI applications|Audio and video codecs, image conversion, preprocessing, postprocessing (YOLO), and more.|\n|Enabling interconnection with cloud services|For example, HTTP service and interconnection with cloud storage, big data service, and video collection service.|\n|Developing AI applications for high-performance inference|Develop applications by leveraging techniques such as multi-threading, memory pooling, GPU pooling, multi-GPU accelerator, batch model processing, and hardware module calling via APIs.|\n|Developing and verifying Docker images|Develop Docker images, integrate the needed software, such as FFmpeg, OpenCV, CUDA, MindSpore, and TensorFlow, and perform integration and verification tests.|\n|Reusing code between different services to simplify maintenance|Code may need to be reused between different components, including those for preprocessing, postprocessing, and the management of applications, bottom-layer memory, and threads.|\n|Verifying models|Developers may need to write a piece of Python code to verify the models they develop. To prepare the models for demanding production scenarios, the model code may still need to be rewritten or modified significantly.|\n\nModelBox simplifies AI application development for developers by freeing them from complex data processing, decision-making on concurrency and mutual exclusion, multi-device collaboration, code reuse between different components, data communication, and more. This way, the developers can focus on the applications themselves, rather than the underlying software details. 
Additionally, ModelBox also ensures software performance, reliability, and security.\n\n## Getting Started\n\nModelBox can run in either of the following modes: service-oriented and SDK.\n\n|Development Mode|Description|\n|--|--|\n|Service-oriented|ModelBox is offered as an independent service that helps developers develop AI applications. It provides service-based components for backend services, O&M tools, and Docker images.|\n|SDK|ModelBox provides development libraries for developers to extend and scale their applications for more performance-demanding inference needs; C++ and Python are supported.|\n\nTo develop an AI application for inference, follow the procedures described in [First Application](https://modelbox-ai.com/modelbox-book/first-app/mnist.html).\n"
  },
  {
    "path": "Third_Party_Open_Source_Software_Notice",
    "content": "OPEN SOURCE SOFTWARE NOTICE\n\nPlease note we provide an open source software notice along with this product and/or this product firmware (in the following just “this product”). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country. \n\nWarranty Disclaimer    \nTHE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.\n\nCopyright Notice and License Texts\n\nSoftware: APIGW-cpp-sdk 1.0.2\nCopyright notice:\nCopyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\nCopyright (c) 2006, 2008 Junio C Hamano\n\n\nLicense:\n\n\nSoftware: googletest 1.10.0\nCopyright notice:\nCopyright 2017 Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2017 Google Inc.\nCopyright 2018, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2006, Google Inc.\nCopyright 2009, Google Inc.\nCopyright 2013, Google Inc.\nCopyright 2009, Google Inc.\nCopyright 2009, Google Inc.\nCopyright 2007 Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2008 Google Inc. All Rights Reserved.\nCopyright [2007] Neal Norwitz\nPortions Copyright [2007] Google Inc.\nCopyright 2007 Neal Norwitz\nPortions Copyright 2007 Google Inc.\nCopyright 2008 Google Inc.  
All Rights Reserved.\nCopyright 2009 Neal Norwitz All Rights Reserved.\nCopyright 2007, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2013, Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2015, Google Inc.\nCopyright (c) 2006, 2008 Junio C Hamano\nCopyright 2008, Google Inc.\nCopyright 2008 Google Inc.\nCopyright 2005, Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2006, Google Inc.\nCopyright 2007, Google Inc.\nCopyright 2006, Google Inc.\nCopyright 2006, Google Inc.\nCopyright 2009, Google Inc.\nCopyright 2010, Google Inc.\nCopyright 2009 Google Inc. All rights reserved.\nCopyright 2005, Google Inc.\nCopyright 2015, Google Inc.\nCopyright 2017 Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2009 Google Inc.  All rights reserved.\nCopyright 2008 Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2018, Google LLC.\nCopyright 2019 Google LLC.  All Rights Reserved.\nCopyright 2018, Google Inc.\nCopyright 2009 Google Inc. All Rights Reserved.\nCopyright 2009, Google Inc.\nCopyright 2013, Google Inc.\nCopyright 2018, Google Inc.\nCopyright 2018 Google LLC. All rights reserved.\nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2019, Google LLC.\nCopyright 2010 Google Inc.  All Rights Reserved.\nCopyright 2005 Google Inc. All Rights Reserved.\nCopyright 2013 Google Inc. 
All Rights Reserved.\nCopyright 2009, Google Inc.\nCopyright 2007 Google Inc.\nCopyright 2005, Google Inc.\nCopyright 2005, Google Inc.\nCopyright 2008, Google Inc.\nCopyright 2015, Google Inc.\nCopyright 2015, Google Inc.\nCopyright 2005, Google Inc.\nCopyright 2008 Google Inc.\nCopyright 2017 Google Inc.\n\n\nLicense: BSD 3-Clause License\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nSoftware: Huawei_Secure_C 1.0.0\nCopyright notice:\nCopyright (C) 2020 Huawei Technologies Co., Ltd. 
All rights reserved.\nCopyright (c) Huawei Technologies Co., Ltd. 2014-2018. All rights reserved.\nCopyright (C), 2001-2012, Huawei Tech. Co., Ltd.\nCopyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved.\nCopyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved.  ^M\nCopyright (C), 2001-2012, Huawei Tech. Co., Ltd.\nCopyright (c) Huawei Technologies Co., Ltd. 2014-2018. All rights reserved.\nCopyright (c) 2006, 2008 Junio C Hamano\n\nLicense:\n\n\nSoftware: nlohmann 3.7.3\nCopyright notice:\nCopyright &copy; 2013-2019 [Niels Lohmann](http://nlohmann.me)\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nThe class contains the UTF-8 Decoder from Bjoern Hoehrmann which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright &copy; 2008-2009 [Björn Hoehrmann](http://bjoern.hoehrmann.de/) <bjoern@hoehrmann.de>\nThe class contains a slightly modified version of the Grisu2 algorithm from Florian Loitsch which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright &copy; 2009 [Florian Loitsch](http://florian.loitsch.com/)\nCopyright (c) 2013-2019 Niels Lohmann\nCopyright (c) 2012, Erik Edlund <erik.edlund@32767.se>\nCopyright (c) 2009 Google Inc. 
All rights reserved.\ncpplint.py and its corresponding unit tests are Copyright (C) 2009 Google Inc.\nCopyright (c) 2013-2019 Niels Lohmann <http://nlohmann.me>.\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2018 Vitaliy Manushkin <agri@akamo.info>.\nCHECK(j[\"copyright\"] == \"(C) 2013-2017 Niels Lohmann\");\nCopyright 2014 The Authors\nCopyright (c) 2015 Max Woolf\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2016 Nicolas Seriot\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2016 Nicolas Seriot\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2018-2019 Bryan Gillespie\nThe above copyright notice and this permission notice shall be included in\nCopyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\nCopyright (c) 2016-2019 Viktor Kirilov\nCopyright (c) 2016-2018 Viktor Kirilov\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2015-2017 Niels Lohmann.\nThe above copyright notice and this permission notice shall be included in all\nCopyright (c) 2015-2017 Niels Lohmann\nCopyright 2015 Google Inc. All rights reserved.\nCopyright 2018 Google Inc. All rights reserved.\nCopyright 2016 Ismael Jimenez Martinez. All rights reserved.\nCopyright 2017 Roman Lebedev. All rights reserved.\nCopyright 2015 Google Inc. 
All rights reserved.\nCopyright (c) 2013-2019 Niels Lohmann <http://nlohmann.me>.\nThe above copyright notice and this permission notice shall be included in all\n`copyright` | The copyright line for the library as string.\nresult[\"copyright\"] = \"(C) 2013-2017 Niels Lohmann\";\ncopyright and related and neighboring rights to this software to\n@copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>\nThe code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch.\nCopyright (c) 2013-2019 Niels Lohmann <http://nlohmann.me>.\nThe above copyright notice and this permission notice shall be included in all\ncopyright and related and neighboring rights to this software to\nThe code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch.\n@copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>\nresult[\"copyright\"] = \"(C) 2013-2017 Niels Lohmann\";\n@copyright Copyright &copy; 2013-2019 Niels Lohmann. The code is licensed under the [MIT License](http://opensource.org/licenses/MIT).\nCopyright Louis Dionne 2015\n\"copyright\": \"(C) 2013-2017 Niels Lohmann\",\nCopyright (c) 2006, 2008 Junio C Hamano\n\n\nLicense: MIT License\nMIT License \n\nCopyright (c) 2013-2019 Niels Lohmann\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nSoftware: pybind11 2.5.0\nCopyright notice:\nCopyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\nCopyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\nCopyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\nCopyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\nCopyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\nCopyright 2001-2009 Kitware, Inc.\nCopyright 2012 Continuum Analytics, Inc.\nCopyright (c) 2007-2012 University of Illinois at Urbana-Champaign.\nCopyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2019 Google Inc.\nCopyright (c) 2017 Henry F. Schreiner\nCopyright (c) 2016 Ivan Smirnov\nCopyright (c) 2018 Hudson River Trading LLC <opensource@hudson-trading.com>\nCopyright (c) 2016 Sergey Lyskov\nCopyright (c) 2016 Trent Houliston <trent@houliston.me> and\nCopyright (c) 2016 Ivan Smirnov <i.s.smirnov@gmail.com>\nCopyright (c) 2019 Roland Dreier <roland.dreier@gmail.com>\nCopyright (c) 2016 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2016 Ben North <ben@redfrontdoor.org>\nCopyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>\nCopyright (c) 2019 Google LLC\nCopyright (c) 2016 Klemens D. 
Morgenstern\nCopyright (c) 2016 Pim Schellart <P.Schellart@princeton.edu>\nCopyright (c) 2017 Borja Zarco (Google LLC) <bzarco@google.com>\nCopyright (c) 2016 Trent Houliston <trent@houliston.me> and\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2016 Sergey Lyskov and Wenzel Jakob\nCopyright (c) 2017 Henry F. Schreiner\nCopyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and\nCopyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>\nCopyright (c) 2006, 2008 Junio C Hamano\nCopyright (c) 2006, 2008 Junio C Hamano\n\n\nLicense: BSD 3-Clause License\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n   may be used to endorse or promote products derived from this software\n   without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\n\nSoftware: tinylog 1.4\nCopyright notice:\nCopyright (C) 2018-2020 Nick Peng <pymumu@gmail.com>\nCopyright (c) 2018 Nick Peng <pymumu@gmail.com>\nCopyright (C) 2018-2020 Ruilin Peng (Nick) <pymumu@gmail.com>\nCopyright (c) 2006, 2008 Junio C Hamano\n\n\nLicense: MIT License\nMIT License\n\ntinylog\nCopyright (c) 2018 Nick Peng <pymumu@gmail.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nSoftware: toml11 3.5.0\nCopyright notice:\nCopyright (c) 2017-2020 Toru Niina\nCopyright (c) 2017 Toru Niina\nCopyright Toru Niina 2019.\nCopyright Toru Niina 2017.\nCopyright (c) 2006, 2008 Junio C Hamano\n\n\nLicense: MIT License\n\nThe MIT License (MIT)\n\nCopyright (c) 2017 Toru Niina\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
  },
  {
    "path": "docker/Dockerfile.ascend.base",
    "content": "FROM ubuntu:20.04 as base\n\nCOPY ascend /root/ascend\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    apt update && \\\n    apt install -y python3-dev python3-pip curl pciutils && \\\n    apt clean all && \\\n    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.mirrors.ustc.edu.cn/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.mirrors.ustc.edu.cn\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    python3 -m pip install --upgrade pip && \\\n    ls -lh ascend/ && groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser\n\n\nFROM base as dev310\n\nARG cann_ver=\"6.0.RC1\"\nARG driver_ver=\"6.0.rc1\"\n\nRUN if [ \"$(arch)\" = \"aarch64\" ];then driver_tag=3000;else driver_tag=3010; fi && \\\n    bash ascend/A300-${driver_tag}-npu-driver_${driver_ver}_linux-$(arch).run --quiet --docker && \\\n    cp -af /usr/local/Ascend/driver/lib64 /root/ && \\\n    bash ascend/A300-${driver_tag}-npu-driver_${driver_ver}_linux-$(arch).run --quiet --devel && \\\n    cp -af /root/lib64 /usr/local/Ascend/driver/ && \\\n    rm -rf /root/lib64 /usr/local/Ascend/develop && \\\n    bash ascend/Ascend-cann-toolkit_${cann_ver}_linux-$(arch).run --quiet --full && \\\n    ls -lh /usr/local/Ascend/* /usr/local/sbin/npu-smi\n\n\nFROM base as run310\n\nARG cann_ver=\"6.0.RC1\"\nARG driver_ver=\"6.0.rc1\"\n\nRUN if [ \"$(arch)\" = \"aarch64\" ];then driver_tag=3000;else driver_tag=3010; fi && \\\n    bash ascend/A300-${driver_tag}-npu-driver_${driver_ver}_linux-$(arch).run --quiet --docker && \\\n    bash ascend/Ascend-cann-nnae_${cann_ver}_linux-$(arch).run --quiet --install && \\\n    ls -lh /usr/local/Ascend/*\n\nFROM base as dev310p\n\nARG cann_ver=\"6.0.RC1\"\nARG 
driver_ver=\"6.0.rc1\"\n\nRUN bash ascend/Ascend-hdk-310p-npu-driver_${driver_ver}_linux-$(arch).run --quiet --docker && \\\n    cp -af /usr/local/Ascend/driver/lib64 /root/ && \\\n    bash ascend/Ascend-hdk-310p-npu-driver_${driver_ver}_linux-$(arch).run --quiet --devel && \\\n    cp -af /root/lib64 /usr/local/Ascend/driver/ && \\\n    rm -rf /root/lib64 /usr/local/Ascend/develop && \\\n    bash ascend/Ascend-cann-toolkit_${cann_ver}_linux-$(arch).run --quiet --full && \\\n    ls -lh /usr/local/Ascend/* /usr/local/sbin/npu-smi\n\nFROM base as run310p\n\nARG cann_ver=\"6.0.RC1\"\nARG driver_ver=\"6.0.rc1\"\n\nRUN bash ascend/Ascend-hdk-310p-npu-driver_${driver_ver}_linux-$(arch).run --quiet --docker && \\\n    bash ascend/Ascend-cann-nnae_${cann_ver}_linux-$(arch).run --quiet --install && \\\n    ls -lh /usr/local/Ascend/*\n\nFROM ubuntu:20.04\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser\n\nCOPY --from=dev310 /usr/local/Ascend /usr/local/Ascend_dev310\nCOPY --from=dev310 /usr/local/sbin/npu-smi /usr/local/sbin/npu-smi310\nCOPY --from=run310 /usr/local/Ascend /usr/local/Ascend_run310\nCOPY --from=dev310p /usr/local/Ascend /usr/local/Ascend_dev310p\nCOPY --from=dev310p /usr/local/sbin/npu-smi /usr/local/sbin/npu-smi310p\nCOPY --from=run310p /usr/local/Ascend /usr/local/Ascend_run310p\n"
  },
  {
    "path": "docker/Dockerfile.ascend.develop.openeuler",
    "content": "FROM modelbox/c83-base:latest\nFROM openeuler/openeuler:20.03-lts-sp3\n\nARG dtype\n\nCOPY release /opt/release\nCOPY --from=0 /usr/local/Ascend_dev${dtype} /usr/local/Ascend\nCOPY --from=0 /usr/local/sbin/npu-smi${dtype} /usr/local/sbin/npu-smi\nADD *.tar.gz /usr/local/\n\nARG ASCEND_PATH=/usr/local/Ascend\nENV LOCAL_ASCEND=/usr/local/Ascend\nENV ASCEND_AICPU_PATH=${ASCEND_PATH}/ascend-toolkit/latest\nENV ASCEND_OPP_PATH=${ASCEND_PATH}/ascend-toolkit/latest/opp\nENV TOOLCHAIN_HOME=${ASCEND_PATH}/ascend-toolkit/latest/toolkit\nENV TBE_IMPL_PATH=${ASCEND_PATH}/ascend-toolkit/latest/opp/op_impl/build-in/ai_core/tbe\nENV MINDSPORE_PATH=/usr/local/lib/python3.7/site-packages/mindspore\nENV DDK_PATH=${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib\nENV DRIVER_PATH=${ASCEND_PATH}/driver\n\nENV PATH=\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/ccec_compiler/bin${PATH:+:${PATH}}\n\nENV PYTHONPATH=\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/toolkit/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/opp/op_impl/build-in/ai_core/tbe:\\\n${ASCEND_PATH}/ascend-toolkit/latest/pyACL/python/site-packages/acl${PYTHONPATH:+:${PYTHONPATH}}\n\nENV LD_LIBRARY_PATH=${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    dnf update -y --nogpgcheck && \\\n    dnf install -y --nogpgcheck vim gcc gcc-c++ make cmake libnsl perf doxygen gcc-gfortran pciutils alsa-lib gdb clang gtk3-devel \\\n        zlib-devel curl-devel bzip2-devel rpm-build sqlite-devel libffi-devel openssl-devel xz-devel git 
passwd systemd \\\n        krb5-devel expat-devel boost-devel ncurses-devel libxml2-devel libssh2-devel python3-devel python3-perf bc nc \\\n        readline-devel nss-devel fuse-devel gdbm-devel glibc-devel glibc-debuginfo gnutls-devel net-tools libatomic \\\n        xauth tk-devel lapack-devel graphviz-devel mesa-libGL-devel openblas-devel protobuf-c-devel openssh-server && \\\n    ln -sf pip3.7 /usr/bin/pip && ln -sf python3.7 /usr/bin/python && \\\n    ln -sf opencv4/opencv2 /usr/local/include/opencv2 && \\\n    dnf clean all && rm -rf /var/cache/dnf/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir wheel numpy attrs psutil decorator protobuf scipy sympy cffi grpcio grpcio-tools requests pillow pyyaml opencv-python==4.5.5.64 && \\\n    python3 -m pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.9.0/MindSpore/ascend/$(arch)/mindspore_ascend-1.9.0-cp37-cp37m-linux_$(arch).whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/topi-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/te-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/hccl-0.1.0-py3-none-any.whl && \\\n    echo \"${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/driver\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/common\" 
>>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    sed -i '/include/i\\/usr/local/lib64' /etc/ld.so.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf\n\nRUN if [ \"$(arch)\" = \"aarch64\" ];then node_arch=\"arm64\";else node_arch=\"x64\";fi && \\\n    curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-${node_arch}.tar.xz|tar -xJ && \\\n    cp -af node-v16.13.2-linux-${node_arch}/* /usr/local/ && \\\n    npm install -g npm@latest && npm -v && node -v && \\\n    npm install -g @angular/cli && \\\n    npm cache clean --force && rm -rf /root/* && \\\n    python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    rpm -ivh /opt/release/*.rpm && \\\n    usermod -G HwHiAiUser modelbox\n\nRUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo 'export TMOUT=0' >> /etc/bashrc && \\\n    echo 'export HISTSIZE=1000' >> /etc/bashrc && \\\n    echo '[ -n \"${SSH_TTY}\" ] && export $(cat /proc/1/environ|tr \"\\\\0\" \"\\\\n\"|xargs)' >> /etc/bashrc && \\\n    echo 'export PS1=\"\\[\\e[35;1m\\][\\u@\\h \\W]$ \\[\\e[0m\\]\"' >> /etc/bashrc && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bashrc && systemctl enable sshd\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.ascend.develop.ubuntu",
    "content": "FROM modelbox/c83-base:latest\nFROM ubuntu:20.04\n\nARG dtype\n\nCOPY release /opt/release\nCOPY --from=0 /usr/local/Ascend_dev${dtype} /usr/local/Ascend\nCOPY --from=0 /usr/local/sbin/npu-smi${dtype} /usr/local/sbin/npu-smi\nADD *.tar.gz /usr/local/\n\nARG ASCEND_PATH=/usr/local/Ascend\nENV LOCAL_ASCEND=/usr/local/Ascend\nENV ASCEND_AICPU_PATH=${ASCEND_PATH}/ascend-toolkit/latest\nENV ASCEND_OPP_PATH=${ASCEND_PATH}/ascend-toolkit/latest/opp\nENV TOOLCHAIN_HOME=${ASCEND_PATH}/ascend-toolkit/latest/toolkit\nENV TBE_IMPL_PATH=${ASCEND_PATH}/ascend-toolkit/latest/opp/op_impl/build-in/ai_core/tbe\nENV MINDSPORE_PATH=/usr/local/lib/python3.8/dist-packages/mindspore\nENV DDK_PATH=${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib\nENV DRIVER_PATH=${ASCEND_PATH}/driver\n\nENV PATH=\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/ccec_compiler/bin${PATH:+:${PATH}}\n\nENV PYTHONPATH=\\\n${ASCEND_PATH}/ascend-toolkit/latest/atc/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/toolkit/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/python/site-packages:\\\n${ASCEND_PATH}/ascend-toolkit/latest/opp/op_impl/build-in/ai_core/tbe:\\\n${ASCEND_PATH}/ascend-toolkit/latest/pyACL/python/site-packages/acl${PYTHONPATH:+:${PYTHONPATH}}\n\nENV LD_LIBRARY_PATH=${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    echo \"deb http://archive.ubuntu.com/ubuntu/ bionic-proposed main restricted universe multiverse\" >>/etc/apt/sources.list && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then sed -i '/proposed/d' /etc/apt/sources.list;fi && \\\n    export DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=low 
TZ=Asia/Shanghai && \\\n    apt update && \\\n    apt install -y python3.8-dev python3-pip python3-apt python3-setuptools apt-utils && \\\n    apt install -y dbus systemd systemd-cron iproute2 gnupg2 curl libcurl4-openssl-dev ca-certificates \\\n        build-essential unzip ffmpeg sudo bash vim gdb git doxygen autoconf cmake gettext openssh-server \\\n        pkg-config kmod net-tools pciutils libssl-dev libcpprest-dev libswscale-dev libavformat-dev \\\n        graphviz libgraphviz-dev libfuse-dev libprotobuf-c-dev protobuf-c-compiler duktape-dev \\\n        libopenblas-dev netcat clang clang-tidy-10 libgoogle-glog-dev libtbb-dev && \\\n    update-ca-certificates && apt upgrade -y && \\\n    ln -sf clang-tidy-10 /usr/bin/clang-tidy && \\\n    ln -sf run-clang-tidy-10 /usr/bin/run-clang-tidy && \\\n    ln -sf python3.8 /usr/bin/python3 && \\\n    ln -sf opencv4/opencv2 /usr/local/include/opencv2 && \\\n    rm -rf /var/lib/apt/lists/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir wheel psutil numpy attrs psutil decorator protobuf scipy sympy cffi grpcio grpcio-tools requests pillow pyyaml opencv-python==4.5.5.64 && \\\n    python3 -m pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.9.0/MindSpore/ascend/$(arch)/mindspore_ascend-1.9.0-cp38-cp38-linux_$(arch).whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/topi-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir 
${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/te-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64/hccl-0.1.0-py3-none-any.whl && \\\n    echo \"${ASCEND_PATH}/ascend-toolkit/latest/fwkacllib/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/driver\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/common\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf\n\nRUN if [ \"$(arch)\" = \"aarch64\" ];then node_arch=\"arm64\";else node_arch=\"x64\";fi && \\\n    curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-${node_arch}.tar.xz|tar -xJ && \\\n    cp -af node-v16.13.2-linux-${node_arch}/* /usr/local/ && \\\n    npm install -g npm@latest && npm -v && node -v && \\\n    npm install -g @angular/cli && \\\n    npm cache clean --force && rm -rf /root/* && \\\n    python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    dpkg -i /opt/release/*.deb && \\\n    usermod -G HwHiAiUser modelbox\n\nRUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i \"32aPermitRootLogin yes\" /etc/ssh/sshd_config && \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo 'export TMOUT=0' >> /etc/bash.bashrc && \\\n    echo 'export HISTSIZE=1000' >> 
/etc/bash.bashrc && \\\n    echo '[ -n \"${SSH_TTY}\" ] && export $(cat /proc/1/environ|tr \"\\\\0\" \"\\\\n\"|xargs)' >> /etc/bash.bashrc && \\\n    echo 'export PS1=\"\\[\\e[35;1m\\][\\u@\\h \\W]$ \\[\\e[0m\\]\"' >> ~/.bashrc && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bash.bashrc && systemctl enable ssh\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.ascend.runtime.openeuler",
    "content": "FROM modelbox/c83-base:latest\nFROM openeuler/openeuler:20.03-lts-sp3\n\nARG dtype\n\nCOPY release /opt/release\nCOPY --from=0 /usr/local/Ascend_run${dtype} /usr/local/Ascend\nCOPY --from=0 /usr/local/sbin/npu-smi${dtype} /usr/local/sbin/npu-smi\nADD *.tar.gz /usr/local/\n\nARG ASCEND_PATH=/usr/local/Ascend\nENV LOCAL_ASCEND=/usr/local/Ascend\nENV ASCEND_AICPU_PATH=${ASCEND_PATH}/nnae/latest\nENV ASCEND_OPP_PATH=${ASCEND_PATH}/nnae/latest/opp\nENV DDK_PATH=${ASCEND_PATH}/nnae/latest/fwkacllib\nENV DRIVER_PATH=${ASCEND_PATH}/driver\n\nENV PYTHONPATH=${ASCEND_PATH}/nnae/latest/pyACL/python/site-packages/acl${PYTHONPATH:+:${PYTHONPATH}}\n\nENV LD_LIBRARY_PATH=${ASCEND_PATH}/nnae/latest/fwkacllib/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    dnf update -y --nogpgcheck && \\\n    dnf install -y --nogpgcheck curl boost libnsl libssh2 libatomic mesa-libGL graphviz protobuf-c \\\n        systemd openblas gcc python3-devel lapack python3-perf fuse libxml2 openssl bc && \\\n    ln -sf pip3.7 /usr/bin/pip && ln -sf python3.7 /usr/bin/python3 && \\\n    dnf clean all && rm -rf /var/cache/dnf/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir numpy decorator sympy cffi pyyaml pathlib2 grpcio grpcio-tools protobuf scipy requests pillow opencv-python==4.5.5.64 && \\\n    python3 -m pip install --no-cache-dir 
https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.9.0/MindSpore/ascend/$(arch)/mindspore_ascend-1.9.0-cp37-cp37m-linux_$(arch).whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/topi-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/te-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/hccl-0.1.0-py3-none-any.whl && \\\n    echo \"${ASCEND_PATH}/nnae/latest/fwkacllib/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/driver\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/common\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    sed -i '/include/i\\/usr/local/lib64' /etc/ld.so.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    find /usr/local -name \"*.a\"|xargs rm -f\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    rpm -ivh /opt/release/*.rpm && \\\n    usermod -G HwHiAiUser modelbox && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bashrc && systemctl enable modelbox\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nCMD 
[\"/usr/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.ascend.runtime.ubuntu",
    "content": "FROM modelbox/c83-base:latest\nFROM ubuntu:20.04\n\nARG dtype\n\nCOPY release /opt/release\nCOPY --from=0 /usr/local/Ascend_run${dtype} /usr/local/Ascend\nCOPY --from=0 /usr/local/sbin/npu-smi${dtype} /usr/local/sbin/npu-smi\nADD *.tar.gz /usr/local/\n\nARG ASCEND_PATH=/usr/local/Ascend\nENV LOCAL_ASCEND=/usr/local/Ascend\nENV ASCEND_AICPU_PATH=${ASCEND_PATH}/nnae/latest\nENV ASCEND_OPP_PATH=${ASCEND_PATH}/nnae/latest/opp\nENV DDK_PATH=${ASCEND_PATH}/nnae/latest/fwkacllib\nENV DRIVER_PATH=${ASCEND_PATH}/driver\n\nENV PYTHONPATH=${ASCEND_PATH}/nnae/latest/pyACL/python/site-packages/acl${PYTHONPATH:+:${PYTHONPATH}}\n\nENV LD_LIBRARY_PATH=${ASCEND_PATH}/nnae/latest/fwkacllib/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    echo \"deb http://archive.ubuntu.com/ubuntu/ bionic-proposed main restricted universe multiverse\" >>/etc/apt/sources.list && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then sed -i '/proposed/d' /etc/apt/sources.list;fi && \\\n    export DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=low TZ=Asia/Shanghai && \\\n    apt update && \\\n    apt install -y python3.8-dev python3-pip python3-apt python3-setuptools && \\\n    apt install -y vim gcc dbus systemd systemd-cron iproute2 gnupg2 libfuse2 libgtk-3-0 \\\n        build-essential bash unzip ffmpeg curl pkg-config ca-certificates libduktape205 \\\n        libssl1.1 libcpprest graphviz libprotobuf-c1 libopenblas-base libgoogle-glog0v5 libtbb2 && \\\n    update-ca-certificates && apt upgrade -y && \\\n    ln -sf python3.8 /usr/bin/python3 && \\\n    rm -rf /var/lib/apt/lists/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" 
>>/root/.pip/pip.conf && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then sed -i 's@python.org@douban.com@g' /root/.pip/pip.conf;fi && \\\n    groupadd HwHiAiUser && \\\n    useradd -g HwHiAiUser -d /home/HwHiAiUser -m HwHiAiUser && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir numpy decorator psutil sympy cffi pyyaml pathlib2 grpcio grpcio-tools protobuf scipy requests pillow opencv-python==4.5.5.64 && \\\n    python3 -m pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.9.0/MindSpore/ascend/$(arch)/mindspore_ascend-1.9.0-cp38-cp38-linux_$(arch).whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/topi-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/te-0.4.0-py3-none-any.whl && \\\n    python3 -m pip install --no-cache-dir ${ASCEND_PATH}/nnae/latest/fwkacllib/lib64/hccl-0.1.0-py3-none-any.whl && \\\n    echo \"${ASCEND_PATH}/nnae/latest/fwkacllib/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/driver\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64/common\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    echo \"${ASCEND_PATH}/driver/lib64\" >>/etc/ld.so.conf.d/ascend.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    find /usr/local -name \"*.a\"|xargs rm -f\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    dpkg -i /opt/release/*.deb && \\\n    usermod -G HwHiAiUser modelbox && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f 
/lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bash.bashrc && systemctl enable modelbox\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.cuda.develop.openeuler",
    "content": "ARG BASE_IMAGE=openeuler/openeuler:20.03-lts-sp3\nFROM ${BASE_IMAGE} as base\n\nCOPY release /opt/release\nCOPY docker/repo/*.repo /etc/yum.repos.d/\nADD *.tar.gz /usr/local/\n\nARG CUDA_VER\nARG CUDA_VERSION\nARG TF_VERSION\nARG TRT_VERSION\nARG TORCH_VERSION\nARG CUDA_CUDART_VERSION\nARG NVIDIA_CUDA_VERSION\nARG NVIDIA_REQUIRE_CUDA\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    dnf update -y --nogpgcheck && \\\n    dnf install -y --nogpgcheck \\\n        vim gcc gcc-c++ make cmake libnsl python3-perf doxygen pciutils gdb net-tools zlib-devel gnutls-devel nc gtk3-devel \\\n        rpm-build gcc-gfortran alsa-lib nss-devel fuse-devel gdbm-devel krb5-devel expat-devel curl-devel bzip2-devel bc \\\n        boost-devel ncurses-devel libxml2-devel libssh2-devel openssh-server python3-devel glibc-debuginfo libatomic systemd \\\n        git clang xauth graphviz-devel sqlite-devel openssl-devel readline-devel mesa-libGL-devel protobuf-c-devel passwd && \\\n    ln -sf pip3.7 /usr/bin/pip && ln -sf python3.7 /usr/bin/python3 && \\\n    ln -sf opencv4/opencv2 /usr/local/include/opencv2 && \\\n    dnf clean all && rm -rf /var/cache/dnf/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir psutil pillow wheel numpy pyyaml requests opencv-python==4.5.5.64 && \\\n    NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \\\n    echo \"$NVIDIA_GPGKEY_SUM  
/etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA\" | sha256sum -c --strict - && \\\n    dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n        cuda-cudart-${CUDA_VER}-${CUDA_CUDART_VERSION} \\\n        cuda-minimal-build-${CUDA_VER} \\\n        cuda-libraries-$([ \"${CUDA_VERSION}\" = \"11.2\" ] && echo \"devel\" || echo \"dev\")-${CUDA_VER} \\\n        cuda-command-line-tools-${CUDA_VER} && \\\n    ln -s cuda-${CUDA_VERSION} /usr/local/cuda && \\\n    curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-x64.tar.xz|tar -xJ && \\\n    cp -af node-v16.13.2-linux-x64/{bin,include,lib,share} /usr/local/ && \\\n    npm install -g npm@latest && npm -v && node -v && \\\n    npm install -g @angular/cli && \\\n    npm cache clean --force && \\\n    dnf clean all && rm -rf /var/cache/dnf/* /root/*\n    \nRUN if [ \"${CUDA_VERSION}\" = \"10.2\" ]; then \\\n        dnn_ver=\"8.0.0.180-1.cuda10.2\"; \\\n    elif [ \"${CUDA_VERSION}\" = \"11.2\" ]; then \\\n        dnn_ver=\"8.4.1.50-1.cuda11.6\";fi && \\\n    dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n        libcudnn8-${dnn_ver} \\\n        libcudnn8-devel-${dnn_ver} && \\\n    if [ -n \"${TF_VERSION}\" ]; then \\\n        curl -LO https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        tar zxf libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        cp -af lib/* /usr/local/lib64/ && \\\n        cp -af include /usr/local/ && \\\n        python3 -m pip install --no-cache-dir tensorflow-gpu==2.6.0; \\\n    elif [ -n \"${TORCH_VERSION}\" ]; then \\\n        curl -LO https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.9.1%2Bcu102.zip && \\\n        unzip libtorch-*.zip -d /root >/dev/null 2>&1 && \\\n        cp -af libtorch/{include,lib,share} /usr/local/; \\\n    elif [ -n \"${TRT_VERSION}\" ]; then \\\n        if [ \"${TRT_VERSION}\" = \"7.1.3.4\" ]; then \\\n            trt_ver=\"7-7.1.3-1.cuda10.2\" ; \\\n        elif [ 
\"${TRT_VERSION}\" = \"8.4.2.4\" ]; then \\\n            trt_ver=\"8-8.4.2-1.cuda11.6\";fi && \\\n        dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n            libnvinfer${trt_ver} \\\n            libnvinfer-devel-${trt_ver#*-} \\\n            libnvonnxparsers${trt_ver} \\\n            libnvonnxparsers-devel-${trt_ver#*-} \\\n            libnvparsers${trt_ver} \\\n            libnvparsers-devel-${trt_ver#*-} \\\n            libnvinfer-plugin${trt_ver} \\\n            libnvinfer-plugin-devel-${trt_ver#*-};fi && \\\n    dnf clean all && rm -rf /var/cache/dnf/* /root/*\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    rpm -ivh /opt/release/*.rpm && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    sed -i '/include/i\\/usr/local/lib64' /etc/ld.so.conf && \\\n    echo \"/usr/local/nvidia/lib\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    echo \"/usr/local/nvidia/lib64\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    sed -i '/TMOUT/s/300/0/g' /etc/bashrc && \\\n    echo 'HISTSIZE=1000' >> /etc/bashrc && \\\n    echo \"export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig\" > /etc/profile.d/pkgconfig.sh && \\\n    echo '[ -n \"${SSH_TTY}\" ] && export $(cat /proc/1/environ|tr \"\\\\0\" \"\\\\n\"|xargs)' >> /etc/bashrc && \\\n    echo 'export PS1=\"\\[\\e[35;1m\\][\\u@\\h \\W]$ 
\\[\\e[0m\\]\"' >> /etc/bashrc && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bashrc && systemctl enable sshd\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nLABEL com.nvidia.volumes.needed=\"nvidia_driver\" com.nvidia.cuda.version=\"${NVIDIA_CUDA_VERSION}\"\n\nENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin${PATH:+:${PATH}}\nENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\nENV NVIDIA_VISIBLE_DEVICES all\nENV NVIDIA_DRIVER_CAPABILITIES video,compute,utility\nENV NVIDIA_REQUIRE_CUDA \"${NVIDIA_REQUIRE_CUDA}\"\n\nCMD [\"/usr/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.cuda.develop.ubuntu",
    "content": "ARG BASE_IMAGE=ubuntu:20.04\nFROM ${BASE_IMAGE} as base\n\nCOPY release /opt/release\nADD *.tar.gz /usr/local/\n\nARG CUDA_VER\nARG CUDA_VERSION\nARG TF_VERSION\nARG TRT_VERSION\nARG TORCH_VERSION\nARG CUDA_CUDART_VERSION\nARG NVIDIA_CUDA_VERSION\nARG NVIDIA_REQUIRE_CUDA\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    echo \"deb http://archive.ubuntu.com/ubuntu/ bionic-proposed main restricted universe multiverse\" >>/etc/apt/sources.list && \\\n    export DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=low TZ=Asia/Shanghai && \\\n    apt update && \\\n    apt install -y python3.8-dev python3-pip python3-apt python3-setuptools apt-utils && \\\n    apt install -y dbus systemd systemd-cron iproute2 gnupg2 curl libcurl4-openssl-dev ca-certificates \\\n        build-essential unzip ffmpeg sudo bash vim gdb git doxygen autoconf cmake gettext openssh-server \\\n        pkg-config kmod net-tools pciutils libgtk-3-dev libprotobuf-c-dev protobuf-c-compiler duktape-dev \\\n        libssl-dev libcpprest-dev libswscale-dev libavformat-dev graphviz libgraphviz-dev libfuse-dev \\\n        netcat clang clang-tidy-10 ccache libgoogle-glog-dev libtbb-dev && \\\n    update-ca-certificates && apt upgrade -y && \\\n    ln -sf clang-tidy-10 /usr/bin/clang-tidy && \\\n    ln -sf run-clang-tidy-10 /usr/bin/run-clang-tidy && \\\n    ln -sf python3.8 /usr/bin/python3 && \\\n    ln -sf opencv4/opencv2 /usr/local/include/opencv2 && \\\n    rm -rf /var/lib/apt/lists/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir psutil pillow wheel numpy pyyaml requests 
opencv-python==4.5.5.64 && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub | apt-key add - && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \\\n    echo \"deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /\" > /etc/apt/sources.list.d/cuda.list && \\\n    echo \"deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /\" > /etc/apt/sources.list.d/nvidia-ml.list && \\\n    apt update && \\\n    apt install -y --no-install-recommends \\\n        cuda-cudart-${CUDA_VER}=${CUDA_CUDART_VERSION} \\\n        cuda-minimal-build-${CUDA_VER} \\\n        cuda-libraries-dev-${CUDA_VER} \\\n        cuda-command-line-tools-${CUDA_VER} && \\\n    ln -s cuda-${CUDA_VERSION} /usr/local/cuda && \\\n    curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-x64.tar.xz|tar -xJ && \\\n    cp -af node-v16.13.2-linux-x64/* /usr/local/ && \\\n    npm install -g npm@latest && npm -v && node -v && \\\n    npm install -g @angular/cli && \\\n    npm cache clean --force && \\\n    rm -rf /var/lib/apt/lists/* /root/*\n\nRUN apt update && \\\n    if [ \"${CUDA_VERSION}\" = \"10.2\" ]; then \\\n        dnn_ver=\"8=8.0.0.180-1+cuda10.2\"; \\\n    elif [ \"${CUDA_VERSION}\" = \"11.2\" ]; then \\\n        dnn_ver=\"8=8.4.1.50-1+cuda11.6\";fi && \\\n    apt install -y --no-install-recommends \\\n        libcudnn${dnn_ver} \\\n        libcudnn${dnn_ver%=*}-dev=${dnn_ver#*=} && \\\n    if [ -n \"${TF_VERSION}\" ]; then \\\n        curl -LO https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        tar zxf libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        cp -af lib include /usr/local/ && \\\n        python3 -m pip install --no-cache-dir tensorflow-gpu==2.6.0; \\\n    elif [ -n \"${TORCH_VERSION}\" ]; then \\\n        curl 
-LO https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.9.1%2Bcu102.zip && \\\n        unzip libtorch-*.zip -d /root >/dev/null 2>&1 && \\\n        cp -af libtorch/* /usr/local/; \\\n    elif [ -n \"${TRT_VERSION}\" ]; then \\\n        if [ \"${TRT_VERSION}\" = \"7.1.3.4\" ]; then \\\n            trt_ver=\"7=7.1.3-1+cuda10.2\"; \\\n        elif [ \"${TRT_VERSION}\" = \"8.4.2.4\" ]; then \\\n            trt_ver=\"8=8.4.2-1+cuda11.6\";fi && \\\n        apt install -y --no-install-recommends \\\n            libnvinfer${trt_ver} \\\n            libnvinfer-dev=${trt_ver#*=} \\\n            libnvparsers${trt_ver} \\\n            libnvparsers-dev=${trt_ver#*=} \\\n            libnvonnxparsers${trt_ver} \\\n            libnvonnxparsers-dev=${trt_ver#*=} \\\n            libnvinfer-plugin${trt_ver} \\\n            libnvinfer-plugin-dev=${trt_ver#*=} \\\n            python3-libnvinfer=${trt_ver#*=} \\\n            python3-libnvinfer-dev=${trt_ver#*=};fi && \\\n    rm -rf /var/lib/apt/lists/* /root/*\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    dpkg -i /opt/release/*.deb && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    echo \"/usr/local/nvidia/lib\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    echo \"/usr/local/nvidia/lib64\" >> /etc/ld.so.conf.d/nvidia.conf 
&& \\\n    sed -i \"32aPermitRootLogin yes\" /etc/ssh/sshd_config && \\\n    echo 'export TMOUT=0' >> /etc/bash.bashrc && \\\n    echo 'export HISTSIZE=1000' >> /etc/bash.bashrc && \\\n    echo '[ -n \"${SSH_TTY}\" ] && export $(cat /proc/1/environ|tr \"\\\\0\" \"\\\\n\"|xargs)' >> /etc/bash.bashrc && \\\n    echo 'export PS1=\"\\[\\e[35;1m\\][\\u@\\h \\W]$ \\[\\e[0m\\]\"' >> ~/.bashrc && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bash.bashrc && systemctl enable ssh\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nLABEL com.nvidia.volumes.needed=\"nvidia_driver\" com.nvidia.cuda.version=\"${NVIDIA_CUDA_VERSION}\"\n\nENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin${PATH:+:${PATH}}\nENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\nENV NVIDIA_VISIBLE_DEVICES=all\nENV NVIDIA_DRIVER_CAPABILITIES=video,compute,utility\nENV NVIDIA_REQUIRE_CUDA=\"${NVIDIA_REQUIRE_CUDA}\"\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.cuda.runtime.openeuler",
    "content": "ARG BASE_IMAGE=openeuler/openeuler:20.03-lts-sp3\nFROM ${BASE_IMAGE} as base\n\nCOPY release /opt/release\nCOPY docker/repo/*.repo /etc/yum.repos.d/\nADD *.tar.gz /usr/local/\n\nARG CUDA_VER\nARG CUDA_VERSION\nARG TF_VERSION\nARG TRT_VERSION\nARG TORCH_VERSION\nARG CUDA_CUDART_VERSION\nARG NVIDIA_CUDA_VERSION\nARG NVIDIA_REQUIRE_CUDA\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    dnf update -y --nogpgcheck && \\\n    dnf install -y --nogpgcheck curl boost libnsl libssh2 libatomic mesa-libGL graphviz protobuf-c \\\n        systemd fuse libxml2 openssl bc && \\\n    ln -sf pip3.7 /usr/bin/pip && ln -sf python3.7 /usr/bin/python3 && \\\n    dnf clean all && rm -rf /var/cache/dnf/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir psutil pillow wheel numpy pyyaml requests opencv-python==4.5.5.64 && \\\n    NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \\\n    echo \"$NVIDIA_GPGKEY_SUM  /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA\" | sha256sum -c --strict - && \\\n    dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n        cuda-nvtx-${CUDA_VER} \\\n        cuda-libraries-${CUDA_VER} && \\\n    ln -s cuda-${CUDA_VERSION} /usr/local/cuda && \\\n    find /usr/local -name \"*.a\"|xargs rm -f && \\\n    dnf clean all && rm -rf /var/cache/dnf/* /root/*\n\nRUN if [ \"${CUDA_VERSION}\" = \"10.2\" ]; then \\\n        
dnn_ver=\"8.0.0.180-1.cuda10.2\"; \\\n    elif [ \"${CUDA_VERSION}\" = \"11.2\" ]; then \\\n        dnn_ver=\"8.4.1.50-1.cuda11.6\";fi && \\\n    dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n        libcudnn8-${dnn_ver} && \\\n    if [ -n \"${TF_VERSION}\" ]; then \\\n        curl -LO https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        tar zxf libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        cp -af lib/* /usr/local/lib64/ && \\\n        python3 -m pip install --no-cache-dir tensorflow-gpu==2.6.0; \\\n    elif [ -n \"${TRT_VERSION}\" ]; then \\\n        if [ \"${TRT_VERSION}\" = \"7.1.3.4\" ]; then \\\n            trt_ver=\"7-7.1.3-1.cuda10.2\"; \\\n        elif [ \"${TRT_VERSION}\" = \"8.4.2.4\" ]; then \\\n            trt_ver=\"8-8.4.2-1.cuda11.6\";fi && \\\n        dnf install -y --nogpgcheck --setopt=obsoletes=0 \\\n            libnvinfer${trt_ver} \\\n            libnvonnxparsers${trt_ver} \\\n            libnvparsers${trt_ver} \\\n            libnvinfer-plugin${trt_ver}; \\\n    elif [ -n \"${TORCH_VERSION}\" ]; then \\\n        curl -LO https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.9.1%2Bcu102.zip && \\\n        unzip libtorch-*.zip -d /root >/dev/null 2>&1 && \\\n        cp -af libtorch/lib /usr/local/;fi && \\\n    find /usr/local -name \"*.a\"|xargs rm -f && \\\n    dnf clean all && rm -rf /var/cache/dnf/* /root/*\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    rpm -ivh /opt/release/*.rpm && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f 
/lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    sed -i '/include/i\\/usr/local/lib64' /etc/ld.so.conf && \\\n    echo \"/usr/local/nvidia/lib\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    echo \"/usr/local/nvidia/lib64\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    echo \"export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig\" > /etc/profile.d/pkgconfig.sh && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bashrc && systemctl enable modelbox\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nLABEL com.nvidia.volumes.needed=\"nvidia_driver\" com.nvidia.cuda.version=\"${NVIDIA_CUDA_VERSION}\"\n\nENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin${PATH:+:${PATH}}\nENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\nENV NVIDIA_VISIBLE_DEVICES=all\nENV NVIDIA_DRIVER_CAPABILITIES=video,compute,utility\nENV NVIDIA_REQUIRE_CUDA=\"${NVIDIA_REQUIRE_CUDA}\"\n\nCMD [\"/usr/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.cuda.runtime.ubuntu",
    "content": "ARG BASE_IMAGE=ubuntu:20.04\nFROM ${BASE_IMAGE} as base\n\nCOPY release /opt/release\nADD *.tar.gz /usr/local/\n\nARG CUDA_VER\nARG CUDA_VERSION\nARG TF_VERSION\nARG TRT_VERSION\nARG TORCH_VERSION\nARG CUDA_CUDART_VERSION\nARG NVIDIA_CUDA_VERSION\nARG NVIDIA_REQUIRE_CUDA\n\nWORKDIR /root\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    echo \"deb http://archive.ubuntu.com/ubuntu/ bionic-proposed main restricted universe multiverse\" >>/etc/apt/sources.list && \\\n    export DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=low TZ=Asia/Shanghai && \\\n    apt update && \\\n    apt install -y python3.8 libpython3.8 python3-pip python3-apt python3-setuptools && \\\n    apt install -y vim dbus systemd systemd-cron iproute2 gnupg2 libfuse2 apt-utils \\\n        build-essential bash unzip ffmpeg curl pkg-config ca-certificates libduktape205 \\\n        libssl1.1 libcpprest graphviz libprotobuf-c1 libgtk-3-0 libgoogle-glog0v5 libtbb2 && \\\n    update-ca-certificates && apt upgrade -y && \\\n    ln -sf python3.8 /usr/bin/python3 && \\\n    rm -rf /var/lib/apt/lists/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir psutil pillow wheel numpy pyyaml requests opencv-python==4.5.5.64 && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub | apt-key add - && \\\n    curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \\\n    echo \"deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /\" > 
/etc/apt/sources.list.d/cuda.list && \\\n    echo \"deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /\" > /etc/apt/sources.list.d/nvidia-ml.list && \\\n    apt update && \\\n    apt install -y --no-install-recommends \\\n        cuda-nvtx-${CUDA_VER} \\\n        cuda-libraries-${CUDA_VER} && \\\n    ln -s cuda-${CUDA_VERSION} /usr/local/cuda && \\\n    find /usr/local -name \"*.a\"|xargs rm -f && \\\n    rm -rf /var/lib/apt/lists/* /root/*\n\nRUN apt update && \\\n    if [ \"${CUDA_VERSION}\" = \"10.2\" ]; then \\\n        dnn_ver=\"8.0.0.180-1+cuda10.2\"; \\\n    elif [ \"${CUDA_VERSION}\" = \"11.2\" ]; then \\\n        dnn_ver=\"8.4.1.50-1+cuda11.6\";fi && \\\n    apt install -y --no-install-recommends \\\n        libcudnn8=${dnn_ver} && \\\n    if [ -n \"${TF_VERSION}\" ]; then \\\n        curl -LO https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        tar zxf libtensorflow-gpu-linux-x86_64-2.6.0.tar.gz && \\\n        cp -af lib/* /usr/local/lib/ && \\\n        python3 -m pip install --no-cache-dir tensorflow-gpu==2.6.0; \\\n    elif [ -n \"${TRT_VERSION}\" ]; then \\\n        if [ \"${TRT_VERSION}\" = \"7.1.3.4\" ]; then \\\n            trt_ver=\"7=7.1.3-1+cuda10.2\"; \\\n        elif [ \"${TRT_VERSION}\" = \"8.4.2.4\" ]; then \\\n            trt_ver=\"8=8.4.2-1+cuda11.6\";fi && \\\n        apt install -y --no-install-recommends \\\n            libnvinfer${trt_ver} \\\n            libnvonnxparsers${trt_ver} \\\n            libnvparsers${trt_ver} \\\n            libnvinfer-plugin${trt_ver} \\\n            python3-libnvinfer=${trt_ver#*=}; \\\n    elif [ -n \"${TORCH_VERSION}\" ]; then \\\n        curl -LO https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.9.1%2Bcu102.zip && \\\n        unzip libtorch-*.zip -d /root >/dev/null 2>&1 && \\\n        cp -af libtorch/* /usr/local/;fi && \\\n    find /usr/local -name \"*.a\"|xargs rm -f 
&& \\\n    rm -rf /var/lib/apt/lists/* /root/*\n\nRUN python3 -m pip install --no-cache-dir /opt/release/python/modelbox-*.whl && \\\n    dpkg -i /opt/release/*.deb && \\\n    (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i '/include/i\\/usr/local/lib' /etc/ld.so.conf && \\\n    echo \"/usr/local/nvidia/lib\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    echo \"/usr/local/nvidia/lib64\" >> /etc/ld.so.conf.d/nvidia.conf && \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo \"ldconfig &>/dev/null\" >> /etc/bash.bashrc && systemctl enable modelbox\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nLABEL com.nvidia.volumes.needed=\"nvidia_driver\" com.nvidia.cuda.version=\"${NVIDIA_CUDA_VERSION}\"\n\nENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin${PATH:+:${PATH}}\nENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\nENV NVIDIA_VISIBLE_DEVICES=all\nENV NVIDIA_DRIVER_CAPABILITIES=video,compute,utility\nENV NVIDIA_REQUIRE_CUDA=\"${NVIDIA_REQUIRE_CUDA}\"\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]\n"
  },
  {
    "path": "docker/Dockerfile.rknnrt.build.ubuntu",
    "content": "FROM ubuntu:20.04\n\nADD rockchip /usr/local/rockchip\n\nWORKDIR /root\n\nENV DEBIAN_FRONTEND=\"noninteractive\"\nENV ROCKCHIP_PATH=/usr/local/rockchip\n\nRUN ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo \"Asia/Shanghai\" > /etc/timezone && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then sed -i 's@ports.ubuntu.com@mirrors.ustc.edu.cn@g' /etc/apt/sources.list;fi && \\\n    apt update && apt upgrade -y && \\\n    apt install -y python3.8-dev python3-pip python3-apt python3-setuptools apt-utils && \\\n    apt install -y \\\n        dbus systemd systemd-cron iproute2 gnupg2 curl libcurl4-openssl-dev ca-certificates \\\n        build-essential unzip ffmpeg sudo bash vim gdb git doxygen autoconf cmake gettext openssh-server \\\n        python3-wheel python3-numpy python3-opencv libopencv-dev pkg-config kmod net-tools pciutils \\\n        libssl-dev libcpprest-dev libswscale-dev libavformat-dev graphviz libgraphviz-dev libfuse-dev \\\n        libprotobuf-c-dev protobuf-c-compiler duktape-dev libopenblas-dev netcat && \\\n    rm -f /usr/bin/python3 /usr/bin/python && \\\n    update-alternatives --install /usr/bin/python python /usr/bin/python3.8 100 && \\\n    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 100 && \\\n    update-alternatives --config python3 && \\\n    rm -rf /var/lib/apt/lists/* /root/*\n\nRUN mkdir -p /root/.pip && \\\n    echo \"[global]\" > /root/.pip/pip.conf && \\\n    echo \"index-url = https://pypi.python.org/simple\" >>/root/.pip/pip.conf && \\\n    echo \"trusted-host = pypi.python.org\" >>/root/.pip/pip.conf && \\\n    echo \"timeout = 120\" >>/root/.pip/pip.conf && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then sed -i 's@python.org@douban.com@g' /root/.pip/pip.conf;fi && \\\n    python3 -m pip install --upgrade pip && \\\n    python3 -m pip install --no-cache-dir wheel attrs psutil decorator protobuf scipy sympy cffi grpcio grpcio-tools requests pillow pyyaml opencv-python && \\\n    
echo \"/usr/local/lib\" >>  /etc/ld.so.conf\n\nRUN if [ \"$(arch)\" = \"aarch64\" ];then node_arch=\"arm64\";else node_arch=\"x64\";fi && \\\n    curl https://nodejs.org/dist/v16.13.2/node-v16.13.2-linux-${node_arch}.tar.xz|tar -xJ && \\\n    cp -af node-v16.13.2-linux-${node_arch}/* /usr/local/ && \\\n    if [ \"$(arch)\" = \"aarch64\" ];then npm config set registry https://registry.npm.taobao.org/;fi && \\\n    npm install -g npm@latest && npm -v && node -v && \\\n    npm install -g @angular/cli && \\\n    npm cache clean --force && rm -rf /root/*\n\nRUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; \\\n    do [ $i = systemd-tmpfiles-setup.service ] || rm -f $i; done); \\\n    rm -f /lib/systemd/system/multi-user.target.wants/*; \\\n    rm -f /etc/systemd/system/*.wants/*; \\\n    rm -f /lib/systemd/system/local-fs.target.wants/*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \\\n    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \\\n    rm -f /lib/systemd/system/basic.target.wants/*; \\\n    rm -f /lib/systemd/system/anaconda.target.wants/*; \\\n    sed -i \"32aPermitRootLogin yes\" /etc/ssh/sshd_config && \\\n    sed -i 's/^SystemMaxUse=.*/SystemMaxUse=16M/g' /etc/systemd/journald.conf && \\\n    echo 'export TMOUT=0' >> ~/.bashrc && \\\n    echo 'export HISTSIZE=1000' >> ~/.bashrc && \\\n    echo '[ -n \"${SSH_TTY}\" ] && export $(cat /proc/1/environ|tr \"\\\\0\" \"\\\\n\"|xargs)' >> /etc/bash.bashrc && \\\n    echo 'export PS1=\"\\[\\e[35;1m\\][\\u@\\h \\W]$ \\[\\e[0m\\]\"' >> ~/.bashrc && \\\n    systemctl enable ssh\n\nRUN echo \"/usr/local/rockchip/rga/libs/Linux/gcc-aarch64\" >> /etc/ld.so.conf.d/rockchip.conf && \\\n    echo \"/usr/local/rockchip/rknpu/rknn/rknn_api/librknn_api/lib64\" >> /etc/ld.so.conf.d/rockchip.conf && \\\n    if [ -d /usr/local/rockchip/rknpu2/runtime/RK356X ]; then echo \"/usr/local/rockchip/rknpu2/runtime/RK356X/Linux/librknn_api/aarch64\" >> /etc/ld.so.conf.d/rockchip.conf; elif [ -d 
/usr/local/rockchip/rknpu2/runtime/RK3588 ]; then echo \"/usr/local/rockchip/rknpu2/runtime/RK3588/Linux/librknn_api/aarch64\" >> /etc/ld.so.conf.d/rockchip.conf; fi && \\\n    echo \"/usr/local/rockchip/rkmpp/lib\" >> /etc/ld.so.conf.d/rockchip.conf\n\nVOLUME [\"/sys/fs/cgroup\", \"/tmp\", \"/run\", \"/run/lock\"]\nSTOPSIGNAL SIGRTMIN+3\n\nCMD [\"/sbin/init\", \"--log-target=journal\"]"
  },
  {
    "path": "docker/README.md",
    "content": "# ModelBox Image List\n\n|type|os|image name|\n|--|--|--|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64|\n|develop|ubuntu-18.04|modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64|\n|develop|ubuntu-18.04|modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_64|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-ubuntu-aarch64|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64|\n|develop|ubuntu-20.04|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-aarch64|\n|-\n|develop|openeuler|modelbox/modelbox-develop-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-libtorch_1.9.1-cuda_10.2-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-openeuler-aarch64|\n|develop|openeuler|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64|\n|develop|openeuler|modelbox/modelbox-develop-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-aarch64|\n|-\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-tensorrt_8.4.2-cuda_11.2-ubuntu-x86_64|\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-tensorflow_2.6.0-cuda_11.2-ubuntu-x86_64|\n|runtime|ubuntu-18.04|modelbox/modelbox-runtime-tensorrt_7.1.3-cuda_10.2-ubuntu-x86_64|\n|runtime|ubuntu-18.04|modelbox/modelbox-runtime-libtorch_1.9.1-cuda_10.2-ubuntu-x86_64|\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-ubuntu-x86_6
4|\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-ubuntu-aarch64|\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-x86_64|\n|runtime|ubuntu-20.04|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-ubuntu-aarch64|\n|-\n|runtime|openeuler|modelbox/modelbox-runtime-tensorrt_8.4.2-cuda_11.2-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-tensorflow_2.6.0-cuda_11.2-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-tensorrt_7.1.3-cuda_10.2-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-libtorch_1.9.1-cuda_10.2-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-openeuler-aarch64|\n|runtime|openeuler|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-x86_64|\n|runtime|openeuler|modelbox/modelbox-runtime-mindspore_1.9.0-cann_6.0.1-d310p-openeuler-aarch64|"
  },
  {
    "path": "docker/artifact_check.sh",
    "content": "#!/bin/bash\nCODE_DIR=$(cd $(dirname $0)/..;pwd)\nrelease_dir=${CODE_DIR}/build/release\nls -lh ${release_dir}\nosName=$(sed -nr '1s/^NAME=\"(.*)\"$/\\1/gp' /etc/os-release)\nif [ \"${osName,,}\" == \"ubuntu\" ];then\n    rm -f ${release_dir}/*.rpm\n    postfix=\"*.deb\"\nelif [ \"${osName,,}\" == \"openeuler\" ];then\n    postfix=\"*.rpm\"\nfi\n\nif [ $(ls ${release_dir}|grep \"cuda\"|wc -l) -eq 2 ];then\n    type=\"cuda\"\nelif [ $(ls ${release_dir}|grep \"ascend\"|wc -l) -eq 2 ];then\n    type=\"ascend\"\nfi\n\nfilecount=$(ls ${release_dir} | wc -l)\npkgcount=$(ls ${release_dir} | egrep \"${postfix}\" | wc -l)\nartifacts_file=$(ls ${release_dir} | grep \"${type}\"| wc -l)\n\nif [ ${filecount} -ge 14 ] && [ ${pkgcount} -ge 12 ] && [ ${artifacts_file} -eq 2 ]; then\n    echo \"compile success\"\nelse\n    echo \"compile failed\"\n    exit 1\nfi\n"
  },
  {
    "path": "docker/prepare_for_dev.sh",
    "content": "#!/bin/bash\nCUR_DIR=$(cd $(dirname \"${BASH_SOURCE[0]}\");pwd)\nOS_NAME=$(sed -nr '/NAME/s/^NAME=\"(.*)\"$/\\1/gp' /etc/os-release)\nVERSION_ID=$(sed -nr '/VERSION_ID/s/^VERSION_ID=\"(.*)\"$/\\1/gp' /etc/os-release)\nPLATFROM=$(arch)\necho \"OS_NAME:$OS_NAME\"\necho \"PLATFROM:$PLATFROM\"\necho \"VERSION_ID:$VERSION_ID\"\n\ndownload() {\n    url=\"$1\"\n    softName=${url##*/}\n    echo -e \"\\n\\nBegin to download ${softName}\"\n\n    times=0\n    while true\n    do\n        curl -k -L -O ${url}\n        if [ $(ls -l ${softName}|awk '{print $5}') -ge 50000 ]; then\n            echo \"${softName} download complete\"\n            break\n        else\n            times=$[${times}+1]\n            if [ ${times} -gt 3 ]; then\n                echo \"package ${softName} download failed,pls check\"\n                exit 1\n            fi\n            echo \"package ${softName} download failed, retry $times in 3 seconds......\"\n            sleep 3\n        fi\n    done\n}\n\nif [ \"${PLATFROM}\" == \"x86_64\" ];then\n    if [ \"$OS_NAME\" == \"Ubuntu\" ];then\n        if [ \"$VERSION_ID\" == \"20.04\" ];then\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/glog_0.6.0_dev_ubuntu.tar.gz\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.5.5_dev_ubuntu.tar.gz\n        elif [ \"$VERSION_ID\" == \"22.04\" ];then\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.2.0_dev_ubuntu.tar.gz\n        fi\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/obssdk_3.22.3_dev_ubuntu.tar.gz\n    elif [ \"$OS_NAME\" == \"openEuler\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/cpprestsdk_2.10.15_dev.tar.gz\n        download 
https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/duktape_2.6.0_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/ffmpeg_4.4_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.2.0_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/obssdk_3.22.3_dev.tar.gz\n    fi\n    download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/nlohmann-json_3.7.3.tar.gz\n    download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/Video_Codec_SDK_9.1.23.tar.gz\nelif [ \"${PLATFROM}\" == \"aarch64\" ];then\n    if [ \"$OS_NAME\" == \"Ubuntu\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/glog_0.6.0_dev_ubuntu.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/opencv_4.5.5_dev_ubuntu.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/obssdk_3.22.3_dev_ubuntu.tar.gz\n    elif [ \"$OS_NAME\" == \"openEuler\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/cpprestsdk_2.10.18_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/duktape_2.6.0_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/ffmpeg_4.4_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/opencv_4.2.0_dev.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/obssdk_3.22.3_dev.tar.gz\n    fi\n    download 
https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/nlohmann-json_3.7.3.tar.gz\nelse\n    echo \"build error\"\n    exit 1\nfi\n\nls -lh *.tar.gz\nls -lh release\n"
  },
  {
    "path": "docker/prepare_for_rockchip.sh",
    "content": "#!/bin/bash\nCODE_DIR=$(cd $(dirname $0)/..;pwd)\n\necho ${CODE_DIR}\nPLATFORM_NAME=$1\nif [ \"${PLATFORM_NAME}\" == \"356x\" ] || [ \"${PLATFORM_NAME}\" == \"3588\" ]; then\n  echo \"build ${PLATFORM_NAME}\"\nelse\n  echo \"no support ${PLATFORM_NAME}\"\n  exit 1\nfi\n\nmkdir -p ${CODE_DIR}/rockchip\nunset https_proxy\nunset http_proxy\napt update\napt-get install -y gnutls-bin\ngit config --global http.sslVerify false\n\n# download lib rga\nfunction download_rga() {\n  mkdir -p ${CODE_DIR}/githubrga\n  cd ${CODE_DIR}/githubrga\n  if [ \"${PLATFORM_NAME}\" == \"356x\" ]; then\n    wget https://ghproxy.com/github.com/airockchip/librga/archive/refs/heads/1.3.2_release.zip\n    unzip 1.3.2_release.zip\n    LIB_RGA_PATH=${CODE_DIR}/githubrga/librga-1.3.2_release\n  elif [ \"${PLATFORM_NAME}\" == \"3588\" ]; then\n    wget https://ghproxy.com/github.com/airockchip/librga/archive/refs/heads/main.zip\n    unzip main.zip\n    LIB_RGA_PATH=${CODE_DIR}/githubrga/librga-main\n  else\n    echo \"no support ${PLATFORM_NAME}\"\n    exit 1\n  fi\n\n  echo \"download rga finish\"\n\n  mkdir -p ${CODE_DIR}/rga/\n  mkdir -p ${CODE_DIR}/rga/libs/Linux/gcc-aarch64\n  cp -rfd ${LIB_RGA_PATH}/include ${CODE_DIR}/rga\n  cp -rfd ${LIB_RGA_PATH}/libs/Linux/gcc-aarch64/* ${CODE_DIR}/rga/libs/Linux/gcc-aarch64/\n\n  cd ${CODE_DIR}\n  tar -czf rga.tar.gz rga\n  mv rga.tar.gz ${CODE_DIR}/rockchip/rga.tar.gz\n\n  rm -rf githubrga rga\n}\n\n# download lib rknpu\ndownload_rknpu() {\n  mkdir -p ${CODE_DIR}/githubrknpu\n  cd ${CODE_DIR}/githubrknpu\n  wget https://ghproxy.com/github.com/airockchip/RK3399Pro_npu/archive/refs/heads/main.zip\n  unzip main.zip\n\n  echo \"download rknpu finish\"\n\n  RKNPU_PATH=rknn-api/librknn_api/\n  mkdir -p ${CODE_DIR}/rknpu\n  mkdir -p ${CODE_DIR}/rknpu/${RKNPU_PATH}/Linux/lib64\n  cp -rfd ${CODE_DIR}/githubrknpu/RK3399Pro_npu-main/${RKNPU_PATH}/include ${CODE_DIR}/rknpu/${RKNPU_PATH}\n  cp -rfd 
${CODE_DIR}/githubrknpu/RK3399Pro_npu-main/${RKNPU_PATH}/Linux/lib64/* ${CODE_DIR}/rknpu/${RKNPU_PATH}/Linux/lib64\n\n  cd ${CODE_DIR}\n  tar -czf rknpu.tar.gz rknpu\n  mv rknpu.tar.gz ${CODE_DIR}/rockchip/rknpu.tar.gz\n\n  rm -rf rknpu githubrknpu\n}\n\n# download lib rknpu2\ndownload_rknpu2() {\n  mkdir -p ${CODE_DIR}/githubrknpu2\n  cd ${CODE_DIR}/githubrknpu2\n  wget https://ghproxy.com/github.com/rockchip-linux/rknpu2/archive/refs/heads/master.zip\n  unzip master.zip\n  mkdir -p ${CODE_DIR}/rknpu2\n  if [ \"${PLATFORM_NAME}\" == \"356x\" ]; then\n    RKNPU2_PATH=runtime/RK356X/Linux/librknn_api\n    mkdir -p ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n    cp -rfd ${CODE_DIR}/githubrknpu2/rknpu2-master/${RKNPU2_PATH}/include ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n    cp -rfd ${CODE_DIR}/githubrknpu2/rknpu2-master/${RKNPU2_PATH}/aarch64 ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n  elif [ \"${PLATFORM_NAME}\" == \"3588\" ]; then\n    RKNPU2_PATH=runtime/RK3588/Linux/librknn_api\n    mkdir -p ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n    cp -rfd ${CODE_DIR}/githubrknpu2/rknpu2-master/${RKNPU2_PATH}/include ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n    cp -rfd ${CODE_DIR}/githubrknpu2/rknpu2-master/${RKNPU2_PATH}/aarch64 ${CODE_DIR}/rknpu2/${RKNPU2_PATH}\n  else\n    echo \"no support ${PLATFORM_NAME}\"\n    exit 1\n  fi\n  \n  echo \"download rknpu2 finish\"\n \n  cd ${CODE_DIR}\n  tar -czf rknpu2.tar.gz rknpu2\n  mv rknpu2.tar.gz ${CODE_DIR}/rockchip/rknpu2.tar.gz\n\n  rm -rf rknpu2 githubrknpu2\n}\n\n# download mpp and build lib mpp\ndownload_rkmpp() {\n  mkdir -p ${CODE_DIR}/mpp\n  cd ${CODE_DIR}/mpp\n  wget https://ghproxy.com/github.com/rockchip-linux/mpp/archive/refs/heads/develop.zip\n  unzip develop.zip\n  echo \"download mpp finish\"\n\n  if [ -d \"${CODE_DIR}/mpp/mpp-develop/mpp/release\" ]; then\n    rm -rf ${CODE_DIR}/mpp/mpp-develop/mpp/release\n  fi\n\n  cd ${CODE_DIR}/mpp/mpp-develop/build/linux/aarch64\n  ./make-Makefiles.bash\n  make -j4\n\n  mkdir -p ${CODE_DIR}/rkmpp\n  mkdir -p 
${CODE_DIR}/rkmpp/include\n  mkdir -p ${CODE_DIR}/rkmpp/lib\n  \n  cp -rfd ${CODE_DIR}/mpp/mpp-develop/inc/* ${CODE_DIR}/rkmpp/include\n  cp -rfd ${CODE_DIR}/mpp/mpp-develop/utils/camera_source.h ${CODE_DIR}/rkmpp/include\n  cp -rfd ${CODE_DIR}/mpp/mpp-develop/build/linux/aarch64/mpp/librockchip* ${CODE_DIR}/rkmpp/lib\n  cp -rfd ${CODE_DIR}/mpp/mpp-develop/build/linux/aarch64/utils/libutils.a ${CODE_DIR}/rkmpp/lib/librk_utils.a\n  \n  cd ${CODE_DIR}\n  tar -czf rkmpp.tar.gz rkmpp\n  mv rkmpp.tar.gz ${CODE_DIR}/rockchip/rkmpp.tar.gz\n  \n  rm -rf mpp rkmpp\n}\n\ndownload_rga\ndownload_rknpu\ndownload_rknpu2\ndownload_rkmpp"
  },
  {
    "path": "docker/prepare_for_run.sh",
    "content": "#!/bin/bash\nCUR_DIR=$(cd $(dirname \"${BASH_SOURCE[0]}\");pwd)\nOS_NAME=$(sed -nr '/NAME/s/^NAME=\"(.*)\"$/\\1/gp' /etc/os-release)\nVERSION_ID=$(sed -nr '/VERSION_ID/s/^VERSION_ID=\"(.*)\"$/\\1/gp' /etc/os-release)\nPLATFROM=$(arch)\necho \"OS_NAME:$OS_NAME\"\necho \"PLATFROM:$PLATFROM\"\necho \"VERSION_ID:$VERSION_ID\"\n\ndownload() {\n    url=\"$1\"\n    softName=${url##*/}\n    echo -e \"\\n\\nBegin to download ${softName}\"\n\n    times=0\n    while true\n    do\n        curl -k -L -O ${url}\n        if [ $(ls -l ${softName}|awk '{print $5}') -ge 50000 ]; then\n            echo \"${softName} download complete\"\n            break\n        else\n            times=$[${times}+1]\n            if [ ${times} -gt 3 ]; then\n                echo \"package ${softName} download failed,pls check\"\n                exit 1\n            fi\n            echo \"package ${softName} download failed, retry $times in 3 seconds......\"\n            sleep 3\n        fi\n    done\n}\n\nif [ \"${PLATFROM}\" == \"x86_64\" ];then\n    if [ \"$OS_NAME\" == \"Ubuntu\" ];then\n        if [ \"$VERSION_ID\" == \"20.04\" ];then\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/glog_0.6.0_ubuntu.tar.gz\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.5.5_ubuntu.tar.gz\n        elif [ \"$VERSION_ID\" == \"22.04\" ];then\n            download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.2.0_ubuntu.tar.gz\n        fi\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/obssdk_3.22.3_ubuntu.tar.gz\n    elif [ \"$OS_NAME\" == \"openEuler\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/cpprestsdk_2.10.15.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/duktape_2.6.0.tar.gz\n  
      download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/ffmpeg_4.4.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/opencv_4.2.0.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/obssdk_3.22.3.tar.gz\n    fi\nelif [ \"${PLATFROM}\" == \"aarch64\" ];then\n    if [ \"$OS_NAME\" == \"Ubuntu\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/glog_0.6.0_ubuntu.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/opencv_4.5.5_ubuntu.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/obssdk_3.22.3_ubuntu.tar.gz\n    elif [ \"$OS_NAME\" == \"openEuler\" ];then\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/cpprestsdk_2.10.18.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/duktape_2.6.0.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/ffmpeg_4.4.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/opencv_4.2.0.tar.gz\n        download https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive_aarch64/obssdk_3.22.3.tar.gz\n    fi\nelse\n    echo \"build error\"\n    exit 1\nfi\n\nls -lh *.tar.gz\nls release|egrep 'devel|document|solution|demo'|xargs -i rm -f release/{}\nls -lh release\n"
  },
  {
    "path": "docker/repo/cuda.repo",
    "content": "[cuda]\nname=cuda\nbaseurl=https://developer.download.nvidia.com/compute/cuda/repos/rhel7/$basearch/\nenabled=1\ngpgcheck=0\n"
  },
  {
    "path": "docker/repo/nvidia-ml.repo",
    "content": "[nvidia]\nname=nvidia\nbaseurl=https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/$basearch/\nenabled=1\ngpgcheck=0\n"
  },
  {
    "path": "docs/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox)\n\nfind_package(Doxygen)\nif(NOT DOXYGEN_FOUND)\n    message(STATUS \"Disable document building.\")\n    return()\nendif()\n\nset(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)\nset(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)\n\nset(DOXYGEN_API_INSTALL_DIR \"${MODELBOX_WWW_DIR}\")\nif (NOT DOXYGEN_API_INSTALL_DIR)\n    set(DOXYGEN_API_INSTALL_DIR \"${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/www\")\nendif()\n\nstring(REPLACE \";\" \" \" DOXYGEN_LIBMODELBOX_INCLUDES \"${LIBMODELBOX_INCLUDE}\")\nstring(REPLACE \";\" \" \" DOXYGEN_MODELBOX_SERVER_INCLUDES \"${MODELBOX_SERVER_INCLUDE}\")\n\nconfigure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)\n\nadd_custom_target(api-docs ALL\n    COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}\n    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../\n    COMMENT \"Generating API documentation with Doxygen\"\n    VERBATIM)\n\ninstall(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html/ DESTINATION ${DOXYGEN_API_INSTALL_DIR}/api\n    COMPONENT document\n)\n"
  },
  {
    "path": "docs/Design.md",
    "content": "# MODELBOX设计规格\n\n## MODELBOX场景\n\n![use-case](assets/use-case.png)\n\nMODELBOX用例包含如下场景：\n\n1. 数据输入，输出  \n用户，数据输出，输出主要是来自业务的数据请求，业务下发请求给MODELBOX，MODELBOX将数据发送到GPU，D处理后，返回给用户。\n\n1. 流单元开发  \n开发者：开发者使用MODELBOX进行流单元开发，其中流单元开发以c++，python流单元为主。  \n开发时，使用MODELBOX提供的开发接口。\n\n1. 图开发  \n开发者：开发者使用MODELBOX提供的Editor编辑器，编写对应格式的图，MODELBOX加载后，执行。\n\n1. 性能优化  \n开发者：开发者使用MODELBOX提供的profile能力，在执行流单元的时候，打开profiling接口，并导出profiling的数据，加载到chrome-tracing中，画出对应的性能甘特图。\n\n1. 动态图开发  \n开发者：开发者使用Notebook，使用python语言开发图，此时可以进行单步调试，开发完成后，业务使用动态图运行。\n\n1. 流单元调度执行  \nMODELBOX：MODELBOX加载对应的图以及流单元，将数据送入到CPU，GPU，D芯片中处理。\n\n## MODELBOX架构\n\nMODELBOX架构图：  \n\n![architecture](assets/architecture.png)\n\n架构上，MODELBOX框架自底向上由如下组件构成：  \n\n1. APP Server  \n业务服务组件，包含IVA，OCR等服务组件，IVA为C++接口，OCR为python接口。  \n其中IVA业务为异步业务，OCR为同步数据业务。\n\n1. Flow Unit  \n流集合，包含输入流单元，处理流单元，公共子流单元和数据整合流单元\n\n1. Standalone Server  \n可独立运行服务器，用于支撑业务的即时运行，业务用户不用从头开始开发进程，只需要开发对应的流单元代码，即可实现业务功能。\n\n1. Develop  \n开发支撑工具，包含Graph Editor, Notebook, Chrome Tracing，分别用于图开发，动态图写作和性能调优。\n\n1. Graph Manage  \n图管理接口，用于提供接口让开发这配置图的流程，和业务流程。  \ngraphviz用于图的展示和DOH图格式处理。\n\n1. API  \n对开发者提供的开发接口，支持C++，Java，Python，Object-c的开发。\n\n1. Performance  \n性能跟踪包含MODELBOX性能profiling，staticts打点，以及业务tracing。  \nprofiling支持性能打点，构造出业务的流程图，以及性能消耗。  \nstatics打点用于上次业务对业务进行打点，汇总上报执行接口。  \ntracing用于特定业务的流程跟踪，输出。  \n\n1. Engine  \n引擎部分为MODELBOX的核心组件，包含了buff数据接口，flow unit base流单元接口，scheduler调度器，python 流单元接口和graph图。  \ngraph图包含了，builder图创建，runner图执行，Register图注册，Context图上下文，StaticGraph静态图，DynamicGraph动态图，Datatype数据已经数据类型。\n\n1. Adapter  \n适配层，适配底层OS和硬件差异。  \n适配层包含OS适配，设备适配，推理适配。\n\n1. Base Component  \n基础组件，包含配置读取，线程池，日志，RPC总线，队列，内存池。\n内存池，包含设备内存管理，slab内存分配器。\n\n1. OS  \nMODELBOX支持的OS有Linux，andriod，IOS。\n\n1. 
Hardware  \nMODELBOX包含支持X86-64，ARM64， GPU，D芯片硬件。\n\n### AppServer\n\n![appserver](assets/appserver.png)\n\nAppServer为对外服务适配的的业务组件。其功能为。\n\n* 对外提供http api接口，用于任务控制。\n* 提供插件机制，用于外部服务适配开发。\n\nappServer内部，包含任务管理功能和状态查询功能。  \nappServer外部导出接口有：\n\n* 任务控制，任务的创建，删除，查询。\n* 任务状态查询，查询任务的耗时，内存消耗。\n* appserver进程统计信息，包含内存使用，CPU消耗，设备资源占用情况。\n\n## modelbox发布件以及目录组织结构\n\nmodelbox发布件如下\n\n| 发布件                                                         | 说明                                             |\n| -------------------------------------------------------------- | ------------------------------------------------ |\n| modelbox.&#91;deb&#124;rpm&#124;tar.gz&#93;                      | modelbox独立进程安装包，安装到系统中，可单独执行。 |\n| libmodelbox.&#91;deb&#124;rpm&#124;tar.gz&#93;                   | modelbox核心库。                                   |\n| libmodelbox-dev.&#91;deb&#124;rpm&#124;tar.gz&#93;               | modelbox开发支持，包含头文件，.a库                 |\n| libmodelbox-device_cpu_dev.&#91;deb&#124;rpm&#124;tar.gz&#93;    | modelbox CPU开发支持，包含头文件，.a库             |\n| libmodelbox-device_cuda_dev.&#91;deb&#124;rpm&#124;tar.gz&#93;   | modelbox CUDA开发支持，包含头文件，.a库            |\n| libmodelbox-device_ascend_dev.&#91;deb&#124;rpm&#124;tar.gz&#93; | modelbox ASCEND开发支持，包含头文件，.a库          |\n| modelbox.jar                                                     | java版本modelbox接口                               |\n| modelbox.tar.gz                                                  | python pip package。                             |\n\nmodelbox目录结构如下\n\n| 文件路径                    | 说明                   |\n| --------------------------- | ---------------------- |\n| /usr/bin/modelbox             | modelbox独立服务器主进程 |\n| /etc/modelbox/modelbox.conf     | modelbox配置文件。       |\n| /etc/modelbox/graph.conf      | modelbox执行程序图配置。 |\n| /lib/systemd/modelbox.systemd | modelbox服务启动程序     |\n\nlibmodelbox目录结构如下\n\n| 文件路径                                                         | 说明               |\n| 
---------------------------------------------------------------- | ------------------ |\n| /usr/lib/libmodelbox.so                                            | modelbox开发库       |\n| /usr/include/modelbox/                                            | modelbox头文件路径       |\n| /usr/lib/libmodelbox/libmodelbox-device-&#91;cpu&#124;cuda&#124;ascend&#93;.so | modelbox开发库       |\n| /usr/lib/libmodelbox/libmodelbox-uint-device-&#91;fu_xx.so&#93;      | modelbox流单元库路径 |\n\npython开发包目录\n| 文件路径                              | 说明                |\n| ------------------------------------- | ------------------- |\n| /usr/lib/python3/dist-packages/modelbox | modelbox python库路径 |\n\n## Hardware\n\n硬件上，CPU需要支持x86-64, ARM64，推理训练支持GPU，D芯片。  \n在设计上，CPU部分的执行代码，需要支持跨平台执行。\nGPU，D芯片的代码，需要支持可选，即：\n\n1. MODELBOX框架支持x86-64，和ARM64.\n1. MODELBOX GPU部分，D芯片部分可选。\n\n## OS\n\nOS上，需要支持标准Linux，andriod，iOS。\nandriod，iOS支持NDK运行，所以在语言上，需要：\n\n1. 接口部分：  \n   接口部分需要支持JAVA，C++，Object-C, swift，python五种语言。\n\n1. 框架部分：  \n   框架部分采用c/c++语言开发：\n\n1. 底层系统库：\n   * Linux  \n     使用标准的C++库，采用动态库的形式。需要编译配套OS的SDK。\n\n   * Andriod  \n     Andriod c++库对应的为libc++_shared.so，特别的使用了new delete同时需要连接/system/lib/libstdc++.so  \n     依赖的系统库采用动态库的形式依赖。  \n     异常支持，需要手工指定开启c++异常的支持。  \n     语法支持，系统采用c++ 11的语法。  \n\n   * iOS\n     支持方式和Linux类似。\n\n1. 
适配接口：\n   非posix标准的接口，采用适配的形式，比如线程优先级，进程资源占用接口等。\n\n## Base Component\n\n基础组件库是MODELBOX框架的公共功能，用于提供接口给上层框架、流单元开发，以及上层业务开发。\n对应的基础组件列表如下：\n基础组件，包含配置读取，线程池，日志，RPC总线，队列，内存池。\n内存池，包含设备内存管理，slab内存分配器。\n\n| 基础组件     | 功能说明                                                                                              |\n| ------------ | ----------------------------------------------------------------------------------------------------- |\n| toml配置读取 | 支持读取配置文件，格式采用toml，用于读取MODELBOX进程的启动参数                                          |\n| 线程池       | 支持CPU资源调度，可以在给定线程数，优先级对指定的任务进行调度执行。                                   |\n| 日志         | 异步日志，支持将日志输出到屏幕，指定的文件，或输出日志到上层业务日志模块。                            |\n| 队列         | 阻塞队列，可设定队列的大小，当超过指定大小时，则阻塞。同时支持poll数据，有数据时，唤醒阻塞的线程。    |\n| RPC总线      | 数据通信模块，支持异步RPC接口，数据格式使用protobuff格式。                                            |\n| 内存池       | 管理MODELBOX使用的内存，这些内存包括CPU内存，GPU显存，D芯片内存，可配置指定大小的内存，并进行池化管理。 |\n| slab分配器   | 对频繁使用对象的申请管理。                                                                            |\n\n### 配置管理\n\nMODELBOX框架，支持从配置文件中读取图运行的必要参数，这些参数包含对aflow框架的设置，设备层的配置，统计的配置等。  \nMODELBOX默认使用toml配置接口  \n\n对应的配置段如下：\n| 配置项    | 功能说明 |\n| ------------ | --------------|\n| [global] | 全局配置|\n| [device] | 设备相关的配置|\n| [graph] | 图相关的配置|\n| [driver] | driver相关的配置|\n\n上述配置参数，采用点分结构，如：`device.cpu`，图相关的配置可以支持中类型的图。\n\n### Logging\n\n#### LIBMODELBOX框架\n\nMODELBOX默认情况下，日志输出到屏幕，输出级别为ERROR级别的日志，业务组件可以注册日志处理函数，MODELBOX的日志将会输出到对应的业务组件模块。\n\n对应日志输出的内容：\n\n* 级别\n* 行号\n* 文件\n* 函数名\n* 日志内容。\n\nMODELBOX日志组件在base中，日志接口可以提供给核心，业务，以及流单元插件调用。  \n日志支持接口重定向，默认情况下输出到屏幕，业务可重定向日志接口到日志组件。  \n\n#### MODELBOX进程日志\n\nMODELBOX进程启动后，初始化日志组件，设置日志级别，文件信息，并将日志信息注册给MODELBOX框架。  \n日志的输出由MODELBOX进程处理，将输出日志存储到本地磁盘上。  \n\nMODELBOX进程默认将日志输出到`/var/log/modelbox/modelbox.log`目录中。\n\n### RPC总线\n\nRPC总线（或数据总线）主要用于系统内跨进程间的数据通信。初步选型使用grpc开源组件。同时在其上封装简易的异步接口。RPC调用的鉴权需预留接口，整体在鉴权系统中考虑。\n\n## 
Adapter\n\n底层适配层，适配层包含三部分适配，分别是硬件差异，OS差异，推理引擎差异。如下图所示：\n\n![dynamic](assets/adapter.png)\n\n每种适配都有独立的抽象接口层、统一数据结构层及接入管理层，可实现动态插拔。\n\n* 抽象接口层：每种适配都独立定义了抽象接口，不同类型的接入都需要独立实现抽象接口\n* 统一数据结构：用于屏蔽不同接入的差异，在适配层之上作为元数据统一操作，需要在适配层对不同接入的数据做转\n* 接入管理层：用于管理每种适配不同接入的动态插拔，动态插拔功能依赖抽象接口层、统一数据结构层\n\n对每种适配的说明如下：\n\n1. 硬件适配：  \n硬件适配层主要区分有GPU和D芯片的适配，这部分适配接口为\n   * 内存管理接口的适配\n   * 设备资源占用率的适配\n\n![dynamic](assets/device-adapter.png)\n\n1. OS适配：  \nAPI使用上，需要尽量避免Linux相关的接口，比如epoll之类的接口。  \n其他方面，使用标准的系统库。  \n第三方库，比如protobuf，rpc等使用静态编译的形式连接到ModelBox中。\n\n![dynamic](assets/os-adapter.png)\n\n1. 推理引擎适配：  \n为支持跨硬件平台推理，目前模型格式有pb，mo，me。在执行推理引擎接口标准化，不同的推理模型可以在不同的硬件上推理，比如同一个训练后的模型，可以在CPU，GPU，D芯片上执行。\n推理引擎适配包含两部分；\n    * 接口标准化  \n      接口标准化为提供标准的推理接口。\n    * 模型标准化  \n      模型标准化为业务提供标准的模型，在不同设备上执行时，由推理适配层中的标准模型转换层转换为对应的模型后推理。\n\n![dynamic](assets/inference-adapter.png)\n\n## Engine\n\nEngine是MODELBOX的核心组件，MODELBOX的所有流单元调度运行全部有Engine负责，Engine有如下几个部分组成：\n\n1. Buffer。  \n基础数据，用于承载MODELBOX的处理数据。\n\n1. Flow Unit Manager：  \n流单元管理，负责根据图加载管理需要的流单元。\n\n1. Flow Unit Base：  \n流单元的基础结构，包含流单元的init，pre，post和run四部分。\n\n1. Scheduler/`EngineCore`(名字待定，跟谷音讨论，暂时调度相关能力与Executor一起考虑，本处模块用于描述Engine的加载)：  \n引擎调度程序，加载图，并启动线程池，按照业务请求生成上下文，并执行图的推理任务。  \n`EngineCore`用于协调周边组件，是ModelBox实际运行的驱动器。其需要完成如下功能：\n   * 根据外部传入的描述文件，构建完整图，使用Graph能力\n   * 从Task列表中获取Task，构建Session级别的Context。图的执行器会从Context列表中获取Context进行执行\n\n1. Python Flow Unit Service\npython流单元服务程序，在使用纯python流单元时，python流单元代码将在独立的进程中执行以提高执行并发度。\n\n1. graph：\n图引擎，用于图的执行，其包含了Data/Type，builder，Runner，Register，Context，Static Graph，Dynamic Graph\n    * DataType  \n    数据接口，数据关联模块，对数据类型做强制检查和自动转换。  \n    * builder  \n    图创建接口，更具配置文件，建立图。\n    * runner  \n    图运行组件，按照图的拓扑排序执行每次推理任务。\n    * Register  \n    多图管理模块，可以对多个图进行管理。\n    * Context  \n    执行上下文，记录执行的状态，前后关联关系。\n    * Static Graph\n    静态图执行组件，按照静态图运行任务。\n    * Dynamic Graph\n    动态图执行组件，按照动态图执行任务。\n\n### Buffer组成\n\nBuffer为所有流单元的基础数据，其结构如下\n\n![buff](assets/buffer.png)\n\n1. 
一个Buffer由一个Meta和一个Data组成。  \nMeta记录的此数据的类型，长度和对应数据的位置。\nData存储的对应的数据。\n\n1. 多个Buffer组成一个BufferList。  \nBufferList对应的Meta存储在一个List中，相应的数据，存储到同一个RawData中。\n\n1. 数据保护  \nMeta对应的Data存储到连续的RawData上，可能会导致误操作。RawData用Magic进行保护。\n\n### Flow Unit Manager\n\n1. 流单元开发\n\n用户开发流单元时，只需要安装相应的流单元开发包libmodelbox-dev即可，安装完成后，modelbox的头文件将安装到`/usr/include/modelbox`中。\n\n开发时：用户只需包含`modelbox/modelbox.h`即可开发，若开发对应设备的推理流单元，则包含对应的外设头文件`modelbox/device/[cpu|gpu|ascend]/modelbox.h`\n编译时：用户指定编译参数`-shared -lmodelbox -lmodelbox-device-[cpu|gpu|ascend]`即可生成对应的modelbox流单元。\n\n1. 流单元加载管理\n\n流单元管理组件负责管理当前的所有流单元，并提供列表给图模块，同时在需要的时候进行流单元加载。\n基本流单元应该包含基本的信息。\n\n| 包含信息     | 信息说明                                   |\n| ------------ | ------------------------------------------ |\n| 文件路径     | 流单元文件路径，流单元的路径。             |\n| 流单元名称   | 流单元的名称，加载流单元后执行             |\n| 流单元版本号 | 流单元的版本号                             |\n| 流单元输入   | 流单元输入个数，以及每个数据输入的类型。   |\n| 流单元输出   | 流单元输出个数，以及每个输出的类型。       |\n| 流单元参数   | 流单元参数列表，在图加载时执行流单元安装。 |\n\nMODELBOX流单元路径可以如下：\n\n| 路径             | 说明                                                           |\n| ---------------- | -------------------------------------------------------------- |\n| /usr/lib/modelbox/ | modelbox默认流单元库路径                                         |\n| 图中指定         | 开发人员，可以在图中指定流单元库路径，在加载图的时候进行加载。 |\n\n### Flow Unit Base\n\n流单元基础接口，流单元的基础类，包含如下必要接口：\n\n1. Open  \n流单元加入图时，执行的一些配置参数，这些配置和图的执行相关。\n\n2. StreamOpen  \n当业务处理流数据时，触发调用streamopen，流单元打开流处理能力。\n\n3. Process  \n流单元执行接口，执行流单元，并返回对应的结果，如果流单元执行出错，返回对应的错误信息。\n\n4. StreamClose\n当业务流数据处理结束时，触发调用streamclose，通知业务关闭数据流。\n\n5. 
close  \n流单元销毁接口，销毁流单元时，释放对应的资源。\n\n### 流单元的计算\n\n流单元采用计算和流程分离的形式。\n\n![buffer-execute](assets/buffexecute.png)\n\nEngine调度执行buffer时，以bufferlist为单位调度，流单元中的pre，post以流单元为数据处理。  \n当Engine执行对应的流单元时，执行策略由对应的device触发。采用业务流程和数据计算分离的模式。\n\n* CPU的执行策略采用多线程+批量的形式。\nCPU流单元可以设置批量处理能力，当业务处理时，会多线程方式批量处理数据。\n\n* GPU的执行策略采用批处理的形式。\nGPU采用全批量处理的能力。\n\n### Scheduler\n\n图调度器，用于并发执行图的任务使用，将数据从输入源调度到执行源，并对数据进行汇总后输出。\n\n### Python Flow Unit Service\n\n流单元并发调度任务，当流单元配置为纯python流单元时，由流单元管理模块启动相关的python服务。对应的工作模式如下：\n\n![python](assets/python-flowunit.png)\n\n当有纯python流单元时，会经由python-master启动多个worker进程。相应的业务请求会由modelbox进程内部的Flow Unit将请求发送到python-worker，由python-worker进行并发处理后，返回给流单元。\n\n### Graph\n\n图引擎，为MODELBOX的关键组件，MODELBOX在创建图时，支持两种方式，一种是动态图，一种是静态图。\n图支持一定的监控能力，可查询流单元数量，流单元执行次数等，同时结合Profiling，提供调优信息。\n\n图的处理采用如下的方式。\n\n![graph-process](assets/Graph-Process.png)\n\n图的数据处理，采用从`INPUT`循环读取数据，并将数据送入`PORCESS`处理，处理完成后，由`SINK`输出。\nGraph引擎包含了如下几部分：\n\n#### 数据类型DataType\n\n数据的流动校验采用的是DataType的方式。\n对于DataType内置如下三种：\n\n| 类型   | 类型说明                                                                                 |\n| ------ | ---------------------------------------------------------------------------------------- |\n| BUFF   | 基础类型，对应为BUFF，由数据长度                                                         |\n| Tensor | Tensor类型，可支持多种shape，但调度框架的最小调度单位为tensor，而非tensor内部的N维数据。 |\n| String | 字符串，字符串类型的数据。                                              |\n\n数据类型采用数据类型树的方式匹配管理类型，大致的类型树如图：\n\n![buffer-type](assets/buffer-type-tree.png)\n\n根节点为RAW类型，叶子节点基于RAW派生出来，可以子类型可以分为包含上述类型的多种数据类型。  \n\n* 数据类型定义：\n每个流单元可以自定义自己的数据类型，这些数据类型可基于上述三个类型扩展。每个流单元在初始化的时候，会注册对应数据类型，流单元管理器会对数据类型做校验判断。\n\n* 流单元配置数据类型：\nMODELBOX中数据类型采用树状结构进行管理。将继承关系描述清楚。同时数据类型会对外开放，流单元开发者可定义自己的数据类型，并纳入到树状管理结构中。  \n\n* 数据类型校验：\n图构建过程中，会对流单元的连接关系进行数据类型校验，此处校验为弱校验，允许父节点数据传递给子节点数据类型（等同编程语言的类型隐士转换）。流单元输入输出在流单元执行时进行强校验。\n\n### 
builder\n\n图主要包含Node，节点，对应流单元执行实体，与流单元处理函数绑定。每个Node具备输入、输出。输入与输出具备三个要素：名称、数据类型、详细描述。在可视化编排中，输入输出在Node上已短划线点表示（类似simulink）。图支持有向有环图，和子图。\n\n图的生成主要完成图的建立。提供node添加接口、子图添加接口、node关联关系接口。关联关系添加时完成数据类型校验。\n\n提供check接口，对构建完成的图进行进一步校验。规则主要有：\n\n* 连接的流单元，输入输出数据类型匹配\n* 不存在孤立流单元\n* 流单元不存在孤立输入或输出\n\n对应的图支持如下几种形式。\n\n1. 形式一  \n![type1](assets/graph-type1.png)\n\n单流向，数据从NodeA->NodeB\n\n1. 形式二  \n![type2](assets/graph-type2.png)\n\n数据分流，数据从NodeA->NodeB, NodeA->NodeC\n\n1. 形式三  \n![type3](assets/graph-type3.png)\n\n数据汇聚，数据从NodeB->NodeA, NodeC->NodeA\n\n1. 形式四  \n![type4](assets/graph-type4.png)\n\n带条件Condition的边，可与之前的数据形成环状，对业务进行循环处理。\n\n1. 形式五  \n![type5](assets/graph-type5.png)\n\n数据组合，数据有一个子图整体处理。NodeA->SubGraph->NodeC\n\n#### D芯片流单元的支持\n\n当前D提供了Matrix编程框架，该框架构建了一套完整的图模式。在图构建过程中需要对D芯片的编程模式进行封装。基本原则如下：\n\n1. 针对使用D的业务逻辑，也封装为流单元。该流单元构建时会同时完成Hiai的图构建。流单元连接关系需对应转化为D模式下图的边。多个D芯片流单元组成子图。\n1. D芯片子图图构建过程中在子图边界添加数据同步流单元。图执行过程中，D芯片流单元直接跳过或执行为空，由实际D Matrix的Graph 完成计算。\n![d-matrix-Flow Unit](assets/d-matrix-flowunit.png)\n\n#### nvidia GPU的支持\n\nnvidia显卡支持多路stream并发的计算，不同stream之间可以并发执行，同一个stream内部异步串行执行。\nmodelbox在支持nvidia显卡时，需要支持stream，stream的管理，跟踪由适配层的deivce和GPU 流单元负责。\n\n![nv-stream](assets/nvidia-stream.png)\n\nstreamid仅存在nVidia的GPU中。\n\n* GPU初始创建一些stream池子。\n* 当数据从其他设备复制到本GPU时，从池子中获取一个未使用的streamid，并调用nVidia的异步内存复制接口。\n* 当数据在GPU中处理时，数据之间使用同一个streamid，并将streamid传递到后面的流单元中。\n* 当数据从GPU复制回其他设备时，采用同步等待的形式等待数据复制完成。\n\n### Context\n\nContext上下文，为当前处理任务的情况，上下文主要提供流单元记录当前任务处理状态的能力，比如语音，视频等流式数据。\n上下文分为两级上下文：\n\n* 第一级是和图的生命周期绑定，为Session-Context。\n* 第二级是从INPUT数据输入创建，到SINK结束回收，为Buffer-Context。\n\n上下文包含如下内容：\n\n1. 输入，输出数据指针\n1. 执行序号。\n1. 对应流单元的key-value状态数据存储。\n\n### Runner\n\n图执行器，负责使用调度器，调度对应图的流单元，调度策略为：\n\n1. 对于INPUT流单元，采用线程池的形式调度， INPUT流单元产生数据上下文。\n1. 对于PROCESS，SINK流单元，采用Schedule-With-Data的形式调度。\n1. 
对于GPU，D芯片需要批量处理的流单元，采用BufferList数据结合处理的形式。\n\n![runner](assets/runner.png)\n\n如上图所示：\n\n* 一个图Graph分为5个Node，当有数据从INPUT产生时，INPUT产生的数据会放入一个RunQueue中。\n* RunQueue按照执行数据由Executor调度执行，每个Executor都会和BufferList绑定。  \n* Executor在执行OP时，会与后一个Buffer尽量做合并，将数据合并后给流单元处理。\n* 最底层为公共线程池。\n\n时序：  \nRunQueue可设置为保序和非保序两种模式，当处理类似视频时序数据时，则保序执行。\n当处理图片非顺序数据时，则可乱序执行。\n\n### 动态图\n\n动态图的执行是由主程序调度流单元执行，MODELBOX在执行时，需要将每一步的数据都返回给调用者。  \n动态图执行和调用RPC接口类似，主程序调用接口后，由MODELBOX框架将数据送入对应的流单元，对应的流单元由Executor进行并发执行。\n执行后的结果汇总后返回给调用点。\n\n为优化动态图性能，如果动态图的结构是业务将数据送入A流单元，立即需要B流单元，则数据不做传输动作。  \n只有控制代码，需要读取操作数据时，才传输数据。\n\n具体原理如下：\n\n![dynamic](assets/dynamic-graph-data-flow.png)\n\n如上述动态代码为\n\n```python\nV1 = A()\nV2 = B(V1)\nV2.modify()\nV3 = C(V2)\n```\n\n对于V1，因为主流程代码未使用，所以当调用B时，B直接从A处获取数据。\n对于V2，因为主流程代码有修改，所以当要修改V2时，MODELBOX框架从B处将数据复制到MAIN处供main修改。\n最后，C处理修改后的数据。返回V3结果，V3的处理方式类似V1,V2\n\n### 静态图\n\n静态图的执行，主要的流程都由MODELBOX框架触发，只需要将最终结果返回调用者。\n静态图是动态图的扩展，与动态图差别有：\n\n1. 图的建立\n图是在初期建立好的，静态图有全局图，动态图没有全局图。\n\n2. 数据的处理\n主程序不控制中间数据的处理，只由数据输入和数据输出处理。\n\n其他流程上基本一致，对流单元没有任何区别。\n\n## Performance\n\n性能跟踪包含Profiling，Statics及Tracing三个模块。如下图所示：\n\n![dynamic](assets/performance.png)\n\n1. Profiling  \nProfiling由运行中流单元内置的代理定时收集指标数据，收集的指标包括：CPU使用率、内存使用率、消息处理CPU时间及实际用时、并发度等，利用收集的数据进行性能统计剖析，并提供导出接口可以为流单元生成一个性能剖析文件，可快速发现延迟和低效情况，有助于业务消除应用瓶颈并减少资源消耗量\n\n2. Tracing  \nTracing用于全流程的调试跟踪，可以跟踪业务内部的消息传递情况，自动分析业务流程中所有流单元的跟踪记录，辅助业务快速确定问题的根本原因，主要是用在开发阶段，辅助跟踪任务的执行情况以及对应的日志信息。Tracing模块还提供导出接口导出tracing文件，可加载到chrome-tracing中，展示对应的甘特图\n\n3. Statics  \nStatics提供打点接口，用于上层业务对业务流程进行打点，汇总并统计展示执行结果，同时提供导出接口可以导出业务Statics打点数据\n\n性能跟踪的整体流程如下图所示：\n\n![dynamic](assets/performance-collection.png)\n\n## API\n\n## Thread Pool\n\n主要包括线程生命周期管理，线程调度器和线程绑定器三个模块。如下图所示：\n\n![dynamic](assets/thread-pool.png)\n\n1. Lifecycle Manager  \n线程生命周期管理器，管理线程生命周期，如创建线程，销毁线程\n\n1. Scheduler  \n线程调度器，线程具有优先级，调度器根据具体优先级调度，优先级范围1-10，值越低，优先级越高\n\n1. 
Binder  \n线程绑定器，提供简单API可以对线程进行绑核操作，提高线程运行效率，保证业务运行性能\n\n## Memory Pool\n\nMemory Pool用于屏蔽底层不同硬件环境下的内存操作，包括GPU、D芯片和普通内存，提供统一的内存池及内存操作接口，主要包括生命周期管理器、\n内存高速交换、内存回收管理、日志跟踪、Slab内存分配和设备内存适配层六个模块。如下图所示：\n\n![dynamic](assets/memory-pool.png)\n\n1. Lifecycle Manager  \n内存池及内存的生命周期管理，包括内存池及内存的创建和销毁，内存池的创建会优先保证内存是连续的，内存使用slab分配器分配，减少碎片产生\n\n2. Efficient Exchange  \n支持不同硬件设备环境下内存的高速交换，减少跨不同硬件环境下内存的拷贝次数和耗时\n\n3. Memory Collection  \n内存回收管理，通过一个计数器对对象进行计数，对象被引用时+1，引用失效时-1，当计数为0时则说明可以被回收，由该模块定时对计数为0的对象进行回收\n\n4. Log Tracking  \n内存操作的日志跟踪，如记录内存块分配的大小，总数，回收，方便追踪及统计内存的使用情况\n\n5. Slab  \n内存分配机制，slab分配器是基于对象进行管理，相同类型的对象归为一类，每当要申请一个对象，slab分配器就从一个slab列表中分配一个这样大小的单元出去，而当要释放时，将其重新保存在该列表中，而不是直接返回给系统，从而避免内存碎片产生\n\n6. Device Memory Adapter  \n设备内存适配层，屏蔽不同的硬件设备差异，提供统一的内存管理接口，不同的设备内存包括CUDA、HIAI及GLIBC\n\n### CUDA stream\n\n为更好利用cuda stream提供的并发能力，CUDA device层，支持cuda的stream接口。\n\n![dynamic](assets/nvidia-stream.png)\n\n使用CUDA stream的过程如下：\n\n1. CUDA的设备层，创建一个`stream pool`，用于记录stream的句柄。\n2. 当内存从CPU侧的FlowUnit复制到GPU时，CUDA device层从stream pool中创建stream，并将stream绑定到设备内存上。\n3. 当CUDA调用核函数处理业务时，从设备内存获取到stream句柄，并触发异步业务。\n4. 同一个任务，共享同一个stream。\n5. 当结果从CUDA设备复制到HOST设备时，进行阻塞等待。\n\n## Message Queue\n\n消息队列用于传递事件和业务消息，在功能上，需要支持消息的：\n\n* 泛型消息。\n* 消息上限设置。\n* 消息的BLOCKING，当队列满或空时阻塞请求。\n* 消息POLLING机制，有消息时唤醒阻塞的任务。\n* 超时机制，BLOCKING接口和POLLING接口支持超时能力。\n\n消息队列消耗的内存来源于底层内存池，内存统计接口支持统计出消息的占用情况。\n\n## Graph Manage\n\n主要包含Graphviz、Interface和Subgraph Visualization功能模块，如下图所示：\n\n![dynamic](assets/graph-manage.png)\n\n1. Graphviz  \nGraphviz用于图的可视化展示和DOT图格式处理。Graphviz是一个开源的图形可视化软件，具有web和交互式图形界面，以简单的文本语言描述图形，并以几种有用的格式绘制图形，如用于web页面的images和SVG、用于包含在PDF或其他文档中的Postscript、或显示在交互式图形浏览器中(Graphviz还支持GXL，一种XML方言)。Graphviz对于具体图表有许多有用的特性，比如颜色、字体、表格节点布局、行样式、超链接和自定义形状的选项。\n\n2. Interface  \n提供统一的图管理操作接口，让开发者快速地配置、编排图的流程。图的管理接口包括静态图和动态图的管理。图的接口主要包含点及点属性的增删改查、边及边属性的增删改查、运行态图的启动和销毁等\n\n3. 
Subgraph Visualization  \n支持子图可视化、子图的导航及元数据展现。通过图形化界面可以快速展现各个嵌套子图的组成，还支持展现和导出各个子图的配置信息\n\n## Develop\n\n## Standalone Server\n\nmodelbox独立的运行服务器，用于帮助用户减少业务开发工作的独立进行组件，其功能是安装到OS中.\n服务启动时，读取对应的服务配置文件，和图文件，加载对应的流单元来运行业务。\n\n独立服务器流程：\n\n1. 开发者下载MODELBOX安装包后，进行安装\n2. 开发者配置MODELBOX的基本参数modelbox.conf。\n3. 开发者按照图开发指导，开发graph.conf，并开发相应的流单元。\n4. 开发完成后，配置modelbox读取对应的图和流单元。\n5. 启动modelbox任务。\n6. modelbox从输入流单元读取数据，并将数据推理后输出。\n\nmodelbox服务的启停，监控由systemd服务负责。\n\n## Flow Unit\n\n### 流单元接口\n\n流单元根据运行的硬件设备不同划分为CPU流单元，CUDA流单元，HIAI流单元。它们继承自FlowUnitBase\n\n其接口如下：\n\n* open，close： 用于在图创建时，创建或关闭对应的流单元上下文\n* pre，post：用于数据处理的预处理和后处理。\n* process： 用于数据处理。\n\ninit函数提供了创建流单元对象的方法，并且提供了参数的检查方法。\n流单元加载器会加载流单元对应的so文件，并且获取名为init函数的函数指针，并且调用它。从此获得了该流单元的名称, 流单元的create function, 支持的运行设备(CPU/CUDA/HIAI)以及版本号。\n\n流单元的Setup()函数会在Initialize()函数之前调用，其主要是设置流单元的参数。\n\n流单元的Initialize()函数根据Flow UnitOptions的配置初始化流单元实例，Flow UnitOptions的配置由用户在创建图的时候指定。\n\n流单元的Process()根据当前运行的上下文执行流单元功能。Flow UnitContext包含当前流单元的运行环境，比如Inputs，Outputs，Context。\n\n流单元的Destroy()函数在运行结束时释放资源。\n\n### 流单元打包\n\n流单元根据运行的设备不用，分别生成不同的so，例如Resize流单元分别有CPU的Reszie，CUDA的Resize，HIAI的Resize流单元，那么将分别生成modelbox_resize_cpu.so，modelbox_resize_gpu.so， modelbox_resize_hiai.so动态库。\n\nMODELBOX根据用户指定的Device，或者自动根据当前的运行环境选择Device，\n如果Device指定CPU，那么MODELBOX将动态加载modelbox_resize_cpu.so，如果Device指定CUDA，那么MODELBOX将动态加载modelbox_resize_gpu.so，如果在指定的路径下找不多对应的so，那么MODELBOX将返回失败。\n\n### 流单元列表\n\nMODELBOX需要提供尽可能多的流单元库\n\n| 流单元名称           | 流单元类别   | 流单元功能说明                                                                                  |\n| -------------------- | ------------ | ----------------------------------------------------------------------------------------------- |\n| SyncHttp             | 输入类流单元 | 接收HTTP输入请求，并将请求的报文转发到后端流单元，待后端完成后，结果返回给对应的客户端。        |\n| AsyncHttp            | 输入类流单元 | 接收HTTP输入请求，并将请求的报文转发到后端流单元。                                              |\n| URLReader            | 输入类流单元 | 读取特定URL下的特定文件，并将数据输出到后续流单元，URL包含本地目录，obs目录，远端http文件。     
|\n| SyncQueue            | 输入类流单元 | 接收来自外部的数据，并将数据输出到后续流单元处理，后续流单元处理完成后，返回个等待的任务。      |\n| AsyncQueue           | 输入类流单元 | 接收来自外部队列的数据，并将数据输出到后续流单元。                                              |\n| DecodeImage          | 图像类流单元 | 解码图片。                                                                                      |\n| EncodeImage          | 图像类流单元 | 编码图片。                                                                                      |\n| Resize               | 图像类流单元 | 缩放图片，包括指定长宽，等比例缩放，按较短边等比例缩放，按较长边等比例缩放，等比例缩放padding。 |\n| Rotate               | 图像类流单元 | 按指定的角度旋转图片。                                                                          |\n| ColorSpaceConversion | 图像类流单元 | 转换图片的颜色空间，例如RGB转换成BGR。                                                          |\n| Crop                 | 图像类流单元 | 根据给定的box抠图。                                                                             |\n| HSV                  | 图像类流单元 | 设置图片的饱和度，透明度。                                                                      |\n| Pad                  | 图像类流单元 | 用指定的值padding图片。                                                                         |\n| BBoxPaste            | 图像类流单元 | resize bbox。                                                                                   |\n| DecodeAudio          | 音频类流单元 | 解码Audio。                                                                                     |\n| Reshape              | 数学类流单元 | 对Tensor进行Reshape。                                                                           |\n| Normalize            | 数学类流单元 | 对Tensor进行Normalize。                                                                         |\n| DecodeBase64         | 数学类流单元 | 解码Base64。                                                                                    |\n| EncodeBase64         | 数学类流单元 | 编码Base64。                                                                                    |\n| Cast                 | 数学类流单元 | 对Tensor进行类型转换，例如uint8转换float32。  
                                                  |\n| Inference            | 数学类流单元 | 模型推理。                                                                                      |\n| 同步输出流单元       | 输出类流单元 | 将数据原路输出，比如http rest请求时，需要等待推理完成后，再输出结果。                           |\n| 文件流单元           | 输出类流单元 | 将结果输出到指定的位置。                                                                        |\n| 队列流单元           | 输出类流单元 | 将结果输出到队列中。                                                                            |\n\n## 动态图接口设计\n\n作为入口函数， 主要提供了flowunits接口， 初始化ModelBox\n\n| 函数                 | 参数 | 作用                              |\n| -------------------- | ---- | --------------------------------- |\n| ModelBox.Flowunits() |      | 创建Flowunits对象，初始化ModelBox |\n\nflowunits: ModelBox的功能初始化接口:\n\n| 函数                                                 | 参数                   | 作用|\n| ----------------------------------- | ----------------------|---------- |\n| Flowunits::init(flowunit_path, skip_default = false) | flowunit_path: 用户自定义的流单元路径 <br> skip_default: 是否跳过默认的流单元路径 | 加载并初始化流单元驱动|\n| Flowunit::SetConfig(dict)               |                                      | 全局配置 |\n| BufferList =  Flowunits::CreateInput() |                    | 创建输入的bufferlist                                    |\n\nFlowunit算子：  flowunit作为Driver的具体单元，执行具体的功能执行\n\n|函数                              | 函数参数说明                                             | 函数说明       |\n| ------------------------------------------------------------ | -------------------------------------------------------- | -------------- |\n| BufferSequenceMap = Flowunits.unit.FlowUnitName({portname: Buffer01 },  ... ，  option) | 输入为{PortName：Buffer}结构，输出为BufferSequenceMap | 输入输出都是多端口的话，则输入为｛portname：Buffer｝，输出为Map结构 |\n| BufferSequenceMap= Flowunits.unit.FlowUnitName({portname: BufferSequence01,} ... 
，  option) | 输入为{PortName：BufferSequence}结构，输出为BufferSequenceMap | 输入输出都是多端口的话，则输入为｛portname：BufferSequence｝，输出为Map结构 |\n| BufferSequenceMap = Flowunits.unit.FlowUnitName({portname: BufferList01} , ... ，  option) | 输入为{PortName：Bufferlist}结构，输出为BufferSequenceMap | 输入输出都是多端口的话，则输入为｛portname：BufferList｝，输出为Map结构 |\n| BufferSequenceMap = Flowunits.unit.FlowUnitName({portname: Stream01} , ... ，  option) | 输入为{PortName：Stream}结构，输出为BufferSequenceMap | 输入输出都是多端口的话，则输入为｛portname：Stream｝，输出为Map结构 |\n| BufferSequence = Flowunits.unit.FlowUnitName({portname: Buffer01 },  ... ，  option) | 输入为{PortName：Buffer}结构，输出为BufferSequence | 只有一个输出，直接将输出转换为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName({portname: BufferSequence01},  ... ，  option) | 输入为{PortName：BufferSequence}结构，输出为BufferSequence | 只有一个输出，直接将输出转换为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName({portname: BufferList01},  ... ，  option) | 输入为{PortName：Bufferlist}结构，输出为BufferSequence | 只有一个输出，直接将输出转换为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName({portname: Stream01} , ... 
，  option) | 输入为{PortName：Stream}结构，输出为BufferSequence | 只有一个输出，直接将输出转换为BufferSequence |\n| BufferSequenceMap = Flowunits.unit.FlowUnitName( Buffer,   option) | 输入为Buffer，输出为BufferSequenceMap | 输入只有一个Buffer， 输出为BufferSequenceMap |\n| BufferSequenceMap= Flowunits.unit.FlowUnitName(BufferList,   option) | 输入为BufferList，输出为BufferSequenceMap | 输入只有一个BufferList， 输出为BufferSequenceMap |\n| BufferSequenceMap= Flowunits.unit.FlowUnitName(BufferSequence，  option) | 输入为BufferSequence，输出为BufferSequenceMap | 输入只有一个BufferSequence， 输出为BufferSequenceMap |\n| BufferSequenceMap = Flowunits.unit.FlowUnitName(Stream，option) | 输入为Stream，输出为BufferSequenceMap | 输入只有一个Stream， 输出BufferSequenceMap |\n| BufferSequence = Flowunits.unit.FlowUnitName(Buffer,  option) | 输入为Buffer，输出为BufferSequence | 输入只有一个Buffer， 只有一个输出为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName(BufferList,  option) | 输入为BufferList，输出为BufferSequence | 输入只有一个BufferList， 只有一个输出为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName(BufferSequence,  option) | 输入为BufferSequence，输出为BufferSequence | 输入只有一个BufferSequence， 只有一个输出为BufferSequence |\n| BufferSequence = Flowunits.unit.FlowUnitName(Stream,  option) | 输入为Stream，输出为BufferSequence | 输入只有一个Stream，只有一个输出为BufferSequence |\n\n基本数据结构：\n\nBuffer：\n| 函数和变量             | 作用                   |\n| ---------------------- | ---------------------- |\n| Buffer(data)           | 使用字符串初始化buffer |\n| Buffer::PushBack(data) | 添加数据               |\n| SetMeta  (dict)        | 设置Buffer Meta        |\n| GetMeta  (key)         | 获取Buffer Meta        |\n\nStream： Flowunit的父类， 提供stream接口\n\n| 函数                       | 作用                             |\n| -------------------------- | -------------------------------- |\n| __next__                   | 遍历包含的Buffer（支持in操作符） |\n| __iterator__               | 迭代器，用于遍历Buffer           |\n| BufferList GetBufferList() | 获取Stream包含的Bufferlist       |\n| bool Stream::EOF()         | 判断流是否结束                   |\n| Close()             
       | 关闭bufferlist                   |\n| PushBack(Buffer)           | 插入数据到BufferList尾部         |\n| Buffer  PopFront()         | 取出buffer_list_中第一个的buffer |\n| SetMeta  (dict)            | 设置BufferList Meta              |\n| GetMeta  (key)             | 获取Stream Meta                  |\n\nBufferList： Flowunit父类， 提供Bufferlist接口\n\n| 函数               | 作用                             |\n| ------------------ | -------------------------------- |\n| PushBack(Buffer)   | 插入数据到BufferList尾部         |\n| Buffer  PopFront() | 取出buffer_list_中第一个的buffer |\n| [] At(Buffer)      | 获取buffer                       |\n| __next__           | 返回值Buffer                     |\n| __iterator__       | 迭代器，用例遍历Buffer           |\n| Length()           | 获取BufferList的长度             |\n\nBufferSequence: 表示流单元之间交互的单元， 作为Stream和 Bufferlist的子类\n\n| 函数                        | 作用                                 |\n| --------------------------- | ------------------------------------ |\n| Stream = asStream()         | 转为Stream                           |\n| BufferList = asBufferList() | 转为BufferList                       |\n| Buffer = asBuffer()         | 转为Buffer                           |\n| BufferSequence(Buffer)      |                                      |\n| BufferSequence(BufferList)  |                                      |\n| BufferSequence(Stream)      |                                      |\n| SetMeta  (dict)             | 设置BufferList Meta                  |\n| __next__                    | 用于遍历包含的Buffer（支持in操作符） |\n| __iterator__                | 迭代器， 返回为Buffer                |\n\nDemo如下:\n\n```python\nImport ModelBox \nflowunits = ModelBox.Flowunits();\nif !flowunits.init({configuration}):\n    log(\"failed init flowunits\")\nelse:\n    #  创建输入流，用于流单元的输入\n    input = flowunits.CreateStream()  \n    input.pushback({Buffer});\n    input.close()\n\n    # 创建encoder流， 用于encoder的输出\n    encoder_stream = flowunits.CreateStream()\n    \n    # 将流绑定到Encoder， 用于encoder的输入\n    
flowunits.unit.VideoEncoder(encoder_stream, None)  \n \n    # 将input绑定到VideoDemux，用于demux的输入\n    demux_option = {\"device\": \"cpu\",\"device_id\": 0}  # 使用dict设置Option \n    video_demuxer_output = flowunits.unit.VideoDemux(input, demux_option)  \n    video_demuxer_stream = video_demuxer_output.asstream()\n    \n    video_decoder_output = flowunits.unit.VideoDecode(video_demuxer_stream, None) \n    video_decoder_stream = video_decoder_output.asstream()\n    \n    while video_decoder_stream.EOF():\n        Bufferlist buffer_list = video_decoder_stream.GetBufferList()  \n        if buffer_list.Length()!=0:\n            infer_frame_output = flowunits.unit.InferFrame(buffer_list, None)\n            infer_frame_list = infer_frame_output.asbufferlist()\n            frame_list = flowunits.unit.Expand(infer_frame_list, None).asbufferlist()\n            for frame in frame_list:\n                word_list =  flowunits.unit.WordInfer(frame_list[i], None).asbufferlist()\n                draw_pic = flowunits.unit.DrawWord({\"input1\": word_list, \"input2\" :buffer_list}, None).asbufferlist()\n                encoder_stream.pushBack(draw_pic) # 向Encoder的输入流中填充数据\n            #...\n    encoder_stream.close()\n```\n\n精简写法：\n\n```python\nImport ModelBox \nflowunits = ModelBox.Flowunits()\n\nif !flowunits.init({configuration}):\n    log(\"flowunit init failed\")\nelse:\n    input  = flowunits.CreateStream()\n    input.pushBack({Buffer}) \n    input.close()\n    \n    encoder_stream  = flowunits.CreateStream() # 创建encoder流， 用于encoder的输出\n    \n    demux_option = {\"device\": \"cpu\",\"device_id\": 0}\n    video_demuxer_output = flowunits.unit.VideoDemux(input, demux_option)  \n    video_decoder_output = flowunits.unit.VideoDecode(video_demuxer_output, None) //  首先读取video_demux的config\n\n    flowunits.unit.VideoEncoder(encoder_stream, option)\n    \n    for buffer in video_decoder_output:\n        infer_frame_output = flowunits.unit.InferFrame(Buffer,None)\n        frame_list = 
flowunits.unit.Expand(infer_frame_output, None)\n        \n        for frame in frame_list:\n            word_list =  flowunits.unit.WordInfer(frame,  None)\n            draw_pic = flowunits.unit.DrawWord({\"input1\": word_list, \"input2\" :buffer_list}, None)\n            encoder_stream.pushBack(draw_pic)\n            #...\n    encoder_stream.close()\n\n```\n"
  },
  {
    "path": "docs/Doxyfile.in",
    "content": "PROJECT_NAME           = \"@CMAKE_PROJECT_NAME@\"\nPROJECT_NUMBER         = @MODELBOX_VERSION_MAJOR@.@MODELBOX_VERSION_MINOR@.@MODELBOX_VERSION_PATCH@\nINPUT                  = @DOXYGEN_LIBMODELBOX_INCLUDES@ \\\n                         @LIBMODELBOX_BASE_INCLUDE@ \\\n                         @DOXYGEN_MODELBOX_SERVER_INCLUDES@ \\\n                         @LIBMODELBOX_DEVICE_CPU_INCLUDE@ \\\n                         @LIBMODELBOX_DEVICE_CUDA_INCLUDE@ \\\n                         @LIBMODELBOX_DEVICE_ASCEND_INCLUDE@\n\nFILE_PATTERNS          = *.h \\\n                         *.py\nRECURSIVE              = YES\nWARN_IF_UNDOCUMENTED   = NO\nGENERATE_LATEX         = NO\nEXCLUDE                = Doxygen.in\nEXAMPLE_PATH           = @CMAKE_CURRENT_SOURCE_DIR@/../examples\nEXAMPLE_PATTERNS       = *\nEXAMPLE_RECURSIVE      = YES\nIMAGE_PATH             = @CMAKE_CURRENT_SOURCE_DIR@/../docs\nOUTPUT_DIRECTORY       = @CMAKE_CURRENT_BINARY_DIR@/doxygen/\nEXTRACT_STATIC         = YES\nQUIET                  = YES"
  },
  {
    "path": "docs/Goal.md",
    "content": "# ModelBox\n\nMODELBOX支持AI业务的快速应用开发，开发人员能通过简单的配置或编码完成AI推理业务的开发工作，并且使业务的运行性能高效，和跨平台、设备运行。  \n对华为D芯片能较好的支持，是客户易于使用D芯片能力，在分布式方面也能更容易开发分布式代码。\n\n## 场景\n\nMODELBOX需要支持如下业务场景（挑选部分行业场景）：\n\n行业|解决方案归属|场景说明\n--|--|--\n智慧城市|数字政府|人群聚集（横幅游行）、占道经营、打架斗殴、烟火检测、共享单车乱摆放等业务场景\n园区|智慧园区解决方案|人脸检测、徘徊检测、人流量、热力图等业务场景\n高速|智慧高速解决方案|车流、车速、异常事件检测、车脸识别、车辆属性结构化等业务场景\n机场|智慧机场解决方案|飞机入离位事件识别、客梯车停靠、撤离识别、客舱门开关识别、餐车配餐开始和结束识别等业务场景\n物流|智慧物流解决方案|暴力分拣检测、叉车掉货识别、皮带践踏识别等业务场景\n防暴防恐|防暴防恐解决方案|涉黄、涉暴、涉恐、涉政、涉敏识别等业务场景\nOCR|OCR解决方案|票据OCR、智能相册、图像搜索等业务场景\n互联网|社交|阿联酋G42 Totok类的TTS业务，实现文字转语音，NLP和语音类的业务场景需要对样本进行多次处理\n\n## 目标\n\n### MODELBOX对应的设计目标\n\n1. 动态图/静态图  \nMODELBOX支持动态图和静态图:  \n`动态图`主要提供python相关的接口，开发人员直接利用python编写AI推理相关的应用，同时可以使用PDB进行推理应用的开发调试。  \n`静态图`提供可执行二进制，开发人员可通过图形界面编排AI静态图，和开发相关的流单元，即可实现高性能推理业务的开发工作。  \n对于动态图/静态图，支持PDB，GDB在线调式。  \n\n1. 图形化开发推理，一键式发布服务：  \n用户在UI界面编排页面通过拖拽方式生成编排静态图文件完成后，一键式将编排业务下发到后台，由后台生成推理服务镜像，并加载对应的编排信息，启动对应的业务。\n用户在NodeBook界面使用python调试开发完推理业务成后，一键式将python业务下发到后台，由后台生成推理服务镜像，并加载对应的编排信息，启动对应的业务。\n\n1. 性能监控和跟踪  \n`性能监控`：推理流程，对每个步骤进行打点，统计业务的负载，资源占用情况，并将资源在后台展示给用户，同时支持用户配置自动负载均衡策略。  \n`profiling`：在开发过程中，支持生成推理业务的profiling文件，对每个执行步骤、流单元的并发度，耗时做采集，并生成profileing文件，UI工具展示采集结果。  \n\n1. 支持平台、异构部署  \n支持端、边、云部署；支持IVA， ModelArts，HILens平台；支持GPU，D芯片，ARM，X86等设备。\n\n1. 
提高开发调试易用性  \n易于调试开发，支持动态图转换为静态图。高效率运行服务。\n\n### 支持特性列表  \n\n| MODELBOX              |                                                           |\n| ------------------ | -------------------------------------------------------- |\n| 业务场景           | 视频，音频，图片，通用数据                                  |\n| 跨平台             | 移动设备（andriod，iOS），边侧设备，嵌入式设备，服务器。      |\n| 图形化编排         | 支持模型的串联，支持视频流，音频流，图片等推理                |\n| 用户群             | 研究人员，学生，软件开发者                                  |\n| 主要业务场景       | 快速完成AI推理业务的开发工作                                 |\n| API列表            | C++SDK，JAVA SDK，PYTHON SDK，OBJECT-C SDK                |\n| 支持OS             | Linux, andriod，iOS                                       |\n| 支持硬件           | CPU, GPU， D芯片                                           |\n| 图可视化           | 编辑器可视化图，子图                                        |\n| 性能调测           | 性能跟踪。                                                 |\n| 循环图，条件       | 支持                                                       |\n| 流支持             | 按序号排序                                                 |\n| 数据类型检查       | ports概念，ports有类型，构建图的时候校验。                    |\n| 数据源节点         | 数据源节点                                                 |\n| 汇聚节点           | 数据汇总节点                                               |\n| 输入保序           | 按照序号保序                                               |\n| 分布式             | 支持分布式图处理，分布式动态调整业务执行                      |\n| 编排格式           | Graphviz格式。                                             |\n| 一次开发，多处运行  | PYTHON流单元，C++流单元，java流单元。                            |\n| 通用流单元覆盖率      | 90%以上的业务场景无需单独写流单元                            |\n\n### 单机编排\n\n提供编排推理开发界面，提供流单元库，通过将流单元库中的流单元连接的方式完成推理编排。  \n![flow](assets/flow.png)\n\n上述编排例子：  \n从OBS中读取数据，调用decoder进行解码，解码后分类，分类为人，自行车，汽车。  \n分别对人进行人脸识别，汽车进行车牌识别，自行车进行判断，处理后，将数据上报存储到OBS上。  \n\n每个流单元都可以进行相关的设置，其支持功能列表：  \n\n1. 编排方式创建推理任务。\n1. 输入流单元，输出流单元标准化，无需单独开发。\n1. 多个流单元可以打包为一个流单元group，外部提供接口。\n1. 
数据类型检查匹配，对不匹配的数据不能连接图。\n1. 自定义流单元加入到编排过程中。\n1. 每个流单元的执行策略可进行设置。\n\n流单元开发要求\n\n1. 与大数据结合作为UDF使用、图像处理（matlab/opencv）\n1. 流单元完备\n1. 流单元开发简单\n1. 流单元选择（流单元理解、流单元联想）\n1. 流单元打包（集合）\n\n### 性能调测\n\n![profiling](assets/profiling.png)\n\n针对编排的流程，性能统计给出每个流单元，每个步骤的性能统计，运维人员或者开发人员可以对编排进行可视化调优。\n\n### 开发调测\n\n在动态图的情况下，使用Notebook开发业务流程，让用户聚焦于业务开发。\n\n## 接口\n\n### 代码写作样例\n\n![code-example-graph](assets/code-example-graph.png)\n\n上述例子流程：\n\n1. 输入图片，classification对图像分类，\n2. 如果图像是car，则走B1分支，识别车牌，并将结果通过json输出。\n3. 如果是body，则走B2分支，将图像进行处理，识别人脸，同时输出人脸信息，最后通过json输出。\n\n### 静态图写法\n\n```python\ndef IsCar(INPUT):\n    #分支B1\n    if INPUT == CAR:\n        return True\n    else:\n        return False\n\n\ndef IsBody(INPUT):\n    #分支B2\n    if INPUT == BODY:\n        return True\n    else:\n        return False\n\n\ndef loop_Json(INPUT1, INPUT2, OUTPUT):\n    #循环\n    if OUTPUT == IS_COMPLETE:\n        return False\n    return True\n\n\ndef graph():\n    # 分类\n    classification = flow.Classification(INPUT)\n\n    cpu = modelbox.device(\"cpu1~20\")\n\n    # car的流程，获取车牌\n    car = flow.Car(classification, condition_func=IsCar, device=cpu)\n    plate = flow.Plate(car)\n    json = flow.Json(plate, None, loop_func=loop_Json)\n\n    # 人的流程，采集人脸，识别人脸\n    body = flow.Crop(classification, condition_func=IsBody)\n    face_resize = flow.Resize(classification, condition_func=IsBody)\n    face_rotated = flow.Rotate(face_resize)\n    face = flow.Face(face_rotated)\n    json = flow.Json(body, face, loop_func=loop_Json)\n\n    #返回json结果\n    return json\n\ndef runTask():\n    # 执行编排并获取结果\n    json = flow.Output()\n    return json\n```\n\n### 动态图写法\n\n```python\ndef runTask():\n    classification = flowunit.Classification(INPUT)\n    json_in1 = None\n    json_in2 = None\n\n    if classification == CAR:\n        # 如果是Car\n        car = flowunit.Car(classification)\n        json_in1 = flowunit.Plate(car)\n        json_in2 = None\n    elif classification == BODY:\n        # 如果是人\n        # 采集人图像\n        json_in1 
= flowunit.Crop(classification)\n\n        # 人脸识别\n        face_resize = flowunit.Resize(classification)\n        face_rotated = flowunit.Rotate(face_resize)\n        json_in2 = flowunit.Face(face_rotated)\n    else:\n        return None\n\n    # 循环处理多个结果，组装为json报文\n    for in1, in2 in json_in1, json_in2:\n        if in1 == DROP or in2 == DROP:\n            continue\n\n        json.append(flowunit.Json(in1, in2))\n\n    return json\n```\n\n### 配置文件格式\n\n静态图配置文件语法：对应上述的例子，配置文件采用graphviz的DOT语法，或caffe的prototxt格式, 可以在vscode中直接写作DOT。\n\n#### 例子\n\n```graphviz\nstrict digraph \"G\" {\n    \"classfication\" [type=flowunit, flowunit=class, device=\"cpu\", deviceid=\"0\"]\n    \"car\" [type=flowunit, flowunit=car_predict, device=\"gpu\", deviceid=\"1\"]\n    \"plate\" [type=flowunit, flowunit=plate_predict, device=\"gpu\", deviceid=\"2\"]\n\n    \"crop\" [type=flowunit, flowunit=crop]\n    \"resize\" [type=flowunit, flowunit=resize]\n    \"rotate\" [type=flowunit, flowunit=rotate]\n    \"face\" [type=flowunit, flowunit=face_predict]\n    \"json\" [type=flowunit, flowunit=json]\n\n    \"has_car\" [type=flowunit, flowunit=has_car]\n    \"has_body\" [type=flowunit, flowunit=has_body]\n\n\n    # 描述关系\n    # B1 分支\n    \"classfication\" -> \"has_car\"  ->\"car\" -> \"plate\" -> \"json\"\n\n    # B2 分支\n    \"classfication\" -> \"has_body\" -> \"crop\" -> \"json\"\n    \"classfication\" -> \"has_body\" -> \"resize\" -> \"rotate\" -> \"face\" -> \"json\"\n\n    # 循环\n    \"json\" -> \"json\" [label=\"LOOP N=10\"]\n}\n```\n\n具有子图的例子，将B2分支完全放入子图中。\n\n![profiling](assets/subgraph-example.png)\n\n```graphviz\nstrict digraph \"G\" {\n    # 定义流单元\n\n    \"cpu\" [type=devices, device=cpu,1~12]\n    \"gpu1\" [type=devices, device=cuda,1]\n    \"gpu2\" [type=devices, device=cuda,2]\n\n    \"classfication\" [type=flowunit, op=class, device=\"cpu\"]\n    \"car\" [type=flowunit, op=car_predict, device=\"gpu1\"]\n    \"plate\" [type=flowunit, op=plate_predict, device=\"gpu2\"]\n\n    
\"has_car\" [type=control, cond=has_car]\n    \"has_body\" [type=control, cond=has_body]\n\n    # 描述关系\n    # B1 分支\n    \"classfication\" -> \"has_car\"  ->\"car\" -> \"plate\" -> \"json\"\n\n    # B2 分支，采用子图形式\n    \"classfication\" -> \"has_car\"  ->\"has_body\" -> \"sub:INPUT\"\n    subgraph \"sub\" {\n        \"INPUT\" [type=input]\n        \"OUTPUT\" [type=output]\n\n        \"crop\" [type=flowunit, op=crop]\n        \"resize\" [type=flowunit, op=resize]\n        \"rotate\" [type=flowunit, op=rotate]\n        \"face\" [type=flowunit, op=face_predict]\n        \"json\" [type=flowunit, op=json]\n\n        \"INPUT\" -> \"crop\" -> \"json\"\n        \"INPUT\" -> \"has_body\" -> \"resize\" -> \"rotate\" -> \"face\" -> \"json\"\n\n        \"json\" -> \"OUTPUT\"\n    }\n    \"sub:OUTPUT\" -> \"json\"\n\n    # 循环\n    \"json\" -> \"json\" [label=\"LOOP N=10\"]\n}\n```\n"
  },
  {
    "path": "examples/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE EXAMPLE_BIN_FILES ${CMAKE_CURRENT_LIST_DIR}/bin/*)\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${MODELBOX_SERVER_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\n\nlink_directories(${LIBMODELBOX_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_CPU_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_CUDA_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_ASCEND_BINARY_DIR})\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif (STANDALONE)\n    set(MODELBOX_ROOT_VAR \"\\${MODELBOX_ROOT}\")\nendif()\n\nset(MODELBOX_TEMPLATE_DIR \"${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/project-template\")\nset(MODELBOX_EXAMPLE_DIR \"${CMAKE_CURRENT_LIST_DIR}\")\n\nset(PROJECT_BASE_BINARY_DIR \"${CMAKE_CURRENT_BINARY_DIR}/project-base\")\nset(PROJECT_BINARY_DIR \"${CMAKE_CURRENT_BINARY_DIR}/project-release\")\nfile(COPY ${MODELBOX_EXAMPLE_DIR}/project/base/ DESTINATION ${PROJECT_BASE_BINARY_DIR})\nconfigure_file(${MODELBOX_TOP_DIR}/CMake/FindACL.cmake   ${PROJECT_BASE_BINARY_DIR}/CMake/FindACL.cmake  
@ONLY)\nconfigure_file(${MODELBOX_TOP_DIR}/CMake/FindDSMI.cmake   ${PROJECT_BASE_BINARY_DIR}/CMake/FindDSMI.cmake  @ONLY)\nconfigure_file(${MODELBOX_TOP_DIR}/CMake/Function.cmake   ${PROJECT_BASE_BINARY_DIR}/CMake/Function.cmake  @ONLY)\nconfigure_file(${MODELBOX_TOP_DIR}/CMake/Options.cmake   ${PROJECT_BASE_BINARY_DIR}/CMake/Options.cmake  @ONLY)\nconfigure_file(${MODELBOX_TOP_DIR}/test/test_main.cc   ${PROJECT_BASE_BINARY_DIR}/test/test_main.cc  @ONLY)\nfile(COPY ${MODELBOX_TOP_DIR}/.gitignore DESTINATION ${PROJECT_BASE_BINARY_DIR})\n\nfile(COPY ${MODELBOX_EXAMPLE_DIR}/service-plugin DESTINATION ${PROJECT_BINARY_DIR})\nfile(COPY ${MODELBOX_EXAMPLE_DIR}/flowunit DESTINATION ${PROJECT_BINARY_DIR})\n\nsubdirlist(PROJECT_TEMPLATE ${MODELBOX_EXAMPLE_DIR}/project \"\")\nlist(REMOVE_ITEM PROJECT_TEMPLATE \"base\")\n\nsubdirlist(DEMO_LIST ${DEMO_SOURCE_DIR} \"\")\n\n# Create template project\nforeach (ITR ${PROJECT_TEMPLATE})\n\tset(PROJECT_TEMPLATE_DIR \"${PROJECT_BINARY_DIR}/project/${ITR}\")\n\tset(PROJECT_TEMPLATE_SRC_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/project/${ITR}\")\n    set(PROJECT_DEMO_DIR \"\")\n    file(COPY ${PROJECT_BASE_BINARY_DIR}/ DESTINATION ${PROJECT_TEMPLATE_DIR})\n\n    # check if whether copy demo source\n    foreach (ITR1 ${DEMO_LIST})\n        if(${ITR} STREQUAL ${ITR1})\n            file(COPY ${DEMO_SOURCE_DIR}/${ITR1}/ DESTINATION ${PROJECT_TEMPLATE_DIR}/src)\n            set(PROJECT_DEMO_DIR \"${DEMO_SOURCE_DIR}/${ITR1}\")\n        endif()\n    endforeach(ITR1)\n    file(COPY ${PROJECT_TEMPLATE_SRC_DIR}/ DESTINATION ${PROJECT_TEMPLATE_DIR})\n    if(EXISTS ${PROJECT_TEMPLATE_SRC_DIR}/setup.sh)\n        # run setup script in template directory\n        execute_process(COMMAND sh ${PROJECT_TEMPLATE_SRC_DIR}/setup.sh \"${PROJECT_TEMPLATE_DIR}\" \"${MODELBOX_EXAMPLE_DIR}\" \"${PROJECT_DEMO_DIR}\" \"${DEMO_SOURCE_DIR}\"\n            RESULT_VARIABLE COMMAND_RESULT\n            WORKING_DIRECTORY ${PROJECT_TEMPLATE_DIR} \n        )\n\n        
if(COMMAND_RESULT)\n            message(FATAL_ERROR \"run setup script failed\")\n        endif()\n        file(REMOVE ${PROJECT_TEMPLATE_DIR}/setup.sh)\n    endif()\nendforeach(ITR) \n\ninstall(DIRECTORY ${PROJECT_BINARY_DIR}/ DESTINATION ${MODELBOX_TEMPLATE_DIR}\n    COMPONENT document\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/misc/modelbox-template-cmd.json.in ${CMAKE_CURRENT_BINARY_DIR}/misc/modelbox-template-cmd.json @ONLY)\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/misc/modelbox-template-cmd.json\n    DESTINATION ${MODELBOX_TOOLS_PATH}\n    COMPONENT document\n    )\n\ninstall(PROGRAMS ${EXAMPLE_BIN_FILES}\n    DESTINATION ${MODELBOX_TOOLS_PATH}\n    COMPONENT document)\n\nset(MODELBOX_TEMPLATE_BIN_DIR \"${PROJECT_BINARY_DIR}\" CACHE INTERNAL \"\")\nset(MODELBOX_TEMPLATE_CMD_PATH \"${CMAKE_CURRENT_LIST_DIR}/bin/template\" CACHE INTERNAL \"\")\n"
  },
  {
    "path": "examples/bin/template",
    "content": "#!/bin/bash\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nMODELBOX_TEMPLATE_PATH=\"${MODELBOX_TEMPLATE_PATH:-${MODELBOX_ROOT}/usr/local/share/modelbox/project-template}\"\nDEFAULT_PROJECT_PATH=\"$HOME/modelbox-project\"\nAPPLICATION_PATH=\"${MODELBOX_ROOT}/opt/modelbox/application\"\n\nshowhelp() {\n\techo \"usage $(basename $0) [OPTIONS]\"\n\techo \"create modelbox project or flowunit\"\n\techo \"-project                              create modelbox project.\"\n\techo \"  -name [name]                          project name, default is example\"\n\techo \"  -path [path]                          project path, default is \\$HOME/modelbox-project.\"\n\techo \"  -getname [path]                       get project name.\"\n\techo \"  -list-template                        list suppored template.\"\n\techo \"  -rootpath [path]                      project root path.\"\n\techo \"  -template [template name]             project template, default is empty project.\"\n\techo \"-flowunit                             create modelbox flowunit.\"\n\techo \"  -name [name]                          flowunit name\"\n\techo \"  -lang [python|c++|infer|yolo]       flowunit program language.\"\n\techo \"  -type [type]                          flowunit type:\"\n\techo \"                                          normal|stream|condition|collapse|collapse_all|expand\"\n\techo \"  -group-type [group type] 
             flowunit group type, for UI display:\"\n\techo \"                                          generic|video|inference\" \n\techo \"  -project-path [path-to-project]       create flowunit in specific project, default is \\$HOME/modelbox-project.\"\n\techo \"  -input [name=name,device=device|-]      flowunit input ports. '-' for default port name\"\n\techo \"  -output [name=name,device=device|-]     flowunit output ports. '-' for default port name\"\n\techo \"  -device [cpu|cuda|ascend]             flowunit device.\"\t\n\n\techo \"  flowunit infer\"\n\techo \"    -virtual-type [type]                  virtual flowunit type: 'tensorflow', 'tensorrt', 'torch', 'acl', 'mindspore'\"\n\techo \"    -model [path]                         model file path\"\n\techo \"    -copy-model                           copy model file to flowunit source dir\"\n\techo \"  flowunit yolo\"  \n\techo \"    -virtual-type [type]                  virtual flowunit type: 'yolov3_postprocess', 'yolov5_postprocess'\"\n\techo \"-service-plugin                        create modelbox service plugin\"\n\techo \"  -project-path [path-to-project]       create flowunit in specific project, default is \\$HOME/modelbox-project.\"\n\techo \"  -name [name]                          plugin name\"\n\techo \"-help                                  show this help message.\"\n\techo \"\"\n\techo \"example:\"\n\techo \" create project:\"\n\techo \"   $(basename $0) -project -name=\\\"project\\\"\"\n\techo \" create flowunit:\"\n\techo \"   $(basename $0) -flowunit -name \\\"flowunit\\\" -lang python -input name=in1,device=cuda -output -\"\n\techo \" create service plugin:\"\n\techo \"   $(basename $0) -service-plugin -name=\\\"project\\\"\"\n\techo \"\"\n}\n\nechoerr() {\n\techo \"$@\" 1>&2\n}\n\ncreate_project_template()\n{\n\tif [ -d \"${project_path}\" ]; then\n\t\techoerr \"Project '${project_path}' already exists.\"\n\t\treturn 2\n\tfi\n\n\tmkdir -p \"${project_path}\"\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Create project directory ${project_path} failed.\"\n\t\treturn 1\n\tfi\n\n\tif [ ! -d \"$MODELBOX_TEMPLATE_PATH/project/${template}\" ]; then\n\t\techoerr \"Project template $MODELBOX_TEMPLATE_PATH/project/${template} not exists, please input valid project template\"\n\t\treturn 1\n\tfi\n\n\tcp -r \"$MODELBOX_TEMPLATE_PATH/project/${template}\"/. \"${project_path}\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Copy project template failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/example/$project_name/g\" \"${project_path}/CMakeLists.txt\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"sed CMakeLists.txt failed.\"\n\t\treturn 1\n\tfi\n\n\tmv ${project_path}/src/graph/${template}.toml ${project_path}/src/graph/${project_name}.toml 2>/dev/null\n\tmv ${project_path}/src/graph/${template}.json ${project_path}/src/graph/${project_name}.json 2>/dev/null\n\n\tproject_dir_name=$(basename ${project_path})\n\tdriver_dir=\"${APPLICATION_PATH}/${project_name}\"\n\tsed -i \"s#@APPLICATION_PATH@#${driver_dir}#g\" ${project_path}/src/graph/*.toml 2>/dev/null\n\tsed -i \"s#@APPLICATION_PATH@#${driver_dir}#g\" ${project_path}/src/graph/*.json 2>/dev/null\n\t\n\tproject_real_path=$(realpath ${project_path})\n\tsed -i \"s#@PROJECT_PATH@#${project_real_path}#g\" ${project_path}/src/graph/*.toml 2>/dev/null\n\tsed -i \"s#@PROJECT_PATH@#${project_real_path}#g\" ${project_path}/src/graph/*.json 2>/dev/null\n\n\tsed -i \"s# graph_.* {# graph_${project_name} {#g\" ${project_path}/src/graph/*.toml 2>/dev/null\n\tsed -i \"s# graph_.* {# graph_${project_name} {#g\" ${project_path}/src/graph/*.json 2>/dev/null\n\n\tchmod 600 ${project_path}/src/graph/*.toml 2>/dev/null\n\tchmod 600 ${project_path}/src/graph/*.json 2>/dev/null\n\n\techo \"Create project at '${project_path}' success\"\n\n\trm -f ${project_path}/desc.toml\n    \n\treturn 0\n}\n\nget_project_name() {\n\tif [ -z \"${project_path}\" ]; then\n\t\techoerr \"project path is not set\"\n\t\treturn 
1\n\tfi\n\n\tCMAKEFILE=\"${project_path}\"/CMakeLists.txt \n\n\tif [ ! -e \"${CMAKEFILE}\" ]; then\n\t\techoerr \"${project_path} is not a project directory\"\n\t\treturn 1\n\tfi\n\tgrep \"^ *project\" \"${CMAKEFILE}\" | sed 's/^ *project *(\\(.*\\))/\\1/g' 2>/dev/null\n\treturn $?\n}\n\nlist_project_template () {\n\tfor tempdir in $MODELBOX_TEMPLATE_PATH/project/*; do\n\t\tif [ ! -d \"$tempdir\" ]; then\n\t\t\tcontinue\n\t\tfi\n\n\t\tif [ ! -f \"${tempdir}/desc.toml\" ]; then\n\t\t\tcontinue\n\t\tfi\n\n\t\tname=$(basename ${tempdir})\n\t\tdesc=$(cat ${tempdir}/desc.toml | grep -E \"^desc *=\" | sed \"s/desc *= *\\\"\\(.*\\)\\\"/\\1/g\")\n\t\tprintf \"%-20.20s %s\\n\" \"$name\" \"$desc\"\n\tdone\n\treturn 0\n}\n\ncreate_project() {\n\tproject_name=\"\"\n\tproject_path=\"$DEFAULT_PROJECT_PATH\"\n\tproject_root_path=\"\"\n\ttemplate=\"empty\"\n\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\t\tcase \"$arg_option\" in\n\t\t--name)\n\t\t\tproject_name=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--template)\n\t\t\ttemplate=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--getname)\n\t\t\tproject_path=\"$arg_value\"\n\t\t\tget_project_name \"$project_path\"\n\t\t\treturn $?\n\t\t\tshift \"$shift_count\" ;;\n\t\t--list-template)\n\t\t\tlist_project_template\n\t\t\treturn $?\n\t\t\tshift \"$shift_count\" ;;\n\t\t--path)\n\t\t\tproject_path=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--rootpath)\n\t\t\tproject_root_path=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t-- )\n\t\t\techo \"invalid arg -\"\n\t\t\tshift ;;\n\t\t* )\n\t\t\techo \"invalid arg $arg_option\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\tif [ -z \"${project_name}\" ]; then\n\t\techoerr \"Project name is not set\"\n\t\treturn 1\n\tfi\n\n\tif [ \"${project_path}\" = \"$DEFAULT_PROJECT_PATH\" ] && [ -n \"$project_root_path\" ]; then\n\t\tproject_path=\"${project_root_path}/${project_name}\"\n\t\tif [ ! 
-d \"\" ]; then\n\t\t\tmkdir -p \"${project_root_path}\"\n\t\tfi\n\tfi\n\n\tcreate_project_template \"$project_name\" \"${project_path}\" \"$template\"\n\tret=$?\n\tif [ $ret -ne 0 ] && [ $ret -ne 2 ]; then\n\t\techoerr \"Create project template failed.\"\n\t\tif [ -n \"${project_path}\" ]; then\n\t\t\trm -fr \"${project_path}\"\n\t\tfi\n\t\treturn 1\n\tfi\n\n\treturn $ret\n}\n\ncreate_service_plugin_template() {\n\tTEMPLATE_PATH=\"$MODELBOX_TEMPLATE_PATH/service-plugin\"\n\tSOURCE_DIR=\"${project_path}/src/service-plugin/${plugin_name}\"\n\tCMAKE_FILE=\"${SOURCE_DIR}/CMakeLists.txt\"\n\tSOURCE_FILE=\"${SOURCE_DIR}/${plugin_name}.cc\"\n\tHEAD_FILE=\"${SOURCE_DIR}/${plugin_name}.h\"\n\n\tif [ ! -d \"$TEMPLATE_PATH\" ]; then\n\t\techoerr \"Server plugin template ${TEMPLATE_PATH} not exist.\"\n\t\treturn 1\n\tfi\n\n\tif [ -d \"$SOURCE_DIR\" ]; then\n\t\techoerr \"Server plugin source directory $SOURCE_DIR already exists.\"\n\t\treturn 2\n\tfi\n\n\tmkdir -p \"$SOURCE_DIR\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Create server plugin source directory $SOURCE_DIR failed.\"\n\t\treturn 1\n\tfi\n\n\tcp -a \"$TEMPLATE_PATH\"/* \"$SOURCE_DIR\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Copy template source $TEMPLATE_PATH failed.\"\n\t\treturn 1\n\tfi\n\n\tfor example_name in \"$SOURCE_DIR\"/example*; do\n\t\tnewname=$(basename \"$example_name\" | sed \"s/example/${plugin_name}/g\")\n\t\tmv \"$example_name\" \"$SOURCE_DIR/$newname\"\n\t\tif [ $? -ne 0 ]; then\n\t\t\techoerr \"Rename template file failed.\"\n\t\t\treturn 1\n\t\tfi\n\tdone\n\n\tfor file in ${CMAKE_FILE} ${SOURCE_FILE} ${HEAD_FILE}; do\n\t\tsed -i \"s/example/${plugin_name}/g\" \"$file\"\n\t\tif [ $? -ne 0 ]; then\n\t\t\techoerr \"Setup cmake file failed.\"\n\t\t\treturn 1\n\t\tfi\n\n\t\tsed -i \"s/Example/${plugin_name}/g\" \"$file\"\n\t\tif [ $? 
-ne 0 ]; then\n\t\t\techoerr \"Setup source file failed.\"\n\t\t\treturn 1\n\t\tfi\n\tdone\n\n\techo \"Create server plugin at $SOURCE_DIR success\"\n\treturn 0\t\n}\n\ncreate_service_plugin() {\n\tplugin_name=\"\"\n\tproject_path=\"$DEFAULT_PROJECT_PATH\"\n\tSOURCE_DIR=\"\"\n\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\t\tcase \"$arg_option\" in\n\t\t--name)\n\t\t\tplugin_name=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--project-path)\n\t\t\tproject_path=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t-- )\n\t\t\techo \"invalid arg -\"\n\t\t\tshift ;;\n\t\t* )\n\t\t\techo \"invalid arg $arg_option\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\tif [ -z \"${plugin_name}\" ]; then\n\t\techoerr \"Plugin name is not set\"\n\t\treturn 1\n\tfi\n\n\tis_project_path_valid \"${project_path}\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Project path ${project_path} is invalid, create project first.\"\n\t\treturn 1\n\tfi\n\n\tcreate_service_plugin_template \"${plugin_name}\" \"${project_path}\"\n\tret=$?\n\tif [ $ret -ne 0 ] && [ $ret -ne 2 ]; then\n\t\techoerr \"Create plugin template failed.\"\n\t\tif [ -n \"${SOURCE_DIR}\" ]; then\n\t\t\trm -fr \"${SOURCE_DIR}\"\n\t\tfi\n\t\treturn 1\n\tfi\n\n\treturn $ret\n}\n\ntoml_data_init() {\n\tTOML_DATA=\"\"\n}\n\ntoml_data_append() {\n\tTOML_DATA=\"${TOML_DATA}$*\\n\"\n}\n\ntoml_data_complete() {\n\techo -e \"${TOML_DATA}\" > \"$1\"\n\tchmod 600 \"$1\"\n}\n\ntoml_data_write_base() {\n\ttoml_data_append \"[base]\"\n\ttoml_data_append \"name = \\\"${flowunit_name}\\\" # The FlowUnit name \"\n\ttoml_data_append \"device = \\\"${flowunit_device}\\\" # The device the flowunit runs on,cpu,cuda,ascend\"\n\ttoml_data_append \"version = \\\"1.0.0\\\" # The version of the flowunit\"\n\ttoml_data_append \"description = \\\"${flowunit_desc}\\\" # The description of the flowunit\"\n\ttoml_data_append \"group_type = \\\"$group_type\\\" # flowunit group attribution \"\n\tif [ -n 
\"$flowunit_type\" ]; then\n\t\ttoml_data_append \"${flowunit_type} = true # flowunit type\"\n\tfi\n}\n\ntoml_data_write_inputports() {\n\ttoml_data_append \"# Input ports description\"\n\ttoml_data_append \"[input]\"\n\ti=1\n\tfor input in \"${flowunit_inputs[@]}\"; do\n\t\ttoml_data_append \"[input.input$i]\"\n\t\ti=$((i+1))\n\t\tIFS=',' read -ra options <<< \"$input\"\n\t\tfor option in \"${options[@]}\"; do\n\t\t\tIFS=\"=\" read -r -a values <<< \"$option\"\n\t\t\ttoml_data_append \"${values[0]} = \\\"${values[1]}\\\"\"\n\t\tdone\n\t\ttoml_data_append \"\"\n\tdone\n}\n\ntoml_data_write_outputports() {\n\ttoml_data_append \"\"\n\ttoml_data_append \"# Output ports description\"\n\ttoml_data_append \"[output]\"\n\ti=1\n\tfor output in \"${flowunit_outputs[@]}\"; do\n\t\ttoml_data_append \"[output.output$i]\"\n\t\ti=$((i+1))\n\t\tIFS=',' read -ra options <<< \"$output\"\n\t\tfor option in \"${options[@]}\"; do\n\t\t\tIFS=\"=\" read -r -a values <<< \"$option\"\n\t\t\ttoml_data_append \"${values[0]} = \\\"${values[1]}\\\"\"\n\t\tdone\n\t\ttoml_data_append \"\"\n\tdone\n}\n\nprepare_flowunit_base() {\n\tTEMPLATE_PATH=\"$MODELBOX_TEMPLATE_PATH/flowunit/$1\"\n\tSOURCE_DIR=\"${project_path}/src/flowunit/${flowunit_name}\"\n\n\tif [ ! -d \"$TEMPLATE_PATH\" ]; then\n\t\techoerr \"Flowunit template not exist.\"\n\t\treturn 1\n\tfi\n\n\tif [ -d \"$SOURCE_DIR\" ]; then\n\t\techoerr \"Flowunit source directory $SOURCE_DIR already exists.\"\n\t\treturn 2\n\tfi\n\n\tmkdir -p \"$SOURCE_DIR\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Create flowunit source directory $SOURCE_DIR failed.\"\n\t\treturn 1\n\tfi\n\n\tcp -a \"$TEMPLATE_PATH\"/* \"$SOURCE_DIR\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Copy template source $TEMPLATE_PATH failed.\"\n\t\treturn 1\n\tfi\n\n\tfor example_name in \"$SOURCE_DIR\"/example*; do\n\t\tnewname=$(basename \"$example_name\" | sed \"s/example/${flowunit_name}/g\")\n\t\tmv \"$example_name\" \"$SOURCE_DIR/$newname\"\n\t\tif [ $? 
-ne 0 ]; then\n\t\t\techoerr \"Rename template file failed.\"\n\t\t\treturn 1\n\t\tfi\n\tdone\n\n\treturn 0\n}\n\nsetup_flowunit_python_source() {\n\tSOURCE_FILE=\"${SOURCE_DIR}/${flowunit_name}.py\"\n\tclassname=${flowunit_name^}\n\tCMAKE_FILE=\"${SOURCE_DIR}/CMakeLists.txt\"\n\n\tsed -i \"s/ExampleFlowUnit/${classname}FlowUnit/g\" \"$SOURCE_FILE\" \n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/example/${flowunit_name}/g\" \"$CMAKE_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tspace=$(grep \"in_data = data_context.input\" \"$SOURCE_FILE\" | sed 's/\\([ |\\t]*\\)in_data = data_context.input.*/\\1/g')\n\tinput_flag=\"# input data\"\n\tsed -i \"s/in_data = data_context.input.*/${input_flag}/\" \"$SOURCE_FILE\"\n\ti=1\n\tfor port in \"${flowunit_input_names[@]}\"; do\n\t\tif [ ${#flowunit_inputs[@]} -eq 1 ]; then\n\t\t\tline=\"in_data = data_context.input(\\\"${port}\\\")\"\n\t\telse\n\t\t\tline=\"in_data_${i} = data_context.input(\\\"${port}\\\")\"\n\t\tfi\n\t\tsed -i \"/${input_flag}/a \\\\${space}${line}\" \"$SOURCE_FILE\"\n\t\ti=$((i+1))\n\tdone\n\n\t\n\toutput_flag=\"# output data\"\n\tsed -i \"s/out_data = data_context.output.*/${output_flag}/\" \"$SOURCE_FILE\"\n\ti=1\n\tfor port in \"${flowunit_output_names[@]}\"; do\n\t\tif [ ${#flowunit_outputs[@]} -eq 1 ]; then\n\t\t\tline=\"out_data = data_context.output(\\\"${port}\\\")\"\n\t\telse\n\t\t\tline=\"out_data_${i} = data_context.output(\\\"${port}\\\")\"\n\t\tfi\n\t\tsed -i \"/${output_flag}/a \\\\${space}${line}\" \"$SOURCE_FILE\"\n\t\ti=$((i+1))\n\tdone\n\t\n\treturn 0\n}\n\nsetup_flowunit_python_toml() {\n\tTOML_FILE=\"${SOURCE_DIR}/${flowunit_name}.toml\"\n\tclassname=\"${flowunit_name^}FlowUnit\"\n\n\ttoml_data_init\n\ttoml_data_write_base\n\ttoml_data_append \"entry = \\\"${flowunit_name}@${classname}\\\" # Python flowunit entry function\"\n\ttoml_data_append \"type = \\\"python\\\" # Fixed 
value\"\n\ttoml_data_append \"\"\n\ttoml_data_write_inputports\n\ttoml_data_write_outputports\n\n\ttoml_data_complete \"$TOML_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Write toml file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\ncreate_flowunit_python() {\n\tprepare_flowunit_base \"python\"\n\tret=$?\n\tif [ $ret -ne 0 ]; then\n\t\treturn $ret\n\tfi\n\n\tsetup_flowunit_python_source\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup python source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsetup_flowunit_python_toml\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup python toml file failed.\"\n\t\treturn 1\n\tfi\n\n\techo \"create python flowunit at $SOURCE_DIR success\"\n\treturn 0\n}\n\nsetup_flowunit_cpp_source() {\n\tHEADER_FILE=\"${SOURCE_DIR}/${flowunit_name}.h\"\n\tSOURCE_FILE=\"${SOURCE_DIR}/${flowunit_name}.cc\"\n\tCMAKE_FILE=\"${SOURCE_DIR}/CMakeLists.txt\"\n\tHEAD_MACRO=\"${flowunit_name^^}_${flowunit_device^^}\"\n\tclassname=${flowunit_name^}\n\tsed -i \"s/ExampleFlowUnit/${classname}FlowUnit/g\" \"$HEADER_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/ExampleFlowUnit/${classname}FlowUnit/g\" \"$SOURCE_FILE\" \n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/example.h/${flowunit_name}.h/g\" \"$SOURCE_FILE\" \n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/cpu/${flowunit_device}/g\" \"$HEADER_FILE\" \"$CMAKE_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/example/${flowunit_name}/g\" \"$HEADER_FILE\" \"$CMAKE_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/EXAMPLE_CPU/${HEAD_MACRO}/g\" \"$HEADER_FILE\"\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsed -i \"s/@Brief: A .*/@Brief: ${flowunit_desc}\\\";/g\" \"$HEADER_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\t# add input port information\n\tspace=$(grep \"auto input_bufs = data_ctx->Input\" \"$SOURCE_FILE\" | sed 's/\\([ |\\t]*\\)auto input_bufs = data_ctx->Input.*/\\1/g')\n\tspace_desc=$(grep \"desc.AddFlowUnitInput(modelbox::FlowUnitInput\" \"$SOURCE_FILE\" | sed 's/\\([ |\\t]*\\)desc.AddFlowUnitInput(modelbox::FlowUnitInput.*/\\1/g')\n\tinput_flag=\"// input data\"\n\tinput_port_flag=\"// input port\"\n\tsed -i \"s#auto input_bufs = data_ctx->Input.*#${input_flag}#\" \"$SOURCE_FILE\"\n\tsed -i \"s#desc.AddFlowUnitInput(modelbox::FlowUnitInput.*#${input_port_flag}#\" \"$SOURCE_FILE\"\n\ti=1\n\tfor port in \"${flowunit_input_names[@]}\"; do\n\t\tif [ ${#flowunit_inputs[@]} -eq 1 ]; then\n\t\t\tline=\"auto input_bufs = data_ctx->Input(\\\"${port}\\\");\"\n\t\telse\n\t\t\tline=\"auto input_bufs_${i} = data_ctx->Input(\\\"${port}\\\");\"\n\t\tfi\n\t\tsed -i \"\\#${input_flag}#a \\\\${space}${line}\" \"$SOURCE_FILE\"\n\t\ti=$((i+1))\n\n\t\tline=\"desc.AddFlowUnitInput(modelbox::FlowUnitInput(\\\"${port}\\\", FLOWUNIT_TYPE));\"\n\t\tsed -i \"\\#${input_port_flag}#a \\\\${space_desc}${line}\" \"$SOURCE_FILE\"\n\tdone\n\n\t# add output port information\n\toutput_flag=\"// output data\"\n\toutput_port_flag=\"// output port\"\n\tsed -i \"s#auto output_bufs = data_ctx->Output.*#${output_flag}#\" \"$SOURCE_FILE\"\n\tsed -i \"s#desc.AddFlowUnitOutput(modelbox::FlowUnitOutput.*#${output_port_flag}#\" \"$SOURCE_FILE\"\n\ti=1\n\tfor port in \"${flowunit_output_names[@]}\"; do\n\t\tif [ ${#flowunit_outputs[@]} -eq 1 ]; then\n\t\t\tline=\"auto output_bufs = data_ctx->Output(\\\"${port}\\\");\"\n\t\telse\n\t\t\tline=\"auto output_bufs_${i} = data_ctx->Output(\\\"${port}\\\");\"\n\t\tfi\n\n\t\tsed -i \"\\#${output_flag}#a 
\\\\${space}${line}\" \"$SOURCE_FILE\"\n\t\ti=$((i+1))\n\n\t\tline=\"desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\\\"${port}\\\", FLOWUNIT_TYPE));\"\n\t\tsed -i \"\\#${output_port_flag}#a \\\\${space_desc}${line}\" \"$SOURCE_FILE\"\n\tdone\n\n\treturn 0\n}\n\nsetup_flowunit_cpp_toml() {\n\tTOML_FILE=\"${SOURCE_DIR}/${flowunit_name}.toml\"\n\tclassname=\"${flowunit_name^}FlowUnit\"\n\n\ttoml_data_init\n\ttoml_data_write_base\n\ttoml_data_append \"type = \\\"c++\\\" # Fixed value\"\n\ttoml_data_append \"\"\n\ttoml_data_write_inputports\n\ttoml_data_write_outputports\n\n\ttoml_data_complete \"$TOML_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Write toml file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\ncreate_flowunit_cpp() {\n\tprepare_flowunit_base \"c++\"\n\tret=$?\n\tif [ $ret -ne 0 ]; then\n\t\treturn $ret\n\tfi\n\n\tsetup_flowunit_cpp_source\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup cpp source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsetup_flowunit_cpp_toml\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup cpp toml file failed.\"\n\t\treturn 1\n\tfi\n\n\techo \"create c++ flowunit at $SOURCE_DIR success\";\n\treturn 0;\n}\n\nsetup_flowunit_yolo_source() {\n\tCMAKE_FILE=\"${SOURCE_DIR}/CMakeLists.txt\"\n\n\tsed -i \"s/example/${flowunit_name}/g\" \"$CMAKE_FILE\"\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\nsetup_flowunit_yolo_toml() {\n\tTOML_FILE=\"${SOURCE_DIR}/${flowunit_name}.toml\"\n\n\ttoml_data_init\n\ttoml_data_write_base\n\ttoml_data_append \"type = \\\"yolo_postprocess\\\" # Fixed value\"\n\ttoml_data_append \"virtual_type = \\\"$virtual_type\\\" # yolo type 'yolov3_postprocess', 'yolov5_postprocess' \"\n\ttoml_data_append \"\"\n\n\ttoml_data_append \"[config]\"\n\ttoml_data_append \"# input_width = 800\"\n\ttoml_data_append \"# input_height = 480\"\n\ttoml_data_append \"# class_num = 1\"\n\ttoml_data_append \"# score_threshold = [0.6,0.7]\"\n\ttoml_data_append \"# nms_threshold = [0.45,0.3]\"\n\ttoml_data_append \"# yolo_output_layer_num = 2\"\n\ttoml_data_append \"# yolo_output_layer_wh = [25,15,50,30]\"\n\ttoml_data_append \"# anchor_num = [4,4]\"\n\ttoml_data_append \"# anchor_biases = [100.0,72.0,173.12,55.04,165.12,132.0,280.0,252.0,10.0,8.0,20.0,16.0,30.0,24.0,67.0,56.0]\"\n\ttoml_data_append \"\"\n\n\ttoml_data_write_inputports\n\ttoml_data_write_outputports\n\n\ttoml_data_complete \"$TOML_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Write toml file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\nflowunit_yolo_valid_virtual_type() {\n\tcase \"$1\" in\n\tyolov3_postprocess | yolov5_postprocess)\n\t\treturn 0\n\t\t;;\n\t*)\n\t\treturn 1\n\tesac\n\n\treturn 0\n}\n\ncreate_flowunit_yolo() {\n\tvirtual_type=\"\"\n\targs=\"\"\n\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\n\t\tcase \"$arg_option\" in\n\t\t--virtual-type)\n\t\t\tvirtual_type=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t* ) \n\t\t\tappendargs \"$1\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\tif [ -z \"$virtual_type\" ]; then\n\t\techoerr \"virtual type for yolo is not set\"\n\t\treturn 1\n\tfi\n\n\tflowunit_yolo_valid_virtual_type \"$virtual_type\"\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Virtual type $virtual_type is invalid\"\n\t\treturn 1\n\tfi\n\n\tprepare_flowunit_base \"yolo\"\n\tret=$?\n\tif [ $ret -ne 0 ]; then\n\t\treturn $ret\n\tfi\n\n\tsetup_flowunit_yolo_source\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup yolo source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsetup_flowunit_yolo_toml\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup yolo toml file failed.\"\n\t\treturn 1\n\tfi\n\n\techo \"create yolo flowunit at $SOURCE_DIR success\";\n\treturn 0;\n}\n\nsetup_flowunit_infer_source() {\n\tCMAKE_FILE=\"${SOURCE_DIR}/CMakeLists.txt\"\n\n\tsed -i \"s/example/${flowunit_name}/g\" \"$CMAKE_FILE\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup source file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\nsetup_flowunit_infer_toml() {\n\tTOML_FILE=\"${SOURCE_DIR}/${flowunit_name}.toml\"\n\n\ttoml_data_init\n\ttoml_data_write_base\n\ttoml_data_append \"type = \\\"inference\\\" # Fixed value\"\n\ttoml_data_append \"entry = \\\"${modelfile}\\\" # model file path\"\n\ttoml_data_append \"virtual_type = \\\"$virtual_type\\\" # inference engine type: 'tensorflow', 'tensorrt', 'torch', 'acl', 'mindspore' \"\n\ttoml_data_append \"\"\n\n\tif [ -n \"$plugin\" ]; then\n\t\ttoml_data_append \"[config]\"\n\t\ttoml_data_append \"plugin = \\\"$plugin\\\"\"\n\t\ttoml_data_append \"\"\n\tfi\n\n\ttoml_data_write_inputports\n\ttoml_data_write_outputports\n\n\ttoml_data_complete \"$TOML_FILE\"\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Write toml file failed.\"\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\nflowunit_valid_device() {\n\tcase \"$1\" in\n\tcpu | cuda | ascend)\n\t\treturn 0\n\t\t;;\n\t*)\n\t\treturn 1\n\tesac\n\n\treturn 0\n}\n\nflowunit_infer_valid_virtual_type() {\n\tcase \"$1\" in\n\ttensorrt | tensorflow | torch | acl |mindspore)\n\t\treturn 0\n\t\t;;\n\t*)\n\t\treturn 1\n\tesac\n\n\treturn 0\n}\n\nflowunit_type_valid() {\n\tif [ -z \"$1\" ]; then\n\t\treturn 0\n\tfi\n\n\tcase \"$1\" in\n\tstream | condition | collapse | expand | normal | collapse_all | loop)\n\t\treturn 0\n\t\t;;\n\t*)\n\t\treturn 1\n\tesac\n\n\treturn 0\t\n}\n\ncreate_flowunit_infer() {\n\tmodelfile=\"\"\n\tvirtual_type=\"\"\n\targs=\"\"\n\tplugin=\"\"\n\tdo_copy=\"\"\n\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\n\t\tcase \"$arg_option\" in\n\t\t--model)\n\t\t\tmodelfile=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--copy-model)\n\t\t\tdo_copy=\"true\"\n\t\t\tshift 1;;\n\t\t--virtual-type)\n\t\t\tvirtual_type=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--plugin)\n\t\t\tplugin=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t* ) \n\t\t\tappendargs \"$1\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\tif [ -z \"$modelfile\" ]; then\n\t\techoerr \"Model file path is not set\"\n\t\treturn 1\n\tfi\n\n\tflowunit_infer_valid_virtual_type \"$virtual_type\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Virtual type $virtual_type is invalid\"\n\t\treturn 1\n\tfi\n\n\tprepare_flowunit_base \"infer\"\n\tret=$?\n\tif [ $ret -ne 0 ]; then\n\t\treturn $ret\n\tfi\n\n\tif [ \"$do_copy\" = \"true\" ]; then\n\t\tif [ ! -e \"$modelfile\" ]; then\n\t\t\techoerr \"Model file $modelfile is not exists\"\n\t\t\treturn 1\n\t\tfi\n\n\t\tmodelfilename=$(basename \"$modelfile\")\n\t\tcp \"$modelfile\" \"${SOURCE_DIR}/${modelfilename}\"\n\t\tif [ $? 
-ne 0 ]; then\n\t\t\techoerr \"Copy model file to $SOURCE_DIR failed\"\n\t\t\treturn 1\n\t\tfi\n\n\t\tmodelfile=${modelfilename}\n\tfi\n\n\tif [ -z \"$group_type\" ] || [ \"$group_type\" = \"generic\" ]; then\n\t\tgroup_type=\"inference\"\n\tfi\n\n\tsetup_flowunit_infer_source\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup infer source file failed.\"\n\t\treturn 1\n\tfi\n\n\tsetup_flowunit_infer_toml\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Setup infer toml file failed.\"\n\t\treturn 1\n\tfi\n\n\techo \"create infer flowunit at $SOURCE_DIR success\";\n\n\treturn 0\n}\n\nflowunit_port_valid_check() {\n\tcase \"$1\" in\n\tname)\n\t\treturn 0\n\t\t;;\n\ttype)\n\t\treturn 0\n\t\t;;\n\tdevice)\n\t\treturn 0\n\t\t;;\n\t*)\n\t\treturn 1\n\tesac\n\n\treturn 0\t\n}\n\nflowunit_parser_ports() {\n\tfor output in \"${flowunit_inputs[@]}\"; do\n\t\tIFS=',' read -ra options <<< \"$output\"\n\t\tfor option in \"${options[@]}\"; do\n\t\t\tIFS=\"=\" read -r -a values <<< \"$option\"\n\t\t\tflowunit_port_valid_check \"${values[0]}\" \"${values[1]}\"\n\t\t\tif [ $? -ne 0 ]; then\n\t\t\t\techo \"invalid input port option $option\"\n\t\t\t\treturn 1\n\t\t\tfi\n\n\t\t\tcase \"${values[0]}\" in\n\t\t\tname)\n\t\t\t\tflowunit_input_names+=(\"${values[1]}\")\n\t\t\t\t;;\n\t\t\t*)\n\t\t\t\t;;\n\t\t\tesac\n\t\tdone\n\tdone\n\n\tfor output in \"${flowunit_outputs[@]}\"; do\n\t\tIFS=',' read -ra options <<< \"$output\"\n\t\tfor option in \"${options[@]}\"; do\n\t\t\tIFS=\"=\" read -r -a values <<< \"$option\"\n\t\t\tflowunit_port_valid_check \"${values[0]}\" \"${values[1]}\"\n\t\t\tif [ $? -ne 0 ]; then\n\t\t\t\techo \"invalid output port option $option\"\n\t\t\t\treturn 1\n\t\t\tfi\n\t\t\tcase \"${values[0]}\" in\n\t\t\tname)\n\t\t\t\tflowunit_output_names+=(\"${values[1]}\")\n\t\t\t\t;;\n\t\t\t*)\n\t\t\t\t;;\n\t\t\tesac\n\t\tdone\n\tdone\n}\n\nis_project_path_valid() {\n\tlocal project=$1\n\tif [ ! -d \"$project/src\" ]; then\n\t\treturn 1\n\tfi\n\n\tif [ ! 
-e \"$project/CMakeLists.txt\" ]; then\n\t\treturn 1\n\tfi\n\n\treturn 0\n}\n\ncreate_flowunit() {\n\targs=\"\"\n\tflowunit_lang=\"\"\n\tflowunit_name=\"\"\n\tproject_path=\"$DEFAULT_PROJECT_PATH\"\n\tflowunit_device=\"cpu\"\n\tflowunit_desc=\"A flowunit for modelbox\"\n\tflowunit_inputs=()\n\tflowunit_outputs=()\n\tflowunit_input_names=()\n\tflowunit_output_names=()\n\tflowunit_type=\"\"\n\tgroup_type=\"generic\"\t\n\tSOURCE_DIR=\"\"\n\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\n\t\tcase \"$arg_option\" in\n\t\t--lang)\n\t\t\tflowunit_lang=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--type)\n\t\t\tflowunit_type=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--group-type)\n\t\t\tgroup_type=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--name)\n\t\t\tflowunit_name=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--desc)\n\t\t\tflowunit_desc=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--device)\n\t\t\tflowunit_device=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--project-path)\n\t\t\tproject_path=\"$arg_value\"\n\t\t\tshift \"$shift_count\" ;;\n\t\t--input)\n\t\t\tif [ \"$arg_value\" = \"-\" ]; then\n\t\t\t\targ_value=\"name=in\"\n\t\t\tfi\n\t\t\tflowunit_inputs+=(\"$arg_value\")\n\t\t\tshift \"$shift_count\" ;;\n\t\t--output)\n\t\t\tif [ \"$arg_value\" = \"-\" ]; then\n\t\t\t\targ_value=\"name=out\"\n\t\t\tfi\n\t\t\tflowunit_outputs+=(\"$arg_value\")\n\t\t\tshift \"$shift_count\" ;;\n\t\t* ) \n\t\t\tappendargs \"$1\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\tif [ ${#flowunit_inputs[@]} -eq 0 ] && [ ${#flowunit_outputs[@]} -eq 0 ]; then\n\t\techoerr \"Flowunit input or output port is not set.\"\n\t\treturn 1\n\tfi\n\n\tflowunit_parser_ports\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Read port name failed\"\n\t\treturn 1\n\tfi\n\n\tflowunit_type_valid\n\tif [ $? 
-ne 0 ]; then\n\t\techoerr \"Flowunit type is invalid.\"\n\t\treturn 1\n\tfi\n\n\tis_project_path_valid \"${project_path}\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Project path ${project_path} is invalid, create project first.\"\n\t\treturn 1\n\tfi\n\n\tflowunit_valid_device \"${flowunit_device}\"\n\tif [ $? -ne 0 ]; then\n\t\techoerr \"Input device is invalid.\"\n\t\treturn 1\n\tfi\n\n\tif [ -z \"$flowunit_name\" ]; then\n\t\techoerr \"Flowunit name is not set.\"\n\t\treturn 1\n\tfi\n\n\tif [ -z \"$flowunit_lang\" ]; then\n\t\techoerr \"Flowunit program language is not set\".\n\t\treturn 1\n\tfi\n\n\tif [ ! -d \"${project_path}\" ]; then \n\t\techoerr \"Project ${project_path} is not exist, please create project first, or set project path to create.\"\n\t\treturn 1\n\tfi\n\n\tappendargs \"$@\"\n\teval set -- \"${args}\"\n\n\tcase \"$flowunit_lang\" in \n\tpython)\n\t\tcreate_flowunit_python \"$@\"\n\t\t;;\n\tc++)\n\t\tcreate_flowunit_cpp \"$@\"\n\t\t;;\n\tinfer)\n\t\tcreate_flowunit_infer \"$@\"\n\t\t;;\n\tyolo)\n\t\tcreate_flowunit_yolo \"$@\"\n\t\t;;\n\t*)\n\t\techoerr \"Program language '$flowunit_lang' is not supported.\"\n\t\treturn 1\n\t\t;;\n\tesac\n\n\tret=$?\n\tif [ $ret -ne 0 ] && [ $ret -ne 2 ] && [ -n \"$SOURCE_DIR\" ]; then\n\t\trm -fr \"$SOURCE_DIR\"\n\t\treturn 1\n\tfi\n\n\treturn $ret\n}\n\nappendargs() {\n\ti=0\n\twhile [ $i -lt $# ]; do\n\t\ti=$((i+1))\n\t\teval ARG=\"\\${$i}\"\n\t\tpattern=\" |'\"\n\t\tif [[ \"$ARG\" =~ $pattern ]]; then\n\t\t\targs=\"$args \\\"$ARG\\\"\"\n\t\telse\n\t\t\targs=\"$args $ARG\"\n\t\tfi\n\tdone\n}\n\nparserarg() {\n\t# Support -o=v, --o=v -o v, --o v parameter forms\n\tIFS=\"=\" read -r -a arr <<< \"$1\"\n\n\targ_option=\"$1\"\n\targ_value=\"$2\"\n\tshift_count=2\n\n\tif [ -z \"$2\" ]; then\n\t\tshift_count=1\n\tfi\n\n\tif [ ${#arr[@]} -gt 1 ]; then\n\t\targ_option=\"${arr[0]}\"\n\t\targ_value=${1#\"$arg_option\"=}\n\t\tshift_count=1\n\tfi\n\n\tif [[ \"$arg_option\" = \"--\"* ]]; then\n\t\treturn\n\tfi\n\n\tif [[ 
\"$arg_option\" = \"-\"* ]]; then\n\t\targ_option=\"-${arg_option}\"\n\t\treturn\n\tfi\n\n\treturn\n}\n\nmain() {\n\targs=\"\"\n\n\tumask 0022\n\n\tif [ $# -lt 1 ]; then showhelp; exit 1; fi\n\twhile true; do\n\t\tif [ $# -le 0 ]; then\n\t\t\tbreak\n\t\tfi\n\n\t\tparserarg \"$1\" \"$2\"\n\n\t\tcase \"$arg_option\" in\n\t\t--project)\n\t\t\tshift\n\t\t\tappendargs \"$@\"\n\t\t\teval set -- \"${args}\"\n\t\t\tcreate_project \"$@\"\n\t\t\treturn $?\n\t\t\tbreak;;\n\t\t--flowunit)\n\t\t\tshift\n\t\t\tappendargs \"$@\"\n\t\t\teval set -- \"${args}\"\n\t\t\tcreate_flowunit \"$@\"\n\t\t\treturn $?\n\t\t\tbreak;;\n\t\t--service-plugin)\n\t\t\tshift\n\t\t\tappendargs \"$@\"\n\t\t\teval set -- \"${args}\"\n\t\t\tcreate_service_plugin \"$@\"\n\t\t\treturn $?\n\t\t\tbreak;;\n\t\t--help)\n\t\t\tshowhelp\n\t\t\treturn 0\n\t\t\t;;\n\t\t* ) \n\t\t\tappendargs \"$1\"\n\t\t\tshift ;;\n  \t\tesac\n\tdone\n\n\techoerr \"unknown command, please try $(basename $0) --help for more information.\"\n\n\treturn 1\n}\n\nmain \"$@\"\n"
  },
  {
    "path": "examples/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\n"
  },
  {
    "path": "examples/flowunit/c++/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_NAME \"example\")\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_SECT \"generic\")\n\nif(NOT DEFINED MODELBOX_PROJECT_VERSION_MAJOR)\n    # build from flowunit cmakelists, not from project cmakelists\n    set(MODELBOX_PROJECT_VERSION_MAJOR 0)\n    set(MODELBOX_PROJECT_VERSION_MINOR 0)\n    set(MODELBOX_PROJECT_VERSION_PATCH 1)\n    set(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/flowunit/${UNIT_SECT} )\n    set(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT} ) \n    set(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT} ) \n    if(${UNIT_DEVICE} STREQUAL \"cuda\" )\n        find_package(CUDA 10.0)\n    endif()\nendif()\n\n\nif(${UNIT_DEVICE} STREQUAL \"cuda\" )\n    if(NOT CUDA_FOUND)\n        message(FATAL_ERROR \"cannot find cuda in current environment ,please checkout you flowunit device type!\")\n    endif()\nendif()\n\nif(${UNIT_DEVICE} STREQUAL \"ascend\" )\n    if(NOT ACL_FOUND OR NOT DSMI_FOUND)\n        message(FATAL_ERROR \"cannot find acl or dsmi in current environment ,please checkout you flowunit device type!\")\n    endif()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\n\nset(UNIT_SHARED modelbox-${CMAKE_PROJECT_NAME}-${UNIT_DEVICE}-${UNIT_NAME})\nadd_library(${UNIT_SHARED} SHARED ${UNIT_SOURCE})\n 
\nset_target_properties(\n  ${UNIT_SHARED} PROPERTIES\n  SOVERSION ${MODELBOX_PROJECT_VERSION_MAJOR}\n  VERSION ${MODELBOX_PROJECT_VERSION_MAJOR}.${MODELBOX_PROJECT_VERSION_MINOR}.${MODELBOX_PROJECT_VERSION_PATCH}\n)\n\ntarget_link_libraries(${UNIT_SHARED} pthread)\ntarget_link_libraries(${UNIT_SHARED} rt)\ntarget_link_libraries(${UNIT_SHARED} dl)\ntarget_link_libraries(${UNIT_SHARED} modelbox)\ntarget_link_libraries(${UNIT_SHARED} modelbox-device-${UNIT_DEVICE})\n\ninstall(TARGETS ${UNIT_SHARED} \n    COMPONENT ${UNIT_COMPONENT}\n    RUNTIME DESTINATION ${RELEASE_PACKAGE_DIR_BIN}\n    LIBRARY DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    ARCHIVE DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    OPTIONAL\n    )\n\n# for test\nlist(APPEND UNIT_TEST_TARGET ${UNIT_SHARED})\nlist(APPEND UNIT_TEST_LINK_LIBRARIES ${UNIT_LINK_LIBRARY})\nset(UNIT_TEST_TARGET ${UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(UNIT_TEST_LINK_LIBRARIES ${UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "examples/flowunit/c++/example.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"example.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nExampleFlowUnit::ExampleFlowUnit() = default;\nExampleFlowUnit::~ExampleFlowUnit() = default;\n\nmodelbox::Status ExampleFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ExampleFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_bufs = data_ctx->Input(\"in\");\n  auto output_bufs = data_ctx->Output(\"out\");\n\n  // Your code goes here\n  //\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleFlowUnit::DataGroupPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleFlowUnit::DataGroupPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ExampleFlowUnit, desc) {\n  /*set flowunit attributes*/\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  
desc.SetFlowUnitGroupType(\"Undefined\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n  /*set flowunit parameter\n  example code:\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"parameter0\", \"int\", true, \"640\", \"parameter0 describe detail\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"parameter1\", \"int\", true, \"480\", \"parameter1 describe detail\"));\n  */\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(FLOWUNIT_VERSION);\n}\n"
  },
  {
    "path": "examples/flowunit/c++/example.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_EXAMPLE_CPU_H_\n#define MODELBOX_FLOWUNIT_EXAMPLE_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_NAME = \"example\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_VERSION = \"1.0.0\";\nconstexpr const char *FLOWUNIT_DESC = \"\\n\\t@Brief: A example flowunit on cpu\";\n\nclass ExampleFlowUnit : public modelbox::FlowUnit {\n public:\n  ExampleFlowUnit();\n  ~ExampleFlowUnit() override;\n\n  modelbox::Status Open(const std::shared_ptr<modelbox::Configuration> &opts);\n  modelbox::Status Close();\n  modelbox::Status DataPre(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status DataPost(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status DataGroupPre(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status DataGroupPost(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_EXAMPLE_CPU_H_\n"
  },
  {
    "path": "examples/flowunit/c++/example.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Basic config\n[base]\nname = \"example\" # The FlowUnit name\ndevice = \"cpu\" # The device the flowunit runs on，cpu，cuda，ascend。\nversion = \"1.0.0\" # The version of the flowunit\ndescription = \"description\" # The description of the flowunit\nentry = \"example@ExampleFlowUnit\" # Python flowunit entry function\ntype = \"c++\" # Fixed value\n\n# Flowunit Type\nstream = false # Whether the flowunit is a stream flowunit\ncondition = false # Whether the flowunit is a condition flowunit\ncollapse = false # Whether the flowunit is a collapse flowunit\ncollapse_all = false # Whether the flowunit will collapse all the data\nexpand = false #  Whether the flowunit is a expand flowunit\n\n# The default Flowunit config\n[config]\nitem = \"value\"\n\n# Input ports description\n[input]\n[input.input1] # Input port number, the format is input.input[N]\nname = \"in_1\" # Input port name\ndevice = \"cpu\" # Input port device\n\n# Output ports description\n[output]\n[output.output1] # Output port number, the format is output.output[N]\nname = \"out_1\" # Output port name\n"
  },
  {
    "path": "examples/flowunit/infer/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_NAME \"example\")\n\nfile(GLOB_RECURSE UNIT_SRC *.*)\nfile(GLOB_RECURSE CMAKELISTS_FILE \"CMakeLists.txt\")\nlist(REMOVE_ITEM UNIT_SRC ${CMAKELISTS_FILE})\ngroup_files(UNIT_SRC UNIT_CONF .*.toml \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\ngroup_files(UNIT_SRC UNIT_CONF .*.json \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\n\ninstall(FILES ${UNIT_SRC}\n        COMPONENT ${UNIT_COMPONENT}\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n\ninstall(FILES ${UNIT_TOML_JSON}\n        COMPONENT ${UNIT_COMPONENT}\n        PERMISSIONS OWNER_READ\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n"
  },
  {
    "path": "examples/flowunit/infer/example.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"example\"\ndevice = \"cuda\"  \nversion = \"1.0.0\"\ndescription = \"description\"\nentry = \"./model.pb\"  # model file path\ntype = \"inference\" \nvirtual_type = \"tensorflow\" # inference engine type: 'tensorflow', 'tensorrt', 'torch', 'acl', 'mindspore' \ngroup_type = \"Inference\"  # flowunit group attribution \n\n[config]\nplugin = \"\"  # it take effect when 'virtual_type' is 'tensorrt', it can be set to 'yolo' to provide upsampling layer \n\n# input port description, suporrt multiple input ports\n[input]\n[input.input1] # input port number, Format is input.input[N]\nname = \"in_1\" # input port name\ntype = \"float\" # input port data type ,e.g. float or int. optional.\n\n# output port description, suporrt multiple output ports\n[output]\n[output.output1] # output port number, Format is output.output[N]\nname = \"out_1\" # output port name\n\n"
  },
  {
    "path": "examples/flowunit/plugin/data_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"example\")\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"data_source_parser\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nif(NOT DEFINED MODELBOX_PROJECT_VERSION_MAJOR)\n    # build from flowunit cmakelists, not from project cmakelists\n    set(MODELBOX_PROJECT_VERSION_MAJOR 0)\n    set(MODELBOX_PROJECT_VERSION_MINOR 0)\n    set(MODELBOX_PROJECT_VERSION_PATCH 1)\n    set(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/flowunit/${UNIT_SECT} )\n    set(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT} ) \n    set(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT} ) \nendif()\n\nfile(GLOB_RECURSE MODELBOX_PLUGIN_SOURCE *.cpp *.cc *.c)\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_PROJECT_VERSION_MAJOR}\n    VERSION ${MODELBOX_PROJECT_VERSION_MAJOR}.${MODELBOX_PROJECT_VERSION_MINOR}.${MODELBOX_PROJECT_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
modelbox)\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT ${UNIT_COMPONENT}\n    RUNTIME DESTINATION ${RELEASE_PACKAGE_DIR_BIN}\n    LIBRARY DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    ARCHIVE DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    OPTIONAL\n    )\n"
  },
  {
    "path": "examples/flowunit/plugin/data_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/status.h>\n\n#include \"example.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<ExampleSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "examples/flowunit/plugin/data_source_parser/example.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"example.h\"\n\n#include <string>\n\nmodelbox::Status ExampleSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleSourceParser::Deinit() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ExampleSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  // Your code goes here\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleSourceParser::GetStreamType(const std::string &config,\n                                                    std::string &stream_type) {\n  stream_type = \"file\";  // \"file\" or  \"stream\"\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "examples/flowunit/plugin/data_source_parser/example.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_EXAMPLE_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_EXAMPLE_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\nconstexpr const char *DRIVER_NAME = \"example\";\nconstexpr const char *DRIVER_DESC = \"A data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nclass ExampleSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  ExampleSourceParser() = default;\n  virtual ~ExampleSourceParser() = default;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n\n  modelbox::Status GetStreamType(const std::string &config,\n                                 std::string &stream_type) override;\n};\n\nclass ExampleSourceParserFactory : public modelbox::DriverFactory {\n public:\n  ExampleSourceParserFactory() = default;\n  virtual ~ExampleSourceParserFactory() = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    
std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<ExampleSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_EXAMPLE_CPU_H_\n"
  },
  {
    "path": "examples/flowunit/plugin/output_broker/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"example\")\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"output_broker\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nif(NOT DEFINED MODELBOX_PROJECT_VERSION_MAJOR)\n    # build from flowunit cmakelists, not from project cmakelists\n    set(MODELBOX_PROJECT_VERSION_MAJOR 0)\n    set(MODELBOX_PROJECT_VERSION_MINOR 0)\n    set(MODELBOX_PROJECT_VERSION_PATCH 1)\n    set(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/flowunit/${UNIT_SECT} )\n    set(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT} ) \n    set(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT} ) \nendif()\n\nfile(GLOB_RECURSE MODELBOX_PLUGIN_SOURCE *.cpp *.cc *.c)\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_PROJECT_VERSION_MAJOR}\n    VERSION ${MODELBOX_PROJECT_VERSION_MAJOR}.${MODELBOX_PROJECT_VERSION_MINOR}.${MODELBOX_PROJECT_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
modelbox)\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT ${UNIT_COMPONENT}\n    RUNTIME DESTINATION ${RELEASE_PACKAGE_DIR_BIN}\n    LIBRARY DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    ARCHIVE DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    OPTIONAL\n    )\n\n"
  },
  {
    "path": "examples/flowunit/plugin/output_broker/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/status.h>\n\n#include \"example.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<ExampleOutputBrokerFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_OUTPUT_BROKER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "examples/flowunit/plugin/output_broker/example.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"example.h\"\n\nmodelbox::Status ExampleOutputBroker::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleOutputBroker::Deinit() { return modelbox::STATUS_OK; }\n\nstd::shared_ptr<modelbox::OutputBrokerHandle> ExampleOutputBroker::Open(\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config) {\n  auto handle = std::make_shared<modelbox::OutputBrokerHandle>();\n  // Your code goes here\n  return handle;\n}\n\nmodelbox::Status ExampleOutputBroker::Write(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleOutputBroker::Sync(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ExampleOutputBroker::Close(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "examples/flowunit/plugin/output_broker/example.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_OUTPUT_BROKER_EXAMPLE_CPU_H_\n#define MODELBOX_FLOWUNIT_OUTPUT_BROKER_EXAMPLE_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/output_broker_plugin.h>\n\nconstexpr const char *DRIVER_NAME = \"example\";\nconstexpr const char *DRIVER_DESC = \"A output broker plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nclass ExampleOutputBroker : public modelbox::OutputBrokerPlugin {\n public:\n  ExampleOutputBroker() = default;\n  virtual ~ExampleOutputBroker() = default;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  std::shared_ptr<modelbox::OutputBrokerHandle> Open(\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config) override;\n\n  modelbox::Status Write(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::shared_ptr<modelbox::Buffer> &buffer) override;\n\n  modelbox::Status Sync(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n  modelbox::Status Close(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n};\n\nclass ExampleOutputBrokerFactory : public modelbox::DriverFactory {\n public:\n  
ExampleOutputBrokerFactory() = default;\n  virtual ~ExampleOutputBrokerFactory() = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<ExampleOutputBroker>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_OUTPUT_BROKER_EXAMPLE_CPU_H_\n"
  },
  {
    "path": "examples/flowunit/python/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_NAME \"example\")\n\nfile(GLOB_RECURSE UNIT_SRC *.*)\nfile(GLOB_RECURSE CMAKELISTS_FILE \"CMakeLists.txt\")\nlist(REMOVE_ITEM UNIT_SRC ${CMAKELISTS_FILE})\ngroup_files(UNIT_SRC UNIT_CONF .*.toml \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\ngroup_files(UNIT_SRC UNIT_CONF .*.json \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\n\ninstall(FILES ${UNIT_SRC}\n        COMPONENT ${UNIT_COMPONENT}\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n\ninstall(FILES ${UNIT_TOML_JSON}\n        COMPONENT ${UNIT_COMPONENT}\n        PERMISSIONS OWNER_READ\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n"
  },
  {
    "path": "examples/flowunit/python/example.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport _flowunit as modelbox\n\nclass ExampleFlowUnit(modelbox.FlowUnit):\n    # Derived from modelbox.FlowUnit\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        # Open the flowunit to obtain configuration information\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        # Process the data\n        in_data = data_context.input(\"in_1\")\n        out_data = data_context.output(\"out_1\")\n\n        # Example process code.\n        # Remove the following code and add your own code here.\n        # for buffer in in_data:\n        #     response = \"Hello World \" + buffer.as_object()\n        #     result = response.encode('utf-8').strip()\n        #     add_buffer = modelbox.Buffer(self.get_bind_device(), result)\n        #     out_data.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        # Close the flowunit\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        # Before streaming data starts\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        # After streaming data ends\n        return modelbox.Status()"
  },
  {
    "path": "examples/flowunit/python/example.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Basic config\n[base]\nname = \"Example\" # The FlowUnit name\ndevice = \"cpu\" # The device the flowunit runs on，cpu，cuda，ascend。\nversion = \"1.0.0\" # The version of the flowunit\ndescription = \"description\" # The description of the flowunit\nentry = \"example@ExampleFlowUnit\" # Python flowunit entry function\ntype = \"python\" # Fixed value\n\n# Flowunit Type\nstream = false # Whether the flowunit is a stream flowunit\ncondition = false # Whether the flowunit is a condition flowunit\ncollapse = false # Whether the flowunit is a collapse flowunit\ncollapse_all = false # Whether the flowunit will collapse all the data\nexpand = false #  Whether the flowunit is a expand flowunit\n\n# The default Flowunit config\n[config]\nitem = \"value\"\n\n# Input ports description\n[input]\n[input.input1] # Input port number, the format is input.input[N]\nname = \"in_1\" # Input port name\ndevice = \"cpu\" # Input port device\n\n# Output ports description\n[output]\n[output.output1] # Output port number, the format is output.output[N]\nname = \"out_1\" # Output port name\n"
  },
  {
    "path": "examples/flowunit/yolo/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_NAME \"example\")\n\nfile(GLOB_RECURSE UNIT_SRC *.*)\nfile(GLOB_RECURSE CMAKELISTS_FILE \"CMakeLists.txt\")\nlist(REMOVE_ITEM UNIT_SRC ${CMAKELISTS_FILE})\ngroup_files(UNIT_SRC UNIT_CONF .*.toml \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\ngroup_files(UNIT_SRC UNIT_CONF .*.json \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\n\ninstall(FILES ${UNIT_SRC}\n        COMPONENT ${UNIT_COMPONENT}\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n\ninstall(FILES ${UNIT_TOML_JSON}\n        COMPONENT ${UNIT_COMPONENT}\n        PERMISSIONS OWNER_READ\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n"
  },
  {
    "path": "examples/flowunit/yolo/example.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Basic config\n[base]\nname = \"example\"\nversion = \"1.0.0\"\ndescription = \"description\"\ntype = \"yolo_postprocess\"\nvirtual_type = \"yolov3_postprocess\"\ndevice = \"cpu\"\n\n[config]\n# input_width = 800\n# input_height = 480\n# class_num = 1\n# score_threshold = [0.6,0.7]\n# nms_threshold = [0.45,0.3]\n# yolo_output_layer_num = 2\n# yolo_output_layer_wh = [25,15,50,30]\n# anchor_num = [4,4]\n# anchor_biases = [100.0,72.0,173.12,55.04,165.12,132.0,280.0,252.0,10.0,8.0,20.0,16.0,30.0,24.0,67.0,56.0]\n\n[input]\n[input.input1]\nname = \"in_1\" # input port name\ntype = \"float\" # input port type\n\n[output]\n[output.output1]\nname = \"out_1\" # output port name\ntype = \"int\" # output port type"
  },
  {
    "path": "examples/misc/modelbox-template-cmd.json.in",
    "content": "{\n    \"cmd-list\": [\n        {\n            \"name\": \"template\",\n            \"exec\": \"@MODELBOX_ROOT_VAR@@MODELBOX_TOOLS_PATH@/template\",\n            \"desc\": \"create project from template\",\n            \"help-cmd\": \"@MODELBOX_ROOT_VAR@@MODELBOX_TOOLS_PATH@/template --help\"\n        }\n    ]\n}\n"
  },
  {
    "path": "examples/project/base/CMakeLists.txt",
    "content": "#\r\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\ncmake_minimum_required(VERSION 3.10)\r\n\r\nproject(example)\r\n\r\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\r\n    message(FATAL_ERROR \"cannot build the project in the source directory! Out-of-source build is enforced!\")\r\nendif()\r\n\r\noption(WITH_TEST \"build with test\" OFF)\r\n\r\nset(MODELBOX_PROJECT_VERSION_MAJOR 1)\r\nset(MODELBOX_PROJECT_VERSION_MINOR 0)\r\nset(MODELBOX_PROJECT_VERSION_PATCH 0)\r\n\r\nset(CMAKE_PROJECT_VERSION ${MODELBOX_PROJECT_VERSION_MAJOR})\r\nset(CMAKE_PROJECT_VERSION_MAJOR ${MODELBOX_PROJECT_VERSION_MAJOR})\r\nset(CMAKE_PROJECT_VERSION_MINOR ${MODELBOX_PROJECT_VERSION_MINOR})\r\nset(CMAKE_PROJECT_VERSION_PATCH ${MODELBOX_PROJECT_VERSION_PATCH})\r\n\r\nset(CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/CMake\" ${CMAKE_MODULE_PATH})\r\n\r\ninclude(Options)\r\ninclude(Function)\r\ninclude(FindPkgConfig)\r\ninclude(CMakeDependentOption)\r\ninclude(GNUInstallDirs)\r\n\r\nset(UNIT_COMPONENT ${CMAKE_PROJECT_NAME})\r\nset(CPACK_PACKAGE_NAME ${CMAKE_PROJECT_NAME})\r\n\r\n# User defined installation path\r\nset(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/application/${CMAKE_PROJECT_NAME})\r\nset(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT}/flowunit) # c++ flowunit so path\r\nset(RELEASE_PACKAGE_DIR_PYTHON ${RELEASE_PACKAGE_DIR_ROOT}/flowunit) # python flowunit 
path\r\nset(RELEASE_PACKAGE_DIR_MODEL ${RELEASE_PACKAGE_DIR_ROOT}/flowunit) # inference flowunit path\r\nset(RELEASE_PACKAGE_DIR_GRAPH ${RELEASE_PACKAGE_DIR_ROOT}/graph) # graph toml path\r\nset(RELEASE_PACKAGE_DIR_ETC ${RELEASE_PACKAGE_DIR_ROOT}/etc) \r\nset(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT}/bin) \r\nset(RELEASE_PACKAGE_DIR_THIRDPARTY ${RELEASE_PACKAGE_DIR_ROOT}/thirdparty)\r\n\r\nfind_package(CUDA 10.0)\r\nfind_package(ACL)\r\nfind_package(DSMI)\r\nfind_package(OpenCV)\r\n\r\nadd_subdirectory(src)\r\nadd_subdirectory(thirdparty)\r\nadd_subdirectory(package)\r\nadd_subdirectory(test EXCLUDE_FROM_ALL)\r\n"
  },
  {
    "path": "examples/project/base/package/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\nset(CPACK_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR} CACHE INTERNAL \"\")\nset(CPACK_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\n\nfunction(FUNC_CPACK)\n    set(CPACK_PACKAGE_NAME \"modelbox-application\" PARENT_SCOPE)\n    set(CPACK_RPM_COMPONENT_INSTALL ON PARENT_SCOPE)\n    set(CPACK_DEB_COMPONENT_INSTALL ON PARENT_SCOPE)\n    set(CPACK_ARCHIVE_COMPONENT_INSTALL OFF PARENT_SCOPE)\n    set(CPACK_SET_DESTDIR ON PARENT_SCOPE)\n    set(CPACK_STRIP_FILES ON PARENT_SCOPE)\n    find_program(TAR tar)\n    find_program(RPM rpm)\n    find_program(DPKG dpkg)\n    set(MODELBOX_PACK_NAME \"${CPACK_PACKAGE_NAME}\")\n    if(NOT CPACK_PACKAGE_NAME)\n        set(MODELBOX_PACK_NAME \"${CMAKE_PROJECT_NAME}\")\n    endif()\n    if(TAR) \n        set(CPACK_GENERATOR \"${CPACK_GENERATOR}TGZ;\")\n    endif()\n    if(RPM) \n        set(CPACK_GENERATOR \"${CPACK_GENERATOR}RPM;\")\n    endif()\n    if(DPKG) \n        set(CPACK_GENERATOR \"${CPACK_GENERATOR}DEB;\")\n    endif()    \n    set(CPACK_GENERATOR \"${CPACK_GENERATOR}\" PARENT_SCOPE)\n    \n    string(TOUPPER ${UNIT_COMPONENT} UNIT_COMPONENT_UPPER_NAME)\n\n    # deb package configuration\n    set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON PARENT_SCOPE)\n    set(CPACK_COMPONENT_${UNIT_COMPONENT_UPPER_NAME}_DESCRIPTION \"${UNIT_COMPONENT} for 
modelbox\" PARENT_SCOPE)\n    configure_file(${CPACK_SOURCE_DIR}/debian/postinst.in ${CPACK_BINARY_DIR}/debian/postinst @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/debian/postrm.in ${CPACK_BINARY_DIR}/debian/postrm @ONLY)\n    set(CPACK_DEBIAN_${UNIT_COMPONENT_UPPER_NAME}_PACKAGE_CONTROL_EXTRA\n        ${CPACK_BINARY_DIR}/debian/postinst\n        ${CPACK_BINARY_DIR}/debian/postrm PARENT_SCOPE)\n    set(CPACK_DEBIAN_PACKAGE_MAINTAINER \"undefined.\" PARENT_SCOPE)\n\n    # rpm package configuration\n    set(CPACK_PACKAGE_RELOCATABLE OFF PARENT_SCOPE)\n    set(CPACK_RPM_PACKAGE_AUTOREQ OFF PARENT_SCOPE)\n    set(CPACK_RPM_SPEC_MORE_DEFINE \"%define _build_id_links none\" PARENT_SCOPE)\n    set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION\n        \"/usr/local\"\n        \"/usr/local/bin\"\n        \"/usr/local/lib\"\n        \"/usr/local/lib64\"\n        \"/usr/local/etc\"\n        \"/usr/local/include\"\n        \"/usr/lib/systemd\"\n        \"/opt\"\n        \"/opt/modelbox\"\n        ${CMAKE_INSTALL_FULL_BINDIR}\n        ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        ${CMAKE_INSTALL_FULL_LIBDIR}\n        \"${CMAKE_INSTALL_FULL_LIBDIR}/pkgconfig\"\n        ${SYSTEMDSYSTEMUNITDIR} PARENT_SCOPE)\n    configure_file(${CPACK_SOURCE_DIR}/rpm/postinscript.in ${CPACK_BINARY_DIR}/rpm/postinscript @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/rpm/postunscript.in ${CPACK_BINARY_DIR}/rpm/postunscript @ONLY)\n    set(CPACK_RPM_${UNIT_COMPONENT_UPPER_NAME}_POST_INSTALL_SCRIPT_FILE ${CPACK_BINARY_DIR}/rpm/postinscript PARENT_SCOPE)\n    set(CPACK_RPM_${UNIT_COMPONENT_UPPER_NAME}_POST_UNINSTALL_SCRIPT_FILE ${CPACK_BINARY_DIR}/rpm/postunscript PARENT_SCOPE)\n    set(CPACK_OUTPUT_FILE_PREFIX ${CMAKE_BINARY_DIR}/release PARENT_SCOPE)\n    set(CPACK_PACKAGE_DIRECTORY ${CMAKE_BINARY_DIR}/cpack PARENT_SCOPE)\n    \n    get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS)\n    list(REMOVE_ITEM CPACK_COMPONENTS_ALL \"Unspecified\")\n    set(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} 
PARENT_SCOPE)\nendfunction(FUNC_CPACK)\n\nset(CPACK_PACKAGE_VERSION_MAJOR ${CMAKE_PROJECT_VERSION_MAJOR})\nset(CPACK_PACKAGE_VERSION_MINOR ${CMAKE_PROJECT_VERSION_MINOR})\nset(CPACK_PACKAGE_VERSION_PATCH ${CMAKE_PROJECT_VERSION_PATCH})\n\ninclude(CPackComponent)\nFUNC_CPACK()\ninclude(CPack)\n"
  },
  {
    "path": "examples/project/base/package/debian/postinst.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nif [ \"$1\" = \"configure\" ]; then\n\tchown -R modelbox:modelbox @RELEASE_PACKAGE_DIR_ROOT@\nfi\nldconfig \n\n"
  },
  {
    "path": "examples/project/base/package/debian/postrm.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# the script will be executed after rpm package uninstalled \n"
  },
  {
    "path": "examples/project/base/package/rpm/postinscript.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n \n# the script will be executed after rpm package installed\n\nchown -R modelbox:modelbox @RELEASE_PACKAGE_DIR_ROOT@\n\nldconfig \n"
  },
  {
    "path": "examples/project/base/package/rpm/postunscript.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# the script will be executed after rpm package uninstalled \n"
  },
  {
    "path": "examples/project/base/readme.txt",
    "content": "# 使用说明 \n\n1. 工程目录结构说明可见[项目创建](https://modelbox-ai.com/modelbox-book/use-modelbox/standard-mode/create-project.html)章节“项目工程目录”部分。\n\n2. 由于部分模板工程模型文件较大，默认不携带模型，如需要运行模板工程，需要手动执行对应工程自带的下载脚本下载。\n\n3. 如果存在C++自定义功能单元，则需要先编译安装后在执行。详情可见[C++功能单元](https://modelbox-ai.com/modelbox-book/use-modelbox/standard-mode/flowunit/c++.html)章节“功能单元编译运行”部分。 \n\n4. Python功能单元由于不需要编译，调试阶段可以将工程路径配置到图配置文件中。详情可见[Python功能单元](https://modelbox-ai.com/modelbox-book/use-modelbox/standard-mode/flowunit/python.html)章节“功能单元调试运行”部分。\n\n5. 模板工程提供了基于Gtest的C++测试用例编写框架，用例编写可参考resize模板工程测试用例, 测试用例编译执行可见[C++功能单元](https://modelbox-ai.com/modelbox-book/use-modelbox/standard-mode/flowunit/c++.html)章节“功能单元功能测试”部分。\n\n------------------------------------------------------------------------\n更多指导，详细[ModelBox指导文档](https://modelbox-ai.com/modelbox-book/)和[ModelBox主页](https://modelbox-ai.com/)："
  },
  {
    "path": "examples/project/base/src/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"cannot build the project in the source directory! Out-of-source build is enforced!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n   add_subdirectory(${subdir})\nendforeach()"
  },
  {
    "path": "examples/project/base/src/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"cannot build the project in the source directory! Out-of-source build is enforced!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n   add_subdirectory(${subdir})\nendforeach()"
  },
  {
    "path": "examples/project/base/src/graph/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n \ncmake_minimum_required(VERSION 3.10)\n \nfile(GLOB_RECURSE UNIT_SRC *.*)\nfile(GLOB_RECURSE CMAKELISTS_FILE \"CMakeLists.txt\")\nlist(REMOVE_ITEM UNIT_SRC ${CMAKELISTS_FILE})\n\ninstall(FILES ${UNIT_SRC}\n        COMPONENT ${UNIT_COMPONENT}\n        DESTINATION ${RELEASE_PACKAGE_DIR_GRAPH}/${UNIT_NAME})\n"
  },
  {
    "path": "examples/project/base/src/service-plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"cannot build the project in the source directory! Out-of-source build is enforced!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n   add_subdirectory(${subdir})\nendforeach()"
  },
  {
    "path": "examples/project/base/test/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nif(NOT TEST_WORKING_DIR)\n\tset(TEST_WORKING_DIR ${CMAKE_CURRENT_BINARY_DIR}/test-working-dir)\n\tfile(MAKE_DIRECTORY ${TEST_WORKING_DIR})\nendif()\nset(TEST_WORKING_DATA_DIR \"${TEST_WORKING_DIR}/data\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_DATA_DIR})\nset(TEST_WORKING_LIB_DIR \"${TEST_WORKING_DIR}/lib\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_LIB_DIR})\nset(TEST_WORKING_BIN_DIR \"${TEST_WORKING_DIR}/bin\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_BIN_DIR})\nset(TEST_WORKING_MODEL_DIR \"${TEST_WORKING_DIR}/model\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_MODEL_DIR})\nset(TEST_WORKING_PYTHON_DIR \"${TEST_WORKING_DIR}/python\")\nfile(MAKE_DIRECTORY ${TEST_WORKING_PYTHON_DIR})\nset(TEST_ASSETS ${CMAKE_CURRENT_LIST_DIR}/assets)\nset(TEST_SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR})\n\nadd_definitions(-DBUILD_TEST)\nset(TEST_MAIN_SOURCE ${CMAKE_CURRENT_LIST_DIR}/test_main.cc)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/test_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/test_config.h @ONLY)\n\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_INCLUDE})\nlist(APPEND TEST_INCLUDE ${MODELBOX_SERVER_INCLUDE})\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_BASE_INCLUDE})\nlist(APPEND TEST_INCLUDE ${TOML_INCLUDE_DIR})\nlist(APPEND TEST_INCLUDE ${CMAKE_CURRENT_BINARY_DIR})\nlist(APPEND TEST_INCLUDE ${MODELBOX_TOP_DIR})\nlist(REMOVE_DUPLICATES 
TEST_INCLUDE)\n\nset(TEST_SOURCE \n    ${MODELBOX_SERVER_SOURCES} \n)\n\nset(TEST_LINK_LIBRARIES\n    ${MODELBOX_SERVER_LINK_LIBRARIES}\n\t${LIBMODELBOX_LINK_SOURCES} \n)\n\ninclude_directories(${gtest_SOURCE_DIR}/include ${gtest_SOURCE_DIR})\ninclude_directories(${gmock_SOURCE_DIR}/include ${gmock_SOURCE_DIR})\ninclude_directories(${MODELBOX_SERVER_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\n\nset(CMAKE_CXX_FLAGS_OLD ${CMAKE_CXX_FLAGS})\nif (CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\")\n\tset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC -fno-gnu-unique\")\nelse()\n\tset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC\")\nendif()\nadd_subdirectory(mock)\nadd_subdirectory(flowunit)\nset(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_OLD})"
  },
  {
    "path": "examples/project/base/test/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n \ncmake_minimum_required(VERSION 3.10)\n \nfile(GLOB_RECURSE UNIT_TEST_SOURCE *.cpp *.cc *.c)\n \ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${MOCKFLOW_INCLUDE})\ninclude_directories(${TEST_INCLUDE})\n\nadd_executable(unit-test EXCLUDE_FROM_ALL\n\t${UNIT_TEST_SOURCE}\n\t${TEST_MAIN_SOURCE}\n)\n\nadd_custom_target(all-drivers)\nadd_custom_command(TARGET all-drivers PRE_BUILD\n\tCOMMAND rm -fr ${TEST_WORKING_LIB_DIR}/*\n)\n\nadd_custom_command(TARGET all-drivers PRE_BUILD\n\tCOMMAND rm -fr ${TEST_WORKING_LIB_DIR}/*\n)\n\nforeach (ITR ${UNIT_TEST_TARGET})\n\tadd_dependencies(all-drivers ${ITR})\n\tadd_custom_command(TARGET all-drivers POST_BUILD\n\tCOMMAND cp $<TARGET_FILE:${ITR}> ${TEST_WORKING_LIB_DIR}/\n\t)\nendforeach(ITR) \nset(UNIT_TEST_TARGET \"\" CACHE INTERNAL \"\")\n\nadd_custom_command(TARGET all-drivers POST_BUILD\n\tCOMMAND ldconfig ${TEST_WORKING_LIB_DIR} -n\n)\n\nforeach (ITR ${DRIVER_UNIT_TEST_LINK_LIBRARIES})\n\ttarget_link_libraries(unit-test ${ITR})\nendforeach(ITR) \nset(DRIVER_UNIT_TEST_LINK_LIBRARIES \"\" CACHE INTERNAL \"\")\n\ntarget_link_libraries(unit-test pthread)\ntarget_link_libraries(unit-test rt)\ntarget_link_libraries(unit-test dl)\ntarget_link_libraries(unit-test gtest_main)\ntarget_link_libraries(unit-test 
gmock_main)\ntarget_link_libraries(unit-test modelbox)\ntarget_link_libraries(unit-test ${OpenCV_LIBS})\ntarget_link_libraries(unit-test ${MOCKFLOW_LIB})\nadd_dependencies(unit-test all-drivers)\n\nadd_custom_target(unittest\n\tCOMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/unit-test\n\tDEPENDS  unit-test\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run Unit Test...\"\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS unit-test)\nset(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\n "
  },
  {
    "path": "examples/project/base/test/mock/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nfile(GLOB MOCKFLOW_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${MODELBOX_SERVER_INCLUDE})\n\nset(MOCKFLOW_LIB flowmock-lib)\nadd_library(${MOCKFLOW_LIB} ${MOCKFLOW_SOURCE})\n\nset(MOCKFLOW_LIB ${MOCKFLOW_LIB} CACHE INTERNAL \"\")\nset(MOCKFLOW_INCLUDE ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL \"\")\n\n\n"
  },
  {
    "path": "examples/project/base/test/mock/mock_modelbox.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mock_modelbox.h\"\n#include <sstream>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/session_context.h\"\n\nusing ::testing::_;\nnamespace modelbox {\n\nStatus MockModelBox::InitFlow(const std::string &name,\n                              const std::string &graph) {\n  flow_ = std::make_shared<Flow>();\n  return flow_->Init(name, graph);\n}\n\nStatus MockModelBox::BuildAndRun(const std::string &name,\n                                 const std::string &graph, int timeout) {\n  auto ret = InitFlow(name, graph);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->Build();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->RunAsync();\n  if (!ret) {\n    return ret;\n  }\n\n  if (timeout < 0) {\n    return ret;\n  }\n\n  Status retval;\n  flow_->Wait(timeout, &retval);\n  return retval;\n}\n\nvoid MockModelBox::Stop() {\n  if (flow_ != nullptr) {\n    flow_->Stop();\n    flow_ = nullptr;\n  }\n}\n\nstd::shared_ptr<Flow> MockModelBox::GetFlow() { return flow_; }\n\nstd::vector<std::shared_ptr<BufferList>> MockModelBox::GetOutputBufferList(\n    std::shared_ptr<ExternalDataMap> ext_data, const std::string &port_name) {\n  Status status;\n  std::vector<std::shared_ptr<BufferList>> output_buffer_lists;\n  while (true) {\n    
OutputBufferList map_buffer_list;\n    status = ext_data->Recv(map_buffer_list);\n    if (status == STATUS_SUCCESS) {\n      auto buffer_list = map_buffer_list[port_name];\n      output_buffer_lists.push_back(buffer_list);\n    } else {\n      EXPECT_EQ(status, STATUS_EOF);\n      break;\n    }\n  }\n  return output_buffer_lists;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "examples/project/base/test/mock/mock_modelbox.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MOCK_MODELBOX_H_\n#define MOCK_MODELBOX_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <iostream>\n#include <string>\n#include \"test_config.h\"\n\nnamespace modelbox {\n\nclass MockModelBox {\n public:\n  MockModelBox(){};\n  virtual ~MockModelBox() { Stop(); };\n\n  bool Init();\n  void Stop();\n  Status BuildAndRun(const std::string &name, const std::string &graph,\n                     int timeout = 15 * 1000);\n  std::shared_ptr<Flow> GetFlow();\n  Status InitFlow(const std::string &name, const std::string &graph);\n  std::vector<std::shared_ptr<BufferList>> GetOutputBufferList(\n      std::shared_ptr<ExternalDataMap> ext_data, const std::string &port_name);\n\n private:\n  std::shared_ptr<Flow> flow_;\n};\n\n}  // namespace modelbox\n#endif  // MOCK_MODELBOX_H_\n"
  },
  {
    "path": "examples/project/base/test/test_config.h.in",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_TEST_CONFIG_H_\n#define MODELBOX_TEST_CONFIG_H_\n\nnamespace modelbox {\n\n// test working dir\n#define TEST_WORKING_DIR \"@TEST_WORKING_DIR@\"\n\n// test lib dir\n#define TEST_LIB_DIR \"@TEST_WORKING_LIB_DIR@\"\n\n// test bin dir\n#define TEST_BIN_DIR \"@TEST_WORKING_BIN_DIR@\"\n\n// test data dir\n#define TEST_DATA_DIR \"@TEST_WORKING_DATA_DIR@\"\n\n// test driver dir\n#define TEST_DRIVER_DIR \"@TEST_WORKING_DRIVERS_DIR@\"\n\n// test asserts file\n#define TEST_ASSETS \"@TEST_ASSETS@\"\n\n// test source code dir\n#define TEST_SOURCE_DIR \"@TEST_SOURCE_DIR@\"\n\n// python flow unit so path\n#define PYTHON_PATH \"@LIBMODELBOX_FLOWUNIT_PYTHON_SO_PATH@\"\n#define DEVICE_CPU_SO_PATH \"@LIBMODELBOX_DEVICE_CPU_SO_PATH@\"\n#define DEVICE_CUDA_SO_PATH \"@LIBMODELBOX_DEVICE_CUDA_SO_PATH@\"\n#define DEVICE_ASCEND_SO_PATH \"@LIBMODELBOX_DEVICE_ASCEND_SO_PATH@\"\n#define INFERENCE_PATH \"@LIBMODELBOX_FLOWUNIT_INFERENCE_SO_PATH@\"\n#define VIRTUAL_PYTHON_PATH \"@LIBMODELBOX_VIRTUALDRIVER_PYTHON_SO_PATH@\"\n#define VIRTUAL_INFERENCE_PATH \"@LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SO_PATH@\"\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_TEST_CONFIG_H_"
  },
  {
    "path": "examples/project/base/thirdparty/CMake/local-package.in",
    "content": "\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(example)\n\nset(THIRDPARTY_DOWNLOAD_DIR @THIRDPARTY_DOWNLOAD_DIR@)\nset(WITH_TEST @WITH_TEST@)\n\ninclude(ExternalProject)\nfind_package(Git)\n\n# download googletest\nif (${WITH_TEST})\n  ExternalProject_Add(\n    GoogleTest\n    URL               @LOCAL_PACKAGE_PATH@/googletest-release-1.10.0.tar.gz\n    SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/googletest\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n  )\nendif()\n"
  },
  {
    "path": "examples/project/base/thirdparty/CMake/pre-download.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(example)\n\n# 替换的环境变量\nset(THIRDPARTY_DOWNLOAD_DIR @THIRDPARTY_DOWNLOAD_DIR@)\nset(WITH_TEST @WITH_TEST@)\n\n# 预先下载代码库列表，此处仅包含需要使用ADD_SUBDIRECTORY添加的外部项目。\ninclude(ExternalProject)\nfind_package(Git)\n\n# 下载googletest\nif (${WITH_TEST})\n  ExternalProject_Add(\n    GoogleTest\n    URL               https://github.com/google/googletest/archive/refs/tags/release-1.10.0.zip\n    SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/googletest\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n  )\nendif()\n\n\n"
  },
  {
    "path": "examples/project/base/thirdparty/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(example)\n\ninclude(ExternalProject)\n\nset(THIRDPARTY_DOWNLOAD_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\nset(THIRDPARTY_DOWNLOAD_WORKING_DIR ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/build)\n\n# 预先下载的三方组件，当使用ADD_SUBDIRECTORY包含子项目时，采用此方式。\nif (NOT LOCAL_PACKAGE_PATH) \n  set(THIRDPARTY_DOWNLOAD_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\n  configure_file(CMake/pre-download.in ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/CMakeLists.txt @ONLY)\nelse()\n  set(THIRDPARTY_DOWNLOAD_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\n  configure_file(CMake/local-package.in ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/CMakeLists.txt @ONLY)\nendif()\n\nfile(MAKE_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR})\nexecute_process(COMMAND ${CMAKE_COMMAND} -G \"${CMAKE_GENERATOR}\" ..\n  RESULT_VARIABLE COMMAND_RESULT\n  WORKING_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR} \n)\n\nif(COMMAND_RESULT)\n  message(FATAL_ERROR \"Download thirdparty failed: ${COMMAND_RESULT}\")\nendif()\n\nexecute_process(COMMAND ${CMAKE_COMMAND} --build .\n  RESULT_VARIABLE COMMAND_RESULT\n  WORKING_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR} \n)\n\nif(COMMAND_RESULT)\n  message(FATAL_ERROR \"Download thirdparty failed: ${COMMAND_RESULT}\")\nendif()\n\nif (${WITH_TEST})\n  set(CMAKE_CXX_FLAGS_OLD ${CMAKE_CXX_FLAGS})\n  set(CMAKE_CXX_FLAGS 
\"${CMAKE_CXX_FLAGS} -fPIC\")\n  set(GOOGLETEST_SOURCE_DIR ${THIRDPARTY_DOWNLOAD_DIR}/googletest)\n  add_subdirectory(${GOOGLETEST_SOURCE_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/googletest EXCLUDE_FROM_ALL)\n  set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_OLD})\nendif()\n"
  },
  {
    "path": "examples/project/car_detection/desc.toml",
    "content": "name = \"car detection\"\ndesc = \"A car detection example project template for modelbox\""
  },
  {
    "path": "examples/project/car_detection/setup.sh",
    "content": "#!/bin/sh\n# run this script during cmake prepare\n\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTARGET_DIR=$1\nEXAMPLE_DIR=$2\nDEMO_SRC_DIR=$3\n\nmain() {\n    for flowdir in ${TARGET_DIR}/src/flowunit/* ; do\n        if [ ! -d \"${flowdir}\" ]; then\n            continue;\n        fi\n        \n        cp ${EXAMPLE_DIR}/flowunit/python/CMakeLists.txt ${flowdir}/CMakeLists.txt -f\n        if [ $? -ne 0 ]; then\n            echo \"copy cmake to template failed.\"\n            return 1\n        fi\n        \n        sed -i \"s/example/$(basename ${flowdir})/g\" ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"change cmakefile name failed.\"\n            return 1\n        fi\n    done\n\n    cp ${EXAMPLE_DIR}/project/base/src/graph/CMakeLists.txt ${TARGET_DIR}/src/graph/CMakeLists.txt\n    if [ $? -ne 0 ]; then\n        echo \"copy cmake to graph failed.\"\n        return 1\n    fi\n\n    mv ${TARGET_DIR}/src/graph/car_detection.toml.in ${TARGET_DIR}/src/graph/car_detection.toml\n    sed -i \"s#@DEMO_CAR_DETECTION_FLOWUNIT_DIR@#@APPLICATION_PATH@/flowunit#g\" ${TARGET_DIR}/src/graph/car_detection.toml\n    if [ $? -ne 0 ]; then\n        echo \"change graph path failed.\"\n        return 1\n    fi\n\n    sed -i \"s#@DEMO_VIDEO_DIR@#@PROJECT_PATH@/src/graph#g\" ${TARGET_DIR}/src/graph/car_detection.toml\n    if [ $? 
-ne 0 ]; then\n        echo \"change test video path failed.\"\n        return 1\n    fi\n}\n\nmain\n"
  },
  {
    "path": "examples/project/emotion_detection/desc.toml",
    "content": "name = \"emotion_detection\"\ndesc = \"A emotion detection example project template for modelbox\"\n\n[guide]\nguide = '''\n\n# 使用指导\n\n需要执行工程根目录下download_emotion_files.sh下载torch模型。\n\n'''\n"
  },
  {
    "path": "examples/project/emotion_detection/download_emotion_files.sh",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nDOWNLOAD_EMOTION_FILES=\"https://gitee.com/modelbox/modelbox-binary/attach_files/1010735/download/emotion_demo_files.zip\"\nBASE_PATH=$(cd `dirname $0`; pwd)\n\nmain() {\n    wget ${DOWNLOAD_EMOTION_FILES} -O ${BASE_PATH}/emotion_demo_files.zip\n    if [ $? -ne 0 ]; then\n        echo \"download emotion_demo_files.zip failed\"\n        return 1\n    fi\n\n    unzip ${BASE_PATH}/emotion_demo_files.zip -d ${BASE_PATH}/emotion_demo_files/\n    if [ $? -ne 0 ]; then\n        echo \"decompress emotion_demo_files.zip failed\"\n        return 1\n    fi\n\n    cp ${BASE_PATH}/emotion_demo_files/emotion.pt ${BASE_PATH}/src/flowunit/emotion_infer/emotion.pt -f\n    if [ $? -ne 0 ]; then\n        echo \"copy emotion model failed\"\n        return 1\n    fi\n    \n    cp ${BASE_PATH}/emotion_demo_files/face_detector.pt ${BASE_PATH}/src/flowunit/face_detect/face_detector.pt -f\n    if [ $? -ne 0 ]; then\n        echo \"copy face detector model failed\"\n        return 1\n    fi\n\n    cp ${BASE_PATH}/emotion_demo_files/emotion_test_video.mp4 ${BASE_PATH}/src/graph/emotion_test_video.mp4 -f\n    if [ $? -ne 0 ]; then\n        echo \"copy test video failed\"\n        return 1\n    fi\n\n    rm ${BASE_PATH}/emotion_demo_files -rf\n    if [ $? 
-ne 0 ]; then\n        echo \"remove emotion_demo_files folder failed\"\n        return 1\n    fi\n}\n\nmain\n"
  },
  {
    "path": "examples/project/emotion_detection/setup.sh",
    "content": "#!/bin/sh\n# run this script during cmake prepare\n\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTARGET_DIR=$1\nEXAMPLE_DIR=$2\nDEMO_SRC_DIR=$3\nDEMO_THIRD_FILE_DIR=$4\n\nBASE_PATH=$(cd `dirname $0`; pwd)\n\nmain() {\n    for flowdir in ${TARGET_DIR}/src/flowunit/* ; do\n        if [ ! -d \"${flowdir}\" ]; then\n            continue;\n        fi\n        \n        cp ${EXAMPLE_DIR}/flowunit/python/CMakeLists.txt ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"copy cmake to template failed.\"\n            return 1\n        fi\n        \n        sed -i \"s/example/$(basename ${flowdir})/g\" ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"change cmakefile name failed.\"\n            return 1\n        fi\n    done\n\n    cp $BASE_PATH/download_emotion_files.sh ${TARGET_DIR}/download_emotion_files.sh\n    if [ $? -ne 0 ]; then\n        echo \"copy download shell to flowunit failed.\"\n        return 1\n    fi\n\n    cp ${EXAMPLE_DIR}/project/base/src/graph/CMakeLists.txt ${TARGET_DIR}/src/graph/CMakeLists.txt\n    if [ $? 
-ne 0 ]; then\n        echo \"copy cmake to graph failed.\"\n        return 1\n    fi\n\n    mv ${TARGET_DIR}/src/graph/emotion_detection.toml.in ${TARGET_DIR}/src/graph/emotion_detection.toml\n    sed -i \"s#@DEMO_EMOTION_DETECTION_FLOWUNIT_DIR@#@APPLICATION_PATH@/flowunit#g\" ${TARGET_DIR}/src/graph/emotion_detection.toml\n    if [ $? -ne 0 ]; then\n        echo \"change graph path failed.\"\n        return 1\n    fi\n\n    sed -i \"s#@DEMO_VIDEO_DIR@#@PROJECT_PATH@/src/graph#g\" ${TARGET_DIR}/src/graph/emotion_detection.toml\n    if [ $? -ne 0 ]; then\n        echo \"change test video path failed.\"\n        return 1\n    fi\n}\n\nmain\n"
  },
  {
    "path": "examples/project/empty/desc.toml",
    "content": "name = \"empty\"\ndesc = \"A empty project template for modelbox\""
  },
  {
    "path": "examples/project/empty/src/graph/empty.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[driver]\nskip-default = false\ndir=[\n  \"@APPLICATION_PATH@/flowunit\"\n]\n[profile]\nprofile=false\ntrace=false\ndir=\"\"\n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph graph_empty {\n\n}'''\n"
  },
  {
    "path": "examples/project/hello_world/desc.toml",
    "content": "name = \"helloworld\"\ndesc = \"A helloworld REST API service example project template for modelbox\"\n\n[restapi]\nmethod = \"POST\"\npath = \"http://0.0.0.0:7770/v1/hello_world\"\nrequestbody = '''\n{\n  \"msg\": \"hello modelbox\"\n}\n'''\n"
  },
  {
    "path": "examples/project/hello_world/setup.sh",
    "content": "#!/bin/sh\n# run this script during cmake prepare\n\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTARGET_DIR=$1\nEXAMPLE_DIR=$2\nDEMO_DIR=$3\n\nmain() {\n    for flowdir in ${TARGET_DIR}/src/flowunit/* ; do\n        if [ ! -d \"${flowdir}\" ]; then\n            continue;\n        fi\n\n        cp ${EXAMPLE_DIR}/flowunit/python/CMakeLists.txt ${flowdir}/CMakeLists.txt -f\n        if [ $? -ne 0 ]; then\n            echo \"copy cmake to template failed.\"\n            return 1\n        fi\n\n        sed -i \"s/example/$(basename ${flowdir})/g\" ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"change cmakefile name failed.\"\n            return 1\n        fi\n    done\n\n    cp ${EXAMPLE_DIR}/project/base/src/graph/CMakeLists.txt ${TARGET_DIR}/src/graph/CMakeLists.txt\n    if [ $? -ne 0 ]; then\n        echo \"copy cmake to graph failed.\"\n        return 1\n    fi\n\n    mv ${TARGET_DIR}/src/graph/hello_world.toml.in ${TARGET_DIR}/src/graph/hello_world.toml\n    sed -i \"s#@DEMO_HELLO_WORLD_FLOWUNIT_DIR@#@APPLICATION_PATH@/flowunit#g\" ${TARGET_DIR}/src/graph/hello_world.toml\n    if [ $? -ne 0 ]; then\n        echo \"change graph path failed.\"\n        return 1\n    fi\n}\n\nmain\n"
  },
  {
    "path": "examples/project/mnist/desc.toml",
    "content": "name = \"mnist\"\ndesc = \"A mnist example project template for modelbox\"\n\n[restapi]\nmethod = \"POST\"\npath = \"http://0.0.0.0:8190/v1/mnist_test\"\nrequestbody = '''\n{\n  \"image_base64\": \"\"\n}\n'''\n"
  },
  {
    "path": "examples/project/mnist/setup.sh",
    "content": "#!/bin/sh\n# run this script during cmake prepare\n\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTARGET_DIR=$1\nEXAMPLE_DIR=$2\nDEMO_DIR=$3\n\nmain() {\n    for flowdir in ${TARGET_DIR}/src/flowunit/* ; do\n        if [ ! -d \"${flowdir}\" ]; then\n            continue;\n        fi\n        \n        cp ${EXAMPLE_DIR}/flowunit/python/CMakeLists.txt ${flowdir}/CMakeLists.txt -f\n        if [ $? -ne 0 ]; then\n            echo \"copy cmake to template failed.\"\n            return 1\n        fi\n        \n        sed -i \"s/example/$(basename ${flowdir})/g\" ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"change cmakefile name failed.\"\n            return 1\n        fi\n    done\n\n    cp ${EXAMPLE_DIR}/project/base/src/graph/CMakeLists.txt ${TARGET_DIR}/src/graph/CMakeLists.txt\n    if [ $? -ne 0 ]; then\n        echo \"copy cmake to graph failed.\"\n        return 1\n    fi\n\n    mv ${TARGET_DIR}/src/graph/mnist.toml.in ${TARGET_DIR}/src/graph/mnist.toml\n    sed -i \"s#@DEMO_MNIST_FLOWUNIT_DIR@#@APPLICATION_PATH@/flowunit#g\" ${TARGET_DIR}/src/graph/mnist.toml\n    if [ $? -ne 0 ]; then\n        echo \"change graph path failed.\"\n        return 1\n    fi\n}\n\nmain\n"
  },
  {
    "path": "examples/project/mnist-mindspore/desc.toml",
    "content": "name = \"mnist-mindspore\"\ndesc = \"A mnist with mindspore example project template for modelbox\"\n\n[restapi]\nmethod = \"POST\"\npath = \"http://0.0.0.0:8190/v1/mnist_test\"\nrequestbody = '''\n{\n  \"image_base64\": \"\"\n}\n'''\n"
  },
  {
    "path": "examples/project/mnist-mindspore/setup.sh",
    "content": "#!/bin/sh\n# run this script during cmake prepare\n\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nTARGET_DIR=$1\nEXAMPLE_DIR=$2\nDEMO_DIR=$3\nDEMO_SRC_DIR=$4\n\nmain() {\n    for flowdir in ${TARGET_DIR}/src/flowunit/* ; do\n        if [ ! -d \"${flowdir}\" ]; then\n            continue;\n        fi\n\n        if [ -f \"${flowdir}/CMakeLists.txt\" ]; then\n            continue;\n        fi\n\n        cp ${EXAMPLE_DIR}/flowunit/python/CMakeLists.txt ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"copy cmake to template failed.\"\n            return 1\n        fi\n        \n        sed -i \"s/example/$(basename ${flowdir})/g\" ${flowdir}/CMakeLists.txt\n        if [ $? -ne 0 ]; then\n            echo \"change cmakefile name failed.\"\n            return 1\n        fi\n    done\n\n    cp ${EXAMPLE_DIR}/project/base/src/graph/CMakeLists.txt ${TARGET_DIR}/src/graph/CMakeLists.txt\n    if [ $? -ne 0 ]; then\n        echo \"copy cmake to graph failed.\"\n        return 1\n    fi\n\n    cp ${DEMO_SRC_DIR}/mnist/graph/*.tar.gz ${TARGET_DIR}/src/graph/\n    if [ $? -ne 0 ]; then\n        echo \"copy image failed.\"\n        return 1\n    fi\n\n    cp ${DEMO_SRC_DIR}/mnist/graph/*.py ${TARGET_DIR}/src/graph/\n    if [ $? -ne 0 ]; then\n        echo \"copy test script failed.\"\n        return 1\n    fi\n}\n\nmain\n\n"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_infer/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_NAME \"mnist_infer\")\n\nfile(GLOB_RECURSE UNIT_SRC *.*)\nexclude_files_from_dir_in_list(UNIT_SRC \"${UNIT_SRC}\" \"${CMAKE_CURRENT_LIST_DIR}/mnist-train/\")\nfile(GLOB_RECURSE CMAKELISTS_FILE \"CMakeLists.txt\")\nlist(REMOVE_ITEM UNIT_SRC ${CMAKELISTS_FILE})\ngroup_files(UNIT_SRC UNIT_CONF .*.toml \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\ngroup_files(UNIT_SRC UNIT_CONF .*.json \"${UNIT_SRC}\")\nlist(APPEND UNIT_TOML_JSON ${UNIT_CONF})\n\ninstall(FILES ${UNIT_SRC}\n        COMPONENT ${UNIT_COMPONENT}\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n\ninstall(FILES ${UNIT_TOML_JSON}\n        COMPONENT ${UNIT_COMPONENT}\n        PERMISSIONS OWNER_READ\n        DESTINATION ${RELEASE_PACKAGE_DIR_MODEL}/${UNIT_NAME})\n"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_infer/mnist_infer.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_infer\" \ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"Recognition handwritten digits recognition.\"\nentry = \"./mnist.ms\" \ntype = \"inference\" \nvirtual_type = \"mindspore\" \n\n[input]\n[input.input1] \nname = \"input\" \ntype = \"float\" \n\n[output]\n[output.output1] \nname = \"output\" \ntype = \"float\"\n"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_infer/train.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language \n\nCURRDIR=$(pwd)\nVERSION=\"1.7.0\"\nTRAIN_VER=\"r1.6\"\n\nget_package_archname() {\n  case $(uname -m) in\n   x86_64)\n     echo \"linux-x64\"\n     ;;\n   aarch64)\n     echo \"linux-aarch64\"\n     ;;\n   armv7l | armv8l)\n     echo \"linux-aarch32\"\n     ;;\n   *)\n     echo \"\"\n     ;;\n  esac\n}\n\nsetup_var() {\n    TRAIN_DIR=\"${CURRDIR}/mnist-train\"\n    OS_ARCH=$(uname -m)\n    mkdir ${TRAIN_DIR} -p\n    if [ $? -ne 0 ]; then\n        echo \"create train dir failed\"\n        return 1\n    fi\n    TRAIN_FILE=\"${TRAIN_DIR}/train.py\"\n    MINDSPORE_LITE_NAME=\"mindspore-lite-${VERSION}-$(get_package_archname)\"\n    MINDSPORE_LITE_DIR=\"${TRAIN_DIR}/${MINDSPORE_LITE_NAME}\"\n    MINDSPORE_LITE_FILE=\"${MINDSPORE_LITE_NAME}.tar.gz\"\n    MINDSPORE_TRAIN_DOWNLOAD_URL=\"https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/${TRAIN_VER}/tutorials/zh_cn/mindspore_quick_start.py\"\n    MINDSPORE_LITE_DOWNLOAD_URL=\"https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION}/MindSpore/lite/release/linux/${OS_ARCH}/${MINDSPORE_LITE_FILE}\"\n\n    pip list |grep mindspore >/dev/null 2>&1\n    if [ $? -ne 0 ]; then\n        PYVER=$(python --version | awk '{print $2}' | awk -F. 
'{print $1$2}')\n        PYVER_STR=\"cp${PYVER}-cp${PYVER}\"\n        MINDSPORE_WHEEL_FILE=\"mindspore-${VERSION}-${PYVER_STR}-linux_${OS_ARCH}.whl\"\n        MINDSPORE_DOWNLOAD_URL=\"https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION}/MindSpore/cpu/${OS_ARCH}/${MINDSPORE_WHEEL_FILE}\"\n\n        curl -Is ${MINDSPORE_DOWNLOAD_URL} | head -1 | grep \"200 OK\" >/dev/null 2>&1\n        if [ $? -ne 0 ]; then\n            PYVER_STR=\"cp${PYVER}-cp${PYVER}m\"\n            MINDSPORE_WHEEL_FILE=\"mindspore-${VERSION}-${PYVER_STR}-linux_${OS_ARCH}.whl\"\n            MINDSPORE_DOWNLOAD_URL=\"https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION}/MindSpore/cpu/${OS_ARCH}/${MINDSPORE_WHEEL_FILE}\"\n            curl -Is ${MINDSPORE_DOWNLOAD_URL} | head -1 | grep \"200 OK\" >/dev/null 2>&1\n            if [ $? -ne 0 ]; then\n                echo \"cannot find URL for mindspore, please download manually\";\n                return 1\n            fi\n        fi\n    fi\n\n    rm ${TRAIN_DIR}/checkpoint_lenet* -f\n    cd ${TRAIN_DIR}\n}\n\ndownload_package() {\n    if [ ! -f \"${TRAIN_FILE}\" ]; then\n        wget ${MINDSPORE_TRAIN_DOWNLOAD_URL} -O ${TRAIN_FILE}\n        if [ $? -ne 0 ]; then\n            echo \"download train script failed\"\n            return 1\n        fi\n    fi\n\n    if [ ! -f \"${MINDSPORE_LITE_FILE}\" ]; then\n        wget ${MINDSPORE_LITE_DOWNLOAD_URL}\n        if [ $? -ne 0 ]; then\n            echo \"download lite failed\"\n            return 1\n        fi\n    fi\n\n    if [ ! -d ${MINDSPORE_LITE_DIR} ]; then\n        tar xf ${MINDSPORE_LITE_FILE}\n        if [ $? -ne 0 ]; then\n            echo \"extract failed\"\n            return 1\n        fi\n    fi\n\n    pip list |grep mindspore >/dev/null 2>&1\n    if [ $? -ne 0 ]; then\n        if [ ! -f \"${MINDSPORE_WHEEL_FILE}\" ]; then\n            wget ${MINDSPORE_DOWNLOAD_URL}\n            if [ $? 
-ne 0 ]; then\n                echo \"download mindspore train package failed\"\n                return 1\n            fi\n        fi\n        SUDO=\"\"\n        if [ \"$(id -u)\" != 0 ]; then\n            SUDO=\"sudo\"\n        fi\n\n        $SUDO pip install ${MINDSPORE_WHEEL_FILE} --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple\n        if [ $? -ne 0 ]; then\n            echo \"install python train package failed\"\n            return 1\n        fi\n    fi\n\n    return 0\n}\n\nsetup_export_code() {\n    if grep \"^export(net.*)\" ${TRAIN_FILE}; then\n        return;\n    fi\n    \n    echo \"\nfrom mindspore import export, load_checkpoint, load_param_into_net\nimport glob\n\nckpt = glob.glob('*.ckpt')\n\nnet = LeNet5()\nparam_dict = load_checkpoint(ckpt[0])\nload_param_into_net(net, param_dict)\ninput = np.random.uniform(0.0, 1.0, size=[1, 1, 32, 32]).astype(np.float32)\nexport(net, Tensor(input), file_name='mnist', file_format='MINDIR')\n\" >> ${TRAIN_FILE}\n}\n\ntrain() {\n    setup_export_code\n    if [ $? -ne 0 ]; then\n        echo \"setup export python script failed.\"\n        return 1\n    fi\n\n    python ${TRAIN_FILE}\n    if [ $? -ne 0 ]; then\n            echo \"train failed\"\n            return 1\n    fi\n\n    return 0\n}\n\nexport_lite_model() {\n    export LD_LIBRARY_PATH=${MINDSPORE_LITE_DIR}/tools/converter/lib\n    ${MINDSPORE_LITE_DIR}/tools/converter/converter/converter_lite --fmk=MINDIR --modelFile=${TRAIN_DIR}/mnist.mindir --outputFile=${TRAIN_DIR}/mnist\n    if [ $? -ne 0 ]; then\n            echo \"export lite model failed\"\n            return 1\n    fi\n\n    cp ${TRAIN_DIR}/mnist.ms ${CURRDIR}/\n\n    return $?\n}\n\n\nmain() {\n    setup_var\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    echo \"working directory: ${TRAIN_DIR}\"\n\n    download_package\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    train\n    if [ $? 
-ne 0 ]; then\n            return 1\n    fi\n\n    export_lite_model\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    echo \"train success.\"\n    echo \"model dir: ${TRAIN_DIR}\"\n    echo \"quick start: https://www.mindspore.cn/tutorial/en/r0.5/quick_start/quick_start.html\"\n}\n\nmain\n"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_preprocess/mnist_preprocess.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport _flowunit as modelbox\nimport numpy as np\nimport base64\nimport json\nimport cv2\n\nclass MnistPreprocess(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data = data_context.input(\"in_data\")\n        out_data = data_context.output(\"out_data\")\n\n        for buffer in in_data:\n            # get image from request body\n            request_body = json.loads(buffer.as_object().strip(chr(0)))\n            \n            if  request_body.get(\"image_base64\"):\n                img_base64 = request_body[\"image_base64\"]\n                img_file = base64.b64decode(img_base64)\n\n                # reshape img\n                img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_GRAYSCALE)\n                img = cv2.resize(img, (32, 32))\n                infer_data = np.array([255 - img], dtype=np.float32)\n                print(infer_data.shape)\n                \n                # build buffer\n                add_buffer = modelbox.Buffer(self.get_bind_device(), infer_data)\n                out_data.push_back(add_buffer)\n            else:\n                error_msg = \"wrong key of request_body\"\n                modelbox.error(error_msg)\n      
          add_buffer = modelbox.Buffer(self.get_bind_device(), \"\")\n                add_buffer.set_error(\"MnistPreprocess.BadRequest\", error_msg)\n                out_data.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_group_post(self, data_context):\n        return modelbox.Status()"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_preprocess/mnist_preprocess.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_preprocess\"\ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"mnist preprocess\"\nentry = \"mnist_preprocess@MnistPreprocess\" \ntype = \"python\" \n\nstream = false\ncondition  = false\ncollapse = false \ncollapse_all = false \nexpand = false \n\n[config]\n\n[input]\n[input.input1] \nname = \"in_data\"\n\n[output]\n[output.output1] \nname = \"out_data\" "
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_response/mnist_response.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport _flowunit as modelbox\nimport numpy as np\nimport json\n\nclass MnistResponseFlowUnit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data = data_context.input(\"in_data\")\n        out_data = data_context.output(\"out_data\")\n\n        for buffer in in_data:\n            result_str = ''\n            if buffer.has_error():\n                error_msg = buffer.get_error_msg()\n                result = {\n                    \"error_msg\": str(error_msg)\n                }\n            else:\n                max_index = np.argmax(buffer.as_object())\n                result = {\n                    \"predict_result\": str(max_index)\n                }\n\n            result_str = (json.dumps(result) + chr(0)).encode('utf-8').strip()\n            add_buffer = modelbox.Buffer(self.get_bind_device(), result_str)\n            out_data.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n\n    def data_group_pre(self, 
data_context):\n        return modelbox.Status()\n\n    def data_group_post(self, data_context):\n        return modelbox.Status()"
  },
  {
    "path": "examples/project/mnist-mindspore/src/flowunit/mnist_response/mnist_response.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_response\" \ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"mnist_response\" \nentry = \"mnist_response@MnistResponseFlowUnit\" \ntype = \"python\" \n\nstream = false \ncondition  = false\ncollapse = false \ncollapse_all = false \nexpand = false \nexception_visible = true\n\n[config]\n\n[input]\n[input.input1] \nname = \"in_data\"\n\n[output]\n[output.output1] \nname = \"out_data\""
  },
  {
    "path": "examples/project/mnist-mindspore/src/graph/mnist.toml",
    "content": "[driver]\ndir = [\"@APPLICATION_PATH@/flowunit\"]\n[profile]\nprofile=false\ntrace=false\ndir=\"\"\n[flow]\nname = \"MNIST\"\ndesc = \"Mindspore MNIST detection for image\"\n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph mnist_sample {\n    node [shape=Mrecord]\n    httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, time_out_ms=5000, endpoint=\"http://0.0.0.0:8190\", max_requests=100]\n    mnist_preprocess[type=flowunit, flowunit=mnist_preprocess, device=cpu]\n    mnist_infer[type=flowunit, flowunit=mnist_infer, device=cpu, deviceid=0, batch_size=1]\n    mnist_response[type=flowunit, flowunit=mnist_response, device=cpu]\n    httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu]\n\n    httpserver_sync_receive:out_request_info -> mnist_preprocess:in_data\n    mnist_preprocess:out_data -> mnist_infer:input\n    mnist_infer:output -> mnist_response:in_data\n    mnist_response:out_data -> httpserver_sync_reply:in_reply_info\n}\n'''\n\n"
  },
  {
    "path": "examples/project/resize/desc.toml",
    "content": "name = \"resize\"\ndesc = \"A resize example project template for modelbox\""
  },
  {
    "path": "examples/project/resize/src/flowunit/resize_flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"resize_test\")\nset(UNIT_DEVICE \"cpu\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif(NOT DEFINED MODELBOX_PROJECT_VERSION_MAJOR)\n    # build from flowunit cmakelists, not from project cmakelists\n    set(MODELBOX_PROJECT_VERSION_MAJOR 0)\n    set(MODELBOX_PROJECT_VERSION_MINOR 0)\n    set(MODELBOX_PROJECT_VERSION_PATCH 1)\n    set(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/project/default )\n    set(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT}/lib ) \n    set(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT}/bin ) \n    if(${UNIT_DEVICE} STREQUAL \"cuda\" )\n        find_package(CUDA 10.0)\n    endif()\nendif()\n\n\nif(${UNIT_DEVICE} STREQUAL \"cuda\" )\n    if(NOT CUDA_FOUND)\n        message(FATAL_ERROR \"cannot find cuda in current environment ,please checkout you flowunit device type!\")\n    endif()\nendif()\n\nif(${UNIT_DEVICE} STREQUAL \"ascend\" )\n    if(NOT ACL_FOUND OR NOT DSMI_FOUND)\n        message(FATAL_ERROR \"cannot find acl or dsmi in current environment ,please checkout you flowunit device type!\")\n    endif()\nendif()\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable resize flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc 
*.c)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(UNIT_SHARED modelbox-${CMAKE_PROJECT_NAME}-${UNIT_DEVICE}-${UNIT_NAME})\n\nadd_library(${UNIT_SHARED} SHARED ${UNIT_SOURCE})\n\nset_target_properties(\n  ${UNIT_SHARED} PROPERTIES\n  SOVERSION ${MODELBOX_PROJECT_VERSION_MAJOR}\n  VERSION ${MODELBOX_PROJECT_VERSION_MAJOR}.${MODELBOX_PROJECT_VERSION_MINOR}.${MODELBOX_PROJECT_VERSION_PATCH}\n)\n\ntarget_link_libraries(${UNIT_SHARED} pthread)\ntarget_link_libraries(${UNIT_SHARED} rt)\ntarget_link_libraries(${UNIT_SHARED} dl)\ntarget_link_libraries(${UNIT_SHARED} ${OpenCV_LIBS})\n\ninstall(TARGETS ${UNIT_SHARED}\n        COMPONENT ${UNIT_COMPONENT}\n        RUNTIME DESTINATION ${RELEASE_PACKAGE_DIR_BIN}\n        LIBRARY DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n        ARCHIVE DESTINATION ${RELEASE_PACKAGE_DIR_LIB}  \n        OPTIONAL)\n\n# for test\nlist(APPEND UNIT_TEST_TARGET ${UNIT_SHARED})\nlist(APPEND UNIT_TEST_LINK_LIBRARIES ${UNIT_LINK_LIBRARY})\nset(UNIT_TEST_TARGET ${UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(UNIT_TEST_LINK_LIBRARIES ${UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "examples/project/resize/src/flowunit/resize_flowunit/resize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"resize_flowunit.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nResizeFlowUnitTest::ResizeFlowUnitTest(){};\nResizeFlowUnitTest::~ResizeFlowUnitTest(){};\n\nmodelbox::Status ResizeFlowUnitTest::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  dest_width_ = opts->GetUint32(\"image_width\", 0);\n  dest_height_ = opts->GetUint32(\"image_height\", 0);\n\n  if (dest_width_ <= 0 || dest_height_ <= 0) {\n    auto errMsg = \"resize width or height is not configured or invalid.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status ResizeFlowUnitTest::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ResizeFlowUnitTest::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process image cvresize\";\n\n  auto input_bufs = data_ctx->Input(\"in_1\");\n  auto output_bufs = data_ctx->Output(\"out_1\");\n\n  if (input_bufs->Size() <= 0) {\n    auto errMsg = \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  size_t channel = RGB_CHANNELS;\n  std::vector<size_t> sub_shape{dest_width_, 
dest_height_, channel};\n  std::vector<size_t> tensor_shape(\n      input_bufs->Size(), modelbox::Volume(sub_shape) * sizeof(u_char));\n  output_bufs->Build(tensor_shape);\n\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t channel = 0;\n    std::string pix_fmt;\n    bool exists = false;\n    exists = input_bufs->At(i)->Get(\"height\", height);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key height\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key height\"};\n    }\n\n    exists = input_bufs->At(i)->Get(\"width\", width);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key width\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key width\"};\n    }\n\n    exists = input_bufs->At(i)->Get(\"pix_fmt\", pix_fmt);\n    if (!exists && !input_bufs->At(i)->Get(\"channel\", channel)) {\n      MBLOG_ERROR << \"meta don't have key pix_fmt or channel\";\n      return {modelbox::STATUS_NOTSUPPORT,\n              \"meta don't have key pix_fmt or channel\"};\n    }\n\n    if (exists && pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n      MBLOG_ERROR << \"unsupport pix format.\";\n      return {modelbox::STATUS_NOTSUPPORT, \"unsupport pix format.\"};\n    }\n\n    channel = RGB_CHANNELS;\n    MBLOG_DEBUG << \"get \" << width << \" rows \" << height << \" channel \"\n                << channel;\n\n    auto input_data =\n        static_cast<const u_char *>(input_bufs->ConstBufferData(i));\n\n    cv::Mat img_data(cv::Size(width, height), CV_8UC3);\n    memcpy(img_data.data, input_data, input_bufs->At(i)->GetBytes());\n\n    MBLOG_DEBUG << \"ori image : cols \" << img_data.cols << \" rows \"\n                << img_data.rows << \" channel \" << img_data.channels();\n\n    // resize image\n    cv::Size destSize = cv::Size(dest_width_, dest_height_);\n    cv::Mat img_dest;\n    cv::resize(img_data, img_dest, destSize, 0, 0, cv::INTER_LINEAR);\n\n    // output 
resize image\n    auto output = static_cast<uchar *>(output_bufs->MutableBufferData(i));\n    memcpy(output, img_dest.data, img_dest.total() * img_dest.elemSize());\n    output_bufs->At(i)->Set(\"width\", (int32_t)dest_width_);\n    output_bufs->At(i)->Set(\"height\", (int32_t)dest_height_);\n    output_bufs->At(i)->Set(\"channel\", channel);\n    output_bufs->At(i)->Set(\"pix_fmt\", pix_fmt);\n    output_bufs->At(i)->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    output_bufs->At(i)->Set(\n        \"shape\",\n        std::vector<size_t>{(size_t)dest_height_, (size_t)dest_width_, 3});\n    output_bufs->At(i)->Set(\"layout\", std::string(\"hwc\"));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ResizeFlowUnitTest, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_1\", modelbox::DEVICE_TYPE));\n  desc.AddFlowUnitOutput(\n      modelbox::FlowUnitOutput(\"out_1\", modelbox::DEVICE_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"640\", \"the resize width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n                                                  \"480\", \"the resize height\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "examples/project/resize/src/flowunit/resize_flowunit/resize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ResizeFlowUnitTest_CPU_H_\n#define MODELBOX_FLOWUNIT_ResizeFlowUnitTest_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"resize_test\";\nconstexpr const char *FLOWUNIT_DESC = \"A resize test flowunit on CPU\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconst int RGB_CHANNELS = 3;\n\nclass ResizeFlowUnitTest : public modelbox::FlowUnit {\n public:\n  ResizeFlowUnitTest();\n  virtual ~ResizeFlowUnitTest();\n\n  modelbox::Status Open(const std::shared_ptr<modelbox::Configuration> &opts);\n\n  modelbox::Status Close();\n\n  /* run when processing data */\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> data_ctx);\n\n private:\n  cv::InterpolationFlags GetCVResizeMethod(std::string resizeType);\n\n private:\n  uint32_t dest_width_{224};\n  uint32_t dest_height_{224};\n  std::string method_{\"INTER_LINEAR\"};\n};\n\n#endif  // MODELBOX_FLOWUNIT_ResizeFlowUnitTest_CPU_H_\n"
  },
  {
    "path": "examples/project/resize/src/graph/resize.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[driver]\nskip-default = false\ndir=[\n  \"@APPLICATION_PATH@/flowunit\"\n]\n[flow]\ndesc = \"A resize modelbox project\"\n[profile]\nprofile=false\ntrace=false\ndir=\"\"  \n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph graph_resize {\n  video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, repeat=20, source_url=\"/xxx/xxx.mp4\"]\n  videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, queue_size_event=1000, ] \n  videodecoder[type=flowunit, flowunit=video_decoder, device=cuda, deviceid=0, pix_fmt=\"nv12\"]\n  output1[type=output]  \n  video_input:out_video_url -> videodemuxer:in_video_url\n  videodemuxer:out_video_packet -> videodecoder:in_video_packet\n  videodecoder:out_video_frame -> output1\n}'''\n"
  },
  {
    "path": "examples/project/resize/test/flowunit/resize_flowuint_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <mock_modelbox.h>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass ResizeFlowUnitTest : public testing::Test {\n public:\n  ResizeFlowUnitTest() : mock_modelbox_(std::make_shared<MockModelBox>()) {}\n\n protected:\n  virtual void SetUp(){};\n  virtual void TearDown() { mock_modelbox_->Stop(); };\n  std::shared_ptr<MockModelBox> GetMockModelbox() { return mock_modelbox_; }\n\n private:\n  std::shared_ptr<MockModelBox> mock_modelbox_;\n};\n\nTEST_F(ResizeFlowUnitTest, TestCase1) {\n  /*create graph config , build and run, \"input1\" and \"output2\" are virtual\n    nodes used to send or receive buffer.\n    you can add \"input2\" or \"input3\" when there are multiple inputs*/\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n            [log]\n            level=\"DEBUG\"\n            [driver]\n            skip-default=false\n            dir=[\")\" + test_lib_dir +\n                             \"\\\"]\\n    \" +\n                             R\"([graph]\n            graphconf = '''digraph demo {      
                                                                      \n                input1[type=input]\n                resize_test[type=flowunit, flowunit=resize_test, device=cpu, deviceid=0, label=\"<in_1> | <out_1>\", image_width=128, image_height=128,batch_size=5]\n                output1[type=output]                                \n                input1 -> resize_test:in_1 \n                resize_test:out_1 -> output1                                                                      \n                }'''\n            format = \"graphviz\"\n        )\";\n  auto mock_modelbox = GetMockModelbox();\n  auto ret = mock_modelbox->BuildAndRun(\"graph_name\", toml_content, -1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  /*create buffer list and fill parmeters if you want*/\n  auto ext_data = mock_modelbox->GetFlow()->CreateExternalDataMap();\n  EXPECT_NE(ext_data, nullptr);\n  auto buffer_list = ext_data->CreateBufferList();\n  EXPECT_NE(buffer_list, nullptr);\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  buffer_list->Build({img.total() * img.elemSize()});\n  auto buffer = buffer_list->At(0);\n  buffer->Set(\"width\", img.cols);\n  buffer->Set(\"height\", img.rows);\n  buffer->Set(\"width_stride\", img.cols * 3);\n  buffer->Set(\"height_stride\", img.rows);\n  buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  memcpy(buffer->MutableData(), img.data, img.total() * img.elemSize());\n\n  /*send buffer list to port \"input1\" ,and then transmit to next flowunit*/\n  auto status = ext_data->Send(\"input1\", buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  status = ext_data->Close();\n  EXPECT_EQ(status, STATUS_OK);\n\n  /*wait for output buffer list for port \"output1\" */\n  std::vector<std::shared_ptr<BufferList>> output_buffer_lists =\n      mock_modelbox->GetOutputBufferList(ext_data, \"output1\");\n\n  /*check out whether the results meet expectations*/\n  EXPECT_EQ(output_buffer_lists.size(), 1);\n  auto output_buffer_list = 
output_buffer_lists[0];\n  EXPECT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  int32_t width = 0;\n  int32_t height = 0;\n  auto exists = output_buffer->Get(\"width\", width);\n  EXPECT_EQ(exists, true);\n  exists = output_buffer->Get(\"height\", height);\n  EXPECT_EQ(exists, true);\n  void *img_data = const_cast<void *>(output_buffer->ConstData());\n  cv::Mat out_img(cv::Size(width, height), CV_8UC3, img_data);\n  // cv::imwrite(std::string(TEST_ASSETS) + \"/result.jpg\", out_img);\n}\n\nTEST_F(ResizeFlowUnitTest, TestCase2) {\n  /*create graph config , build and run, \"input1\" and \"output2\" are virtual\n   * nodes used to send or receive buffer*/\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n            [log]\n            level=\"INFO\"\n            [driver]\n            skip-default=false\n            dir=[\")\" + test_lib_dir +\n                             \"\\\"]\\n    \" +\n                             R\"([graph]\n                graphconf = '''digraph demo {                                                                            \n                    input1[type=input]                                          \n                    videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0]\n                    videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, pix_fmt=rgb, queue_size = 16, batch_size=5]\n                    resize_test[type=flowunit, flowunit=resize_test, device=cpu, deviceid=0, label=\"<in_1> | <out_1>\", image_width=128, image_height=128, batch_size=5]\n                    output1[type=output]               \n                    input1 -> videodemuxer:in_video_url\n                    videodemuxer:out_video_packet -> videodecoder:in_video_packet\n                    videodecoder:out_video_frame -> resize_test:in_1                 \n                    resize_test:out_1 -> output1                               
                                       \n                    }'''\n                format = \"graphviz\"\n        )\";\n\n  auto mock_modelbox = GetMockModelbox();\n  auto ret = mock_modelbox->BuildAndRun(\"graph_name\", toml_content, -1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  /*create buffer list and fill parmeters if you want*/\n  auto ext_data = mock_modelbox->GetFlow()->CreateExternalDataMap();\n  EXPECT_NE(ext_data, nullptr);\n  auto buffer_list = ext_data->CreateBufferList();\n  EXPECT_NE(ext_data, nullptr);\n  auto source_url = std::string(TEST_ASSETS) + \"/test.mp4\";\n  buffer_list->Build({source_url.size() + 1});\n  auto buffer = buffer_list->At(0);\n  memcpy(buffer->MutableData(), source_url.data(), source_url.size() + 1);\n  buffer->Set(\"source_url\", source_url);\n  auto data_meta = std::make_shared<modelbox::DataMeta>();\n  data_meta->SetMeta(\"source_url\", std::make_shared<std::string>(source_url));\n  ext_data->SetOutputMeta(\"input1\", data_meta);\n\n  /*send buffer list to port \"input1\" ,and then transmit to next flowunit*/\n  auto status = ext_data->Send(\"input1\", buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  status = ext_data->Close();\n  EXPECT_EQ(status, STATUS_OK);\n\n  /*wait for output buffer list for port \"output1\" */\n  std::vector<std::shared_ptr<BufferList>> output_buffer_lists =\n      mock_modelbox->GetOutputBufferList(ext_data, \"output1\");\n\n  /*check out whether the results meet expectations*/\n  uint32_t count = 1;\n  for (auto &output_buffer_list : output_buffer_lists) {\n    for (size_t i = 0; i < output_buffer_list->Size(); i++) {\n      int32_t width = 0;\n      int32_t height = 0;\n      auto output_buffer = output_buffer_list->At(i);\n      auto exists = output_buffer->Get(\"width\", width);\n      EXPECT_EQ(exists, true);\n      exists = output_buffer->Get(\"height\", height);\n      EXPECT_EQ(exists, true);\n      void *img_data = const_cast<void *>(output_buffer->ConstData());\n      cv::Mat 
out_img(cv::Size(width, height), CV_8UC3, img_data);\n      // cv::imwrite(std::string(TEST_ASSETS) + \"/\" + std::to_string(count) +\n      // \".jpg\", out_img);\n      count++;\n    }\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "examples/service-plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"cannot build the project in the source directory! Out-of-source build is enforced!\")\nendif()\n \nset(PLUGIN_NAME \"example\")\n\nif(NOT DEFINED MODELBOX_PROJECT_VERSION_MAJOR)\n    # build from flowunit cmakelists, not from project cmakelists\n    set(MODELBOX_PROJECT_VERSION_MAJOR 0)\n    set(MODELBOX_PROJECT_VERSION_MINOR 0)\n    set(MODELBOX_PROJECT_VERSION_PATCH 1)\n    set(RELEASE_PACKAGE_DIR_ROOT /opt/modelbox/service-plugin)\n    set(RELEASE_PACKAGE_DIR_LIB ${RELEASE_PACKAGE_DIR_ROOT}/lib ) \n    set(RELEASE_PACKAGE_DIR_BIN ${RELEASE_PACKAGE_DIR_ROOT}/bin ) \nendif()\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -fno-strict-aliasing -std=c++11\")\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -Wall -fno-strict-aliasing\")\n \nfile(GLOB UNIT_SOURCE *.cpp *.cc *.c)\nfile(GLOB_RECURSE SRC_UNIT_SOURCE src/*.cpp src/*.cc src/*.c)\nlist(APPEND UNIT_SOURCE ${SRC_UNIT_SOURCE})\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\n\nset(UNIT_SHARED ${CMAKE_PROJECT_NAME}-${PLUGIN_NAME}-plugin)\nadd_library(${UNIT_SHARED} SHARED ${UNIT_SOURCE})\n\nset_target_properties(${UNIT_SHARED} PROPERTIES \n    OUTPUT_NAME \"${PLUGIN_NAME}\"\n    PREFIX \"\"\n    SUFFIX \".so\")\n\ntarget_link_libraries(${UNIT_SHARED} 
pthread)\ntarget_link_libraries(${UNIT_SHARED} rt)\ntarget_link_libraries(${UNIT_SHARED} dl)\n\ninstall(TARGETS ${UNIT_SHARED} \n    COMPONENT ${UNIT_COMPONENT}\n    RUNTIME DESTINATION ${RELEASE_PACKAGE_DIR_BIN}\n    LIBRARY DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    ARCHIVE DESTINATION ${RELEASE_PACKAGE_DIR_LIB}\n    OPTIONAL\n    )"
  },
  {
    "path": "examples/service-plugin/example.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"example.h\"\n#include <modelbox/common/command.h>\n\nclass ToolCommandExample : public modelbox::ToolCommand {\n public:\n  ToolCommandExample() {};\n  virtual ~ToolCommandExample() {};\n\n  int Run(int argc, char *argv[]) {\n    TOOL_COUT << \"Example command output message.\" << std::endl;\n    TOOL_CERR << \"Example command stderror message.\" << std::endl;\n    return 0;\n  }\n  std::string GetHelp() {\n    return \"Example Help.\";\n  }\n\n  std::string GetCommandName() { return \"example\"; };\n  std::string GetCommandDesc() { return \"control server log\"; };\n};\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandExample)\n\nstd::shared_ptr<modelbox::Plugin> CreatePlugin() {\n  MBLOG_INFO << \"Example create success.\";\n  return std::make_shared<ExamplePlugin>();\n}\n\nbool ExamplePlugin::Init(std::shared_ptr<modelbox::Configuration> config) {\n  MBLOG_INFO << \"Example plugin Init.\";\n  return true;\n}\n\nbool ExamplePlugin::Start() {\n  MBLOG_INFO << \"Example plugin Start.\";\n  return true;\n}\n\nbool ExamplePlugin::Stop() {\n  MBLOG_INFO << \"Example plugin Stop.\";\n  return true;\n}\n"
  },
  {
    "path": "examples/service-plugin/example.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef EXAMPLE_PLUGIN_PLUGIN_H_\n#define EXAMPLE_PLUGIN_PLUGIN_H_\n\n#include <string>\n#include \"modelbox/base/status.h\"\n#include \"modelbox/server/job_manager.h\"\n#include \"modelbox/server/plugin.h\"\n\nclass ExamplePlugin : public modelbox::Plugin {\n public:\n  ExamplePlugin() = default;\n  ~ExamplePlugin() override = default;\n\n  virtual bool Init(std::shared_ptr<modelbox::Configuration> config) override;\n  virtual bool Start() override;\n  virtual bool Stop() override;\n};\n\n#endif  // EXAMPLE_PLUGIN_PLUGIN_H_"
  },
  {
    "path": "package/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\nset(CPACK_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR} CACHE INTERNAL \"\")\nset(CPACK_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nfunction(MODELBOX_CPACK)\n    set(CPACK_RPM_COMPONENT_INSTALL ON PARENT_SCOPE)\n    set(CPACK_DEB_COMPONENT_INSTALL ON PARENT_SCOPE)\n    set(CPACK_ARCHIVE_COMPONENT_INSTALL OFF PARENT_SCOPE)\n    set(CPACK_SET_DESTDIR ON PARENT_SCOPE)\n    set(CPACK_STRIP_FILES ON PARENT_SCOPE)\n    find_program(TAR tar)\n    find_program(DPKG dpkg)\n    find_program(RPM rpm)\n    set(MODELBOX_PACK_NAME \"${CPACK_PACKAGE_NAME}\")\n    if(NOT CPACK_PACKAGE_NAME)\n        set(MODELBOX_PACK_NAME \"${CMAKE_PROJECT_NAME}\")\n    endif()\n    \n    if(TAR) \n        set(CPACK_GENERATOR \"${CPACK_GENERATOR}TGZ;\")\n    endif()\n    if (NOT STANDALONE)\n        if(DPKG) \n            set(CPACK_GENERATOR \"${CPACK_GENERATOR}DEB;\")\n        endif()\n        if(RPM) \n            set(CPACK_GENERATOR \"${CPACK_GENERATOR}RPM;\")\n        endif()\n    endif()\n\n    set(CPACK_GENERATOR \"${CPACK_GENERATOR}\" PARENT_SCOPE)\n\n    set(CPACK_COMPONENT_CPU-DEVICE-FLOWUNIT_DESCRIPTION \"Modelbox CPU flowunit plugins\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_CPU-DEVICE-FLOWUNIT-DEVEL_DESCRIPTION \"Modelbox CPU flowunit plugins - development\" PARENT_SCOPE)\n    
set(CPACK_COMPONENT_ASCEND-DEVICE-FLOWUNIT_DESCRIPTION \"Modelbox Ascend flowunit plugins\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_ASCEND-DEVICE-FLOWUNIT-DEVEL_DESCRIPTION \"Modelbox Ascend flowunit plugins - development\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_CUDA-DEVICE-FLOWUNIT_DESCRIPTION \"Modelbox Cuda flowunit plugins\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_CUDA-DEVICE-FLOWUNIT-DEVEL_DESCRIPTION \"Modelbox Cuda flowunit plugins - development\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_GRAPH-GRAPHVIZ_DESCRIPTION \"Modelbox graph parser for graphviz\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_VIRTUALDRIVER-DRIVER-INFERENCE_DESCRIPTION \"Modelbox virtual plugin for inference\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_VIRTUALDRIVER-DRIVER-PYTHON_DESCRIPTION \"Modelbox virtual plugin for python\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_SOLUTION_DESCRIPTION \"Modelbox solutions\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_SERVER_DESCRIPTION \"Modelbox service\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_SERVER-DEVEL_DESCRIPTION \"Modelbox service - development\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_DOCUMENT_DESCRIPTION \"Modelbox document\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_LIBMODELBOX_DESCRIPTION \"Modelbox framework library\" PARENT_SCOPE)\n    set(CPACK_COMPONENT_LIBMODELBOX-DEVEL_DESCRIPTION \"Modelbox framework library - development\" PARENT_SCOPE)   \n\n    # deb package configuration\n    set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON PARENT_SCOPE)\n    set(CPACK_DEBIAN_CPU-DEVICE-FLOWUNIT_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_CPU-DEVICE-FLOWUNIT_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_CPU-DEVICE-FLOWUNIT_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_CUDA-DEVICE-FLOWUNIT_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_CUDA-DEVICE-FLOWUNIT_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    
set(CPACK_DEBIAN_CUDA-DEVICE-FLOWUNIT_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_ASCEND-DEVICE-FLOWUNIT_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_ASCEND-DEVICE-FLOWUNIT_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_ASCEND-DEVICE-FLOWUNIT_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_GRAPH-GRAPHVIZE_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_GRAPH-GRAPHVIZE_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_GRAPH-GRAPHVIZE_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-INFERENCE_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-INFERENCE_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-INFERENCE_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-PYTHON_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-PYTHON_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_VIRTUALDRIVER-DRIVER-PYTHON_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    set(CPACK_DEBIAN_DEMO_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_DEMO_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_DEMO_PACKAGE_CONTROL_EXTRA ${CPACK_SOURCE_DIR}/debian/postinst PARENT_SCOPE)\n    configure_file(${CPACK_SOURCE_DIR}/debian/modelbox-server/postinst.in ${CPACK_BINARY_DIR}/debian/modelbox-server/postinst @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/debian/modelbox-server/postrm.in ${CPACK_BINARY_DIR}/debian/modelbox-server/postrm @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/debian/modelbox-server/conffiles.in 
${CPACK_BINARY_DIR}/debian/modelbox-server/conffiles @ONLY)\n    set(CPACK_DEBIAN_SERVER_PACKAGE_SECTION \"libs\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA\n        ${CPACK_BINARY_DIR}/debian/modelbox-server/postinst\n        ${CPACK_BINARY_DIR}/debian/modelbox-server/postrm\n        ${CPACK_BINARY_DIR}/debian/modelbox-server/conffiles PARENT_SCOPE)\n\n    set(CPACK_DEBIAN_PACKAGE_MAINTAINER \"Huawei Technologies Co., Ltd.\" PARENT_SCOPE)\n\n    # rpm package configuration\n    set(CPACK_PACKAGE_RELOCATABLE OFF PARENT_SCOPE)\n    set(CPACK_RPM_PACKAGE_AUTOREQ OFF PARENT_SCOPE)\n    set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION\n        \"/usr/local\"\n        \"/usr/local/bin\"\n        \"/usr/local/lib\"\n        \"/usr/local/lib64\"\n        \"/usr/local/etc\"\n        \"/usr/local/include\"\n        \"/usr/lib/systemd\"\n        ${CMAKE_INSTALL_FULL_BINDIR}\n        ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        ${CMAKE_INSTALL_FULL_LIBDIR}\n        \"${CMAKE_INSTALL_FULL_LIBDIR}/pkgconfig\"\n        ${SYSTEMDSYSTEMUNITDIR} PARENT_SCOPE)\n    set(CPACK_RPM_CPU-DEVICE-FLOWUNIT_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_CPU-DEVICE-FLOWUNIT_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_CUDA-DEVICE-FLOWUNIT_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_CUDA-DEVICE-FLOWUNIT_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_ASCEND-DEVICE-FLOWUNIT_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_ASCEND-DEVICE-FLOWUNIT_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_GRAPH-GRAPHVIZE_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    
set(CPACK_RPM_GRAPH-GRAPHVIZE_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_VIRTUALDRIVER-DRIVER-INFERENCE_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_VIRTUALDRIVER-DRIVER-INFERENCE_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_VIRTUALDRIVER-DRIVER-PYTHON_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_VIRTUALDRIVER-DRIVER-PYTHON_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/postscript PARENT_SCOPE)\n    set(CPACK_RPM_DEMO_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox\" PARENT_SCOPE)\n    set(CPACK_RPM_DEMO_POST_INSTALL_SCRIPT_FILE ${CPACK_SOURCE_DIR}/rpm/demo/postscript PARENT_SCOPE)\n    set(CPACK_RPM_SERVER_PACKAGE_REQUIRES \"${MODELBOX_PACK_NAME}-libmodelbox, ${MODELBOX_PACK_NAME}-graph-graphviz\" PARENT_SCOPE)\n    configure_file(${CPACK_SOURCE_DIR}/rpm/modelbox-server/postscript.in ${CPACK_BINARY_DIR}/rpm/modelbox-server/postscript @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/rpm/modelbox-server/postunscript.in ${CPACK_BINARY_DIR}/rpm/modelbox-server/postunscript @ONLY)\n    configure_file(${CPACK_SOURCE_DIR}/rpm/modelbox-server/conffiles.in ${CPACK_BINARY_DIR}/rpm/modelbox-server/conffiles @ONLY)\n    set(CPACK_RPM_SERVER_POST_INSTALL_SCRIPT_FILE ${CPACK_BINARY_DIR}/rpm/modelbox-server/postscript PARENT_SCOPE)\n    set(CPACK_RPM_SERVER_POST_UNINSTALL_SCRIPT_FILE ${CPACK_BINARY_DIR}/rpm/modelbox-server/postunscript PARENT_SCOPE)\n    set(CPACK_RPM_SERVER_USER_FILELIST \n        \"%config ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/modelbox.conf\"\n        \"%config ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/modelbox-opts\" PARENT_SCOPE)\n    set(CPACK_OUTPUT_FILE_PREFIX ${RELEASE_PACKAGE_DIR} PARENT_SCOPE)\n    set(CPACK_PACKAGE_DIRECTORY ${CMAKE_BINARY_DIR}/cpack PARENT_SCOPE)\n    \n    get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS)\n    list(REMOVE_ITEM 
CPACK_COMPONENTS_ALL \"Unspecified\")\n    set(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} PARENT_SCOPE) \nendfunction(MODELBOX_CPACK)\n\nset(${MODELBOX_CPACK} ${MODELBOX_CPACK} CACHE INTERNAL \"\")\nset(CPACK_PACKAGE_VERSION_MAJOR ${MODELBOX_VERSION_MAJOR})\nset(CPACK_PACKAGE_VERSION_MINOR ${MODELBOX_VERSION_MINOR})\nset(CPACK_PACKAGE_VERSION_PATCH ${MODELBOX_VERSION_PATCH})\n\nif (NOT DISABLE_MODELBOX_CPACK)\n    include(CPackComponent)\n    MODELBOX_CPACK()\n    include(CPack)\nendif()"
  },
  {
    "path": "package/debian/modelbox-server/conffiles.in",
    "content": "@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox.conf\n@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox-opts\n@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager.conf\n@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager-opts"
  },
  {
    "path": "package/debian/modelbox-server/postinst.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ninstall_user() {\n    addgroup --system --quiet modelbox\n    adduser --system --quiet --ingroup modelbox --no-create-home --home /nonexistent modelbox\n    chown -R modelbox:modelbox /var/log/modelbox /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox\n    chown -R modelbox:modelbox /usr/local/etc/modelbox/graph\n    chmod 750 /var/log/modelbox\n}\n\nif [ \"$1\" = \"configure\" ]; then\n\tinstall_user\nfi\n\nldconfig\nsystemctl daemon-reload >/dev/null 2>&1\n\nexit 0\n"
  },
  {
    "path": "package/debian/modelbox-server/postrm.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nif [ \"$1\" = \"purge\" ]; then\n\tdeluser --system --quiet modelbox || true\nfi\n\nldconfig\nrm -rf /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid\nrm -fr /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/manager.pid\nsystemctl stop modelbox >/dev/null 2>&1\nsystemctl stop modelbox-manager >/dev/null 2>&1\nsystemctl daemon-reload >/dev/null 2>&1\n\nexit 0\n"
  },
  {
    "path": "package/debian/postinst",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nldconfig\n"
  },
  {
    "path": "package/rpm/demo/postscript",
    "content": "#!/bin/sh\n# Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\n\nchown -R modelbox:modelbox /opt/modelbox\nchmod 750 /opt/modelbox\nchmod 750 /opt/modelbox/demo\nldconfig\n"
  },
  {
    "path": "package/rpm/modelbox-server/conffiles.in",
    "content": "%config @CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox.conf\n%config @CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox-opts\n%config @CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager.conf\n%config @CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager-opts\n"
  },
  {
    "path": "package/rpm/modelbox-server/postscript.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ninstall_user() {\n    groupadd --system modelbox\n    useradd --system -s /sbin/nologin -N -g modelbox --no-create-home --home @CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox modelbox\n    chown -R modelbox:modelbox /usr/local/etc/modelbox\n    chown -R modelbox:modelbox /var/log/modelbox /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox\n    chown -R modelbox:modelbox /usr/local/etc/modelbox/graph\n    chmod 750 /var/log/modelbox\n}\n\ninstall_user\nldconfig\nsystemctl daemon-reload >/dev/null 2>&1\n\nexit 0\n"
  },
  {
    "path": "package/rpm/modelbox-server/postunscript.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nuserdel modelbox || true\nrm -rf /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid\nldconfig\nsystemctl stop modelbox >/dev/null 2>&1\nsystemctl daemon-reload >/dev/null 2>&1\n\nexit 0\n"
  },
  {
    "path": "package/rpm/postscript",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nldconfig\n"
  },
  {
    "path": "src/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nset(MODELBOX_DEMO_INSTALL_DIR \"${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/demo\")\n\nadd_subdirectory(libmodelbox)\nadd_subdirectory(drivers)\nadd_subdirectory(develop)\nadd_subdirectory(python)\nadd_subdirectory(java)\nadd_subdirectory(modelbox)\nadd_subdirectory(demo)"
  },
  {
    "path": "src/demo/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif (WITH_ALL_DEMO)\n    subdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\nelse()\n    list(APPEND SUBDIRS \"hello_world\")\n    list(APPEND SUBDIRS \"mnist\")\nendif()\n\nset(DEMO_SOURCE_DIR \"${CMAKE_CURRENT_LIST_DIR}\" CACHE INTERNAL \"\")\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\n\nlink_directories(${LIBMODELBOX_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_CPU_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_CUDA_BINARY_DIR})\nlink_directories(${LIBMODELBOX_DEVICE_ASCEND_BINARY_DIR})\n\nset(DEMO_MODEL_DIR \"${MODELBOX_DEMO_DIR}/model\")\nset(DEMO_VIDEO_DIR \"${MODELBOX_DEMO_DIR}/video\")\nset(DEMO_IMAGE_DIR \"${MODELBOX_DEMO_DIR}/image\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nadd_custom_target(demo\n    DEPENDS ${DRIVER_DEMO_TEST_TARGET}\n) \n"
  },
  {
    "path": "src/demo/car_detection/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_CAR_DETECTION_DIR \"${MODELBOX_DEMO_INSTALL_DIR}/car_detection\" CACHE INTERNAL \"\")\nset(DEMO_CAR_DETECTION_FLOWUNIT_DIR ${DEMO_CAR_DETECTION_DIR}/flowunit)\nset(DEMO_CAR_DETECTION_GRAPH_DIR ${DEMO_CAR_DETECTION_DIR}/graph)\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/car_detection/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_CAR_DETECTION_FLOWUNIT_DIR ${DEMO_CAR_DETECTION_DIR}/flowunit)\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\n"
  },
  {
    "path": "src/demo/car_detection/flowunit/car_detect/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"car_detect\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(MODEL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/yolox_nano_jit_trace_288x512.pt)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${MODEL_FILE} ${FLOWUNIT_PATH}/yolox_nano_jit_trace_288x512.pt COPYONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_CAR_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/car_detection/flowunit/car_detect/car_detect.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"car_detect\"\ndevice = \"cuda\"\nversion = \"1.0.0\"\ndescription = \"car detection infer\"\nentry = \"./yolox_nano_jit_trace_288x512.pt\"\ntype = \"inference\"\nvirtual_type = \"torch\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output\"\ntype = \"float\"\n"
  },
  {
    "path": "src/demo/car_detection/flowunit/yolox_post/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"yolox_post\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/yolox_utils.py ${FLOWUNIT_PATH}/yolox_utils.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_CAR_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)"
  },
  {
    "path": "src/demo/car_detection/flowunit/yolox_post/yolox_post.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np \nimport json \nimport cv2 \n\nfrom yolox_utils import *\n\nclass YoloXPost(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.net_h = config.get_int('net_h', 288)\n        self.net_w = config.get_int('net_w', 512)\n        self.num_classes = config.get_int('num_classes', 80)\n        self.num_grids = int((self.net_h / 32) * (self.net_w / 32)) * (1 + 2*2 + 4*4)\n        self.conf_thre = config.get_float('conf_threshold', 0.3)\n        self.nms_thre = config.get_float('iou_threshold', 0.4)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_image = data_context.input(\"in_image\")\n        in_feat = data_context.input(\"in_feat\")\n\n        out_data = data_context.output(\"out_data\")\n\n        for buffer_img, buffer_feat in zip(in_image, in_feat):\n            width = buffer_img.get('width')\n            height = buffer_img.get('height')\n            channel = buffer_img.get('channel')\n            frame_index = buffer_img.get('index')\n            modelbox.debug(\"get frame index: {}\".format(frame_index))\n\n            img_data = np.array(buffer_img.as_object(), copy=False)\n            img_data = img_data.reshape((height, width, channel))\n\n          
  feat_data = np.array(buffer_feat.as_object(), copy=False)\n            feat_data = feat_data.reshape((self.num_classes + 5, self.num_grids)).transpose()\n\n            ratio = min(self.net_h / height, self.net_w / width)\n            bboxes = postprocess(feat_data, (self.net_h, self.net_w), self.num_classes, self.conf_thre, self.nms_thre, ratio)\n            if bboxes is not None:\n                img_out = draw_bbox(img_data, bboxes)\n                add_buffer = modelbox.Buffer(self.get_bind_device(), img_out)\n                add_buffer.copy_meta(buffer_img)\n                out_data.push_back(add_buffer)\n            else:\n                out_data.push_back(buffer_img)\n            \n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/car_detection/flowunit/yolox_post/yolox_post.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"yolox_post\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"yolox postprocess\"\nentry = \"yolox_post@YoloXPost\"\ntype = \"python\"\n\n[config]\nnet_h = 288\nnet_w = 512\nnum_classes = 80\nconf_threshold = 0.3\niou_threshold = 0.4\n\n[input]\n[input.input1]\nname = \"in_image\"\n\n[input.input2]\nname = \"in_feat\"\n\n[output]\n[output.output1]\nname = \"out_data\""
  },
  {
    "path": "src/demo/car_detection/flowunit/yolox_post/yolox_utils.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport cv2\nimport time\nimport numpy as np\n\ncolors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],\n          [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85],\n          [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255],\n          [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255],\n          [255, 0, 170], [255, 0, 85], [85, 85, 255], [170, 170, 255], [170, 255, 170]]\ncnt_colors = len(colors)\n\n# car, bus, truck\ncoco_car_classes = [2, 5, 7]\n\n\ndef nms(boxes, scores, nms_thr):\n    \"\"\"Single class NMS implemented in Numpy.\"\"\"\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n    order = scores.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        inter = w * h\n        ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n        inds = np.where(ovr <= nms_thr)[0]\n        order = order[inds + 1]\n\n    return keep\n\n\ndef 
multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):\n    \"\"\"Multiclass NMS implemented in Numpy. Class-agnostic version.\"\"\"\n    cls_inds = scores.argmax(1)\n    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]\n\n    valid_score_mask = cls_scores > score_thr\n    if valid_score_mask.sum() == 0:\n        return None\n    valid_scores = cls_scores[valid_score_mask]\n    valid_boxes = boxes[valid_score_mask]\n    valid_cls_inds = cls_inds[valid_score_mask]\n    keep = nms(valid_boxes, valid_scores, nms_thr)\n    if keep:\n        dets = np.concatenate(\n            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1\n        )\n    return dets\n\n\ndef decode_outputs(outputs, img_size):\n    grids = []\n    expanded_strides = []\n\n    strides = [8, 16, 32]\n    hsizes = [img_size[0] // stride for stride in strides]\n    wsizes = [img_size[1] // stride for stride in strides]\n\n    for hsize, wsize, stride in zip(hsizes, wsizes, strides):\n        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))\n        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)\n        grids.append(grid)\n        shape = grid.shape[:2]\n        expanded_strides.append(np.full((*shape, 1), stride))\n\n    grids = np.concatenate(grids, 1)\n    expanded_strides = np.concatenate(expanded_strides, 1)\n    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides\n    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides\n\n    return outputs\n\n\ndef postprocess(image_pred, input_shape, num_classes, conf_thre=0.3, nms_thre=0.45, ratio=1.0):\n    predictions = decode_outputs(image_pred, input_shape)\n\n    boxes = predictions[:, :4]\n    scores = predictions[:, 4:5] * predictions[:, 5:]\n\n    boxes_xyxy = np.ones_like(boxes)\n    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.\n    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.\n    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.\n    boxes_xyxy[:, 3] = boxes[:, 1] + 
boxes[:, 3]/2.\n    boxes_xyxy /= ratio\n    detections = multiclass_nms_class_agnostic(boxes_xyxy, scores, nms_thre, conf_thre)\n\n    return detections\n\n\ndef draw_bbox(image, results):\n    h, w, c = image.shape\n    for bbox in results:\n        x1, y1, x2, y2, score, label = bbox\n\n        label = int(label)\n        if label not in coco_car_classes:\n            continue\n        \n        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n        x1 = max(0, x1)\n        y1 = max(0, y1)\n        x2 = min(x2, w)\n        y2 = min(y2, h)\n        score = str(score)[:4]\n        color = (0, 0, 255)\n        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n    \n    return image\n"
  },
  {
    "path": "src/demo/car_detection/graph/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(GRAPH_NAME \"car_detection.toml\")\nset(DEMO_GRAPH ${CMAKE_CURRENT_BINARY_DIR}/${GRAPH_NAME})\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/${GRAPH_NAME}.in ${DEMO_GRAPH})\n\ninstall(FILES \n${DEMO_GRAPH} DESTINATION ${DEMO_CAR_DETECTION_GRAPH_DIR}\nCOMPONENT demo\n)\n\nset(CAR_TEST_VIDEO \"car_test_video.mp4\")\ninstall(FILES\n    ${CMAKE_CURRENT_SOURCE_DIR}/${CAR_TEST_VIDEO} DESTINATION ${DEMO_VIDEO_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/car_detection/graph/car_detection.toml.in",
    "content": "[driver]\ndir = [\n\"@DEMO_CAR_DETECTION_FLOWUNIT_DIR@\"\n]\n[flow]\ndesc = \"car detection for video streams\"\n[graph]\nformat = \"graphviz\"\ngraphconf = \"\"\"digraph car_detection {\n    node [shape=Mrecord]\n    video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, source_url=\"@DEMO_VIDEO_DIR@/car_test_video.mp4\"]\n    videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0]\n    videodecoder[type=flowunit, flowunit=video_decoder, device=cuda, deviceid=0, pix_fmt=bgr]\n    image_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, image_width=512, image_height=288]\n    image_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0]\n    normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, standard_deviation_inverse=\"1,1,1\"]\n    model_inference[type=flowunit, flowunit=car_detect, device=cuda, deviceid=0, batch_size=1]\n    yolox_post[type=flowunit, flowunit=yolox_post, device=cpu, deviceid=0]\n    videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, encoder=mpeg4, format=mp4, default_dest_url=\"/tmp/car_detection_result.mp4\"]\n\n    video_input:out_video_url -> videodemuxer:in_video_url\n    videodemuxer:out_video_packet -> videodecoder:in_video_packet\n    videodecoder:out_video_frame -> image_resize:in_image\n    image_resize:out_image -> image_transpose:in_image\n    image_transpose:out_image -> normalize:in_data\n    normalize:out_data -> model_inference:input\n    model_inference:output -> yolox_post:in_feat\n    videodecoder:out_video_frame -> yolox_post:in_image\n    yolox_post:out_data -> videoencoder:in_video_frame\n}\"\"\"\n"
  },
  {
    "path": "src/demo/emotion_detection/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_EMOTION_DETECTION_DIR \"${MODELBOX_DEMO_INSTALL_DIR}/emotion_detection\" CACHE INTERNAL \"\")\nset(DEMO_EMOTION_DETECTION_FLOWUNIT_DIR ${DEMO_EMOTION_DETECTION_DIR}/flowunit)\nset(DEMO_EMOTION_DETECTION_GRAPH_DIR ${DEMO_EMOTION_DETECTION_DIR}/graph)\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/collapse_emotion/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"collapse_emotion\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n\ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/collapse_emotion/collapse_emotion.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np\n\nclass CollapseEmotion(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.emotion_list = [\"Surprise\", \"Fear\", \"Disgust\", \"Happiness\", \"Sadness\", \"Anger\", \"Neutral\"]\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        confidence_list = data_context.input(\"confidence\")\n        predicts_list = data_context.input(\"predicts\")\n        out_data_list = data_context.output(\"out_data\")\n\n        emotion_result = \"\"\n        for conf, predict in zip(confidence_list, predicts_list):\n            conf = np.array(conf.as_object(), dtype=np.float32)\n            predict = np.array(predict.as_object(), dtype=np.float32)\n\n            res = \"NoEmotion\"\n            if conf > 0.7:\n                res = self.emotion_list[np.argmax(predict)]\n            if len(emotion_result) > 0:\n                emotion_result += \",\"\n            emotion_result += res\n\n        emotion_buffer = modelbox.Buffer(self.get_bind_device(), emotion_result)\n        out_data_list.push_back(emotion_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n    
    return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/collapse_emotion/collapse_emotion.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"collapse_emotion\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"collapse all face emotion\"\nentry = \"collapse_emotion@CollapseEmotion\"\ntype = \"python\"\n\ncollapse = true\n\n[input]\n[input.input1]\nname = \"confidence\"\n\n[input.input2]\nname = \"predicts\"\n\n[output]\n[output.output1]\nname = \"out_data\"\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/custom_resize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"custom_resize\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/custom_resize/custom_resize.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np\nimport cv2\n\nclass CustomResize(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.max_edge = config.get_int(\"max_edge\", 320)\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_image_list = data_context.input(\"in_image\")\n        out_image_list = data_context.output(\"out_image\")\n\n        for buffer_img in in_image_list:\n            width = buffer_img.get(\"width\")\n            height = buffer_img.get(\"height\")\n            channel = buffer_img.get(\"channel\")\n\n            img_data = np.array(buffer_img.as_object(), dtype=np.uint8)\n            img_data = img_data.reshape(height, width, channel)\n\n            im_size_min = np.min([height, width])\n            im_size_max = np.max([height, width])\n            resize = self.max_edge / float(im_size_min)\n            if np.round(resize * im_size_max) > self.max_edge:\n                resize = self.max_edge / float(im_size_max)\n\n            resize_img = cv2.resize(img_data, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)\n            resize_img_height, resize_img_width, _ = resize_img.shape\n\n            add_buffer = modelbox.Buffer(self.get_bind_device(), resize_img)\n     
       add_buffer.copy_meta(buffer_img)\n            add_buffer.set(\"width\", resize_img_width)\n            add_buffer.set(\"height\", resize_img_height)\n            add_buffer.set(\"width_stride\", resize_img_width)\n            add_buffer.set(\"height_stride\", resize_img_height)\n            out_image_list.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/custom_resize/custom_resize.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"custom_resize\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"custom resize\"\nentry = \"custom_resize@CustomResize\"\ntype = \"python\"\n\n[config]\nmax_edge = 320\n\n[input]\n[input.input1]\nname = \"in_image\"\n\n[output]\n[output.output1]\nname = \"out_image\"\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/draw_emotion/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"draw_emotion\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/draw_emotion/draw_emotion.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np\nimport cv2\n\nclass DrawEmotion(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_face_list = data_context.input(\"in_face\")\n        in_emotion_list = data_context.input(\"in_emotion\")\n        out_data_list = data_context.output(\"out_data\")\n\n        for image, emotion in zip(in_face_list, in_emotion_list):\n            bboxes = image.get(\"bboxes\")\n            bboxes = np.array(bboxes).reshape(-1, 4)\n\n            width = image.get(\"width\")\n            height = image.get(\"height\")\n            channel = image.get(\"channel\")\n\n            out_img = np.array(image.as_object(), dtype=np.uint8)\n            out_img = out_img.reshape(height, width, channel)\n\n            emotion = emotion.as_object().split(\",\")\n            for box, emo in zip(bboxes, emotion):\n                cv2.rectangle(out_img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)\n                cv2.putText(out_img, emo, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\n            \n            add_buffer = modelbox.Buffer(self.get_bind_device(), out_img)\n            add_buffer.copy_meta(image)\n            
out_data_list.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/draw_emotion/draw_emotion.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"draw_emotion\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"draw emotion result on each face image\"\nentry = \"draw_emotion@DrawEmotion\"\ntype = \"python\"\n\n[input]\n[input.input1]\nname = \"in_emotion\"\n\n[input.input2]\nname = \"in_face\"\n\n[output]\n[output.output1]\nname = \"out_data\"\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/emotion_infer/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"emotion_infer\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${EMOTION_MODEL_FILE} ${FLOWUNIT_PATH}/emotion.pt COPYONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/emotion_infer/emotion_infer.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"emotion_infer\" \ndevice = \"cuda\" \nversion = \"1.0.0\" \ndescription = \"emotion infer\"\nentry = \"./emotion.pt\" \ntype = \"inference\" \nvirtual_type = \"torch\" \n\n[input]\n[input.input1] \nname = \"input\" \ntype = \"float\" \n\n[output]\n[output.output1] \nname = \"confidence\" \ntype = \"float\" \n\n[output.output2] \nname = \"predicts\" \ntype = \"float\" \n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/expand_box/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"expand_box\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/expand_box/expand_box.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np\n\nclass ExpandBox(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data_list = data_context.input(\"in_data\")\n        out_image_list = data_context.output(\"roi_image\")\n\n        for in_buffer in in_data_list:\n            width = in_buffer.get(\"width\")\n            height = in_buffer.get(\"height\")\n            channel = in_buffer.get(\"channel\")\n\n            img = np.array(in_buffer.as_object(), dtype=np.uint8)\n            img = img.reshape(height, width, channel)\n\n            bboxes = in_buffer.get(\"bboxes\")\n            bboxes = np.array(bboxes).reshape(-1, 4)\n            for box in bboxes:\n                img_roi = img[box[1]:box[3], box[0]:box[2]]\n                img_roi = img_roi[:, :, ::-1]\n\n                img_roi = img_roi.flatten()\n                add_buffer = modelbox.Buffer(self.get_bind_device(), img_roi)\n                add_buffer.copy_meta(in_buffer)\n                add_buffer.set(\"pix_fmt\", \"rgb\")\n                add_buffer.set(\"width\", int(box[2] - box[0]))\n                add_buffer.set(\"height\", int(box[3] - box[1]))\n                add_buffer.set(\"width_stride\", 
int(box[2] - box[0]))\n                add_buffer.set(\"height_stride\", int(box[3] - box[1]))\n                out_image_list.push_back(add_buffer)\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/expand_box/expand_box.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"expand_box\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"expand each box to emotion detection\"\nentry = \"expand_box@ExpandBox\"\ntype = \"python\"\n\nexpand = true\n\n[input]\n[input.input1]\nname = \"in_data\"\n\n[output]\n[output.output1]\nname = \"roi_image\"\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_detect/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"face_detect\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FACE_DETECTION_MODEL_FILE} ${FLOWUNIT_PATH}/face_detector.pt COPYONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_detect/face_detect.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"face_detect\" \ndevice = \"cuda\" \nversion = \"1.0.0\" \ndescription = \"face detection\"\nentry = \"./face_detector.pt\" \ntype = \"inference\" \nvirtual_type = \"torch\" \n\n[input]\n[input.input1] \nname = \"input\" \ntype = \"float\" \n\n[output]\n[output.output1] \nname = \"out_loc\" \ntype = \"float\" \n\n[output.output2] \nname = \"out_conf\" \ntype = \"float\" \n\n[output.output3] \nname = \"out_cls\" \ntype = \"float\" \n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_post/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"face_post\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/face_post_utils.py ${FLOWUNIT_PATH}/face_post_utils.py @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_EMOTION_DETECTION_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_post/face_post.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _flowunit as modelbox\nimport numpy as np\n\nfrom face_post_utils import postprocess\n\nclass FacePost(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.max_edge = config.get_int(\"max_edge\", 320)\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_image_list = data_context.input(\"in_image\")\n        in_loc_list = data_context.input(\"in_loc\")\n        in_conf_list = data_context.input(\"in_conf\")\n\n        has_face_list = data_context.output(\"has_face\")\n        no_face_list = data_context.output(\"no_face\")\n\n        for buffer_img, buffer_loc, buffer_conf in zip(in_image_list, in_loc_list, in_conf_list):\n            width = buffer_img.get(\"width\")\n            height = buffer_img.get(\"height\")\n            channel = buffer_img.get(\"channel\")\n\n            img_data = np.array(buffer_img.as_object())\n            img_data = img_data.reshape(height, width, channel)\n\n            im_size_min = np.min([height, width])\n            im_size_max = np.max([height, width])\n            resize = self.max_edge / float(im_size_min)\n            if np.round(resize * im_size_max) > self.max_edge:\n                resize = self.max_edge / float(im_size_max)\n            resize_height = 
int(height * resize)\n            resize_width = int(width * resize)\n            scale = np.array([resize_width, resize_height, resize_width, resize_height])\n\n            loc = np.array(buffer_loc.as_object())\n            conf = np.array(buffer_conf.as_object())\n            loc = np.reshape(loc, (-1, 4))\n            conf = np.reshape(conf, (-1, 2))\n\n            bboxes = postprocess((resize_height, resize_width), loc, conf, scale, resize)\n            if bboxes is None or bboxes.size == 0:\n                no_face_list.push_back(buffer_img)\n            else:\n                bboxes = np.delete(bboxes, -1, axis=1).astype(int)\n                buffer_img.set(\"bboxes\", bboxes.flatten().tolist())\n                has_face_list.push_back(buffer_img)\n                \n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n    \n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_post/face_post.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"face_post\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"face detection postprocess\"\nentry = \"face_post@FacePost\"\ntype = \"python\"\n\ncondition = true\n\n[config]\nmax_edge = 320\n\n[input]\n[input.input1]\nname = \"in_loc\"\n\n[input.input2]\nname = \"in_conf\"\n\n[input.input3]\nname = \"in_cls\"\n\n[input.input4]\nname = \"in_image\"\n\n[output]\n[output.output1]\nname = \"has_face\"\n\n[output.output2]\nname = \"no_face\"\n"
  },
  {
    "path": "src/demo/emotion_detection/flowunit/face_post/face_post_utils.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport cv2\nimport numpy as np\nfrom itertools import product\n\ndef get_priors(image_size):\n    steps = [8, 16, 32, 64]\n    min_sizes_list = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]\n    feature_maps = [[math.ceil(image_size[0] / step), math.ceil(image_size[1] / step)] for step in steps]\n    anchors = []\n    for index, map in enumerate(feature_maps):\n        min_sizes = min_sizes_list[index]\n        for map_y, map_x in product(range(map[0]), range(map[1])):\n            for min_size in min_sizes:\n                s_kx = min_size / image_size[1]\n                s_ky = min_size / image_size[0]\n                dense_cx = [x * steps[index] / image_size[1] for x in [map_x + 0.5]]\n                dense_cy = [y * steps[index] / image_size[0] for y in [map_y + 0.5]]\n                for cy, cx in product(dense_cy, dense_cx):\n                    anchors += [cx, cy, s_kx, s_ky]\n    \n    output = np.reshape(np.float32(anchors), (-1, 4))\n    return output\n\ndef decode(loc, priors, variances):\n    a = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]\n    b = priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])\n    boxes = np.concatenate((a, b), 1)\n    boxes[:, :2] -= boxes[:, 2:] / 2\n    boxes[:, 2:] += boxes[:, :2]\n    return boxes\n\ndef py_cpu_nms(dets, thresh):\n    x1 = dets[:, 0]\n    y1 
= dets[:, 1]\n    x2 = dets[:, 2]\n    y2 = dets[:, 3]\n    scores = dets[:, 4]\n\n    areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n    order = scores.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        inter = w * h\n        ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n        inds = np.where(ovr <= thresh)[0]\n        order = order[inds + 1]\n\n    return keep\n\ndef postprocess(image_size, loc, conf, scale, resize):\n    confidence_threshold = 0.3\n    top_k = 100\n    nms_threshold = 0.4\n\n    priors = get_priors(image_size)\n    boxes = decode(loc, priors, [0.1, 0.2])\n    boxes = boxes * scale / resize\n    scores = conf[:, 1]\n\n    # ignore low scores\n    inds = np.where(scores > confidence_threshold)[0]\n    boxes = boxes[inds]\n    scores = scores[inds]\n\n    # keep top-K before NMS\n    order = scores.argsort()[::-1][:top_k]\n    boxes = boxes[order]\n    scores = scores[order]\n\n    # do NMS\n    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n    keep = py_cpu_nms(dets, nms_threshold)\n    dets = dets[keep, :]\n\n    return dets\n"
  },
  {
    "path": "src/demo/emotion_detection/graph/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(GRAPH_NAME \"emotion_detection.toml\")\nset(DEMO_GRAPH ${CMAKE_CURRENT_BINARY_DIR}/${GRAPH_NAME})\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/${GRAPH_NAME}.in ${DEMO_GRAPH} @ONLY)\ninstall(FILES \n    ${DEMO_GRAPH} DESTINATION ${DEMO_EMOTION_DETECTION_GRAPH_DIR}\n    COMPONENT demo\n)\n\ninstall(FILES\n    ${EMOTION_TEST_VIDEO_FILE} DESTINATION ${DEMO_VIDEO_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/emotion_detection/graph/emotion_detection.toml.in",
    "content": "\n\n[driver]\ndir = [\n\"@DEMO_EMOTION_DETECTION_FLOWUNIT_DIR@\"\n]\n[flow]\ndesc = \"face emotion detection for video\"\n[graph]\nformat = \"graphviz\"\ngraphconf = \"\"\"digraph emotion_detection {\n    node [shape=Mrecord]\n    video_input[type=flowunit, flowunit=video_input, device=cpu, source_url=\"@DEMO_VIDEO_DIR@/emotion_test_video.mp4\"]\n    videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu]\n    videodecoder[type=flowunit, flowunit=video_decoder, device=cuda, pix_fmt=bgr]\n    custom_resize[type=flowunit, flowunit=custom_resize, device=cpu]\n    image_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu]\n    mean[type=flowunit, flowunit=mean, device=cpu, mean=\"104, 117, 123\"]\n    normalize[type=flowunit, flowunit=normalize, device=cpu, standard_deviation_inverse=\"1, 1, 1\"]\n    face_detect[type=flowunit, flowunit=face_detect, device=cuda]\n    face_post[type=flowunit, flowunit=face_post, device=cpu, batch_size=1]\n    expand_box[type=flowunit, flowunit=expand_box, device=cpu]\n    face_resize[type=flowunit, flowunit=resize, device=cpu, image_width=224, image_height=224]\n    face_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu]\n    face_mean[type=flowunit, flowunit=mean, device=cpu, mean=\"123.675, 116.28, 103.53\"]\n    face_normalize[type=flowunit, flowunit=normalize, device=cpu, standard_deviation_inverse=\"0.0171247538316637, 0.0175070028011204, 0.0174291938997821\"]\n    emotion_infer[type=flowunit, flowunit=emotion_infer, device=cuda, batch_size=1]\n    collapse_emotion[type=flowunit, flowunit=collapse_emotion, device=cpu]\n    draw_emotion[type=flowunit, flowunit=draw_emotion, device=cpu]\n    videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, encoder=mpeg4, format=mp4, default_dest_url=\"/tmp/emotion_detection_result.mp4\"]\n\n    video_input:out_video_url -> videodemuxer:in_video_url\n    videodemuxer:out_video_packet -> videodecoder:in_video_packet\n    
videodecoder:out_video_frame -> custom_resize:in_image\n    custom_resize:out_image -> image_transpose:in_image\n    image_transpose:out_image -> mean:in_data\n    mean:out_data -> normalize:in_data\n    normalize:out_data -> face_detect:input\n    face_detect:out_loc -> face_post:in_loc\n    face_detect:out_conf -> face_post:in_conf\n    face_detect:out_cls -> face_post:in_cls\n    videodecoder:out_video_frame -> face_post:in_image\n    face_post:has_face -> expand_box:in_data\n    expand_box:roi_image -> face_resize:in_image\n    face_resize:out_image -> face_transpose:in_image\n    face_transpose:out_image -> face_mean:in_data\n    face_mean:out_data -> face_normalize:in_data\n    face_normalize:out_data -> emotion_infer:input\n    emotion_infer:confidence -> collapse_emotion:confidence\n    emotion_infer:predicts -> collapse_emotion:predicts\n    collapse_emotion:out_data -> draw_emotion:in_emotion\n    face_post:has_face -> draw_emotion:in_face\n    draw_emotion:out_data -> videoencoder:in_video_frame\n    face_post:no_face -> videoencoder:in_video_frame\n}\"\"\"\n"
  },
  {
    "path": "src/demo/hello_world/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_HELLO_WORLD_DIR \"${MODELBOX_DEMO_INSTALL_DIR}/hello_world\" CACHE INTERNAL \"\")\nset(DEMO_HELLO_WORLD_FLOWUNIT_DIR ${DEMO_HELLO_WORLD_DIR}/flowunit)\nset(DEMO_HELLO_WORLD_GRAPH_DIR ${DEMO_HELLO_WORLD_DIR}/graph)\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/hello_world/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/hello_world/flowunit/hello_world/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"hello_world\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_HELLO_WORLD_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/hello_world/flowunit/hello_world/hello_world.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport _flowunit as modelbox\nimport json\nimport time\n\ndef addTimestamp(msg):\n    local_time = time.asctime(time.localtime(time.time()))\n    return '{} {}'.format(msg, str(local_time))\n\nclass HelloWorld(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data = data_context.input(\"in_data\")\n        out_data = data_context.output(\"out_data\")\n\n        for buffer in in_data:\n            request_body = json.loads(str(buffer))\n            msg = request_body.get(\"msg\")\n            msg = msg.title()\n            msg = addTimestamp(msg)\n\n            out_string = msg + chr(0)\n            out_buffer = modelbox.Buffer(self.get_bind_device(), out_string.encode('utf-8').strip())\n            out_data.push_back(out_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/hello_world/flowunit/hello_world/hello_world.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"hello_world\"\ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"show hello world\"\nentry = \"hello_world@HelloWorld\" \ntype = \"python\" \n\n[config]\nitem = \"value\"\n\n[input]\n[input.input1] \nname = \"in_data\"\n\n[output]\n[output.output1] \nname = \"out_data\"\n"
  },
  {
    "path": "src/demo/hello_world/graph/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(GRAPH_NAME \"hello_world.toml\")\nset(TEST_HELLO_WORLD_NAME \"test_hello_world.py\")\nset(DEMO_GRAPH ${CMAKE_CURRENT_BINARY_DIR}/${GRAPH_NAME})\nset(DEMO_TEST_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${TEST_HELLO_WORLD_NAME})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/${GRAPH_NAME}.in ${DEMO_GRAPH})\n\ninstall(FILES \n    ${DEMO_GRAPH}  \n    ${DEMO_TEST_FILE} \n    DESTINATION ${DEMO_HELLO_WORLD_GRAPH_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/hello_world/graph/hello_world.toml.in",
    "content": "[driver]\ndir = [\n    \"@DEMO_HELLO_WORLD_FLOWUNIT_DIR@\"\n    ]\n[flow]\nname = \"HelloWorld\"\ndesc = \"A hello world REST API service demo.\"\n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph hello_world_diagraph {\n    node [shape=Mrecord]\n    httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, time_out_ms=5000, endpoint=\"http://0.0.0.0:7770\", max_requests=100]\n    hello_world[type=flowunit, flowunit=hello_world, device=cpu]\n    httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu]\n\n    httpserver_sync_receive:out_request_info -> hello_world:in_data\n    hello_world:out_data -> httpserver_sync_reply:in_reply_info\n}\n'''\n"
  },
  {
    "path": "src/demo/hello_world/graph/test_hello_world.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport http.client\nimport json\n\nclass HttpConfig:\n    def __init__(self, msg):\n        self.hostIP = \"127.0.0.1\"\n        self.Port = 7770\n\n        self.httpMethod = \"POST\"\n        self.requstURL = \"/v1/hello_world\"\n\n        self.headerdata = {\n            \"Content-Type\": \"application/json\"\n        }\n\n        self.data = {\n            \"msg\": msg\n        }\n\n        self.body = json.dumps(self.data)\n\n        \nif __name__ == \"__main__\":\n    http_config = HttpConfig(\"hello world!\")\n\n    conn = http.client.HTTPConnection(host=http_config.hostIP, port=http_config.Port)\n    conn.request(method=http_config.httpMethod, url=http_config.requstURL, body=http_config.body,\n                headers=http_config.headerdata)\n\n    response = conn.getresponse().read().decode()\n    print(response)\n\n"
  },
  {
    "path": "src/demo/mnist/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_MNIST_DIR \"${MODELBOX_DEMO_INSTALL_DIR}/mnist\" CACHE INTERNAL \"\")\nset(DEMO_MNIST_FLOWUNIT_DIR ${DEMO_MNIST_DIR}/flowunit)\nset(DEMO_MNIST_GRAPH_DIR ${DEMO_MNIST_DIR}/graph)\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/mnist/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_infer/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"mnist_infer\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(MODEL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/mnist_model.pb)\nset(TRAIN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/train.py)\nset(TRAIN_SH_FILE ${CMAKE_CURRENT_SOURCE_DIR}/train.sh)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${MODEL_FILE} ${FLOWUNIT_PATH}/mnist_model.pb COPYONLY)\nconfigure_file(${TRAIN_FILE} ${FLOWUNIT_PATH}/train.py COPYONLY)\nconfigure_file(${TRAIN_SH_FILE} ${FLOWUNIT_PATH}/train.sh COPYONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_MNIST_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_infer/mnist_infer.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_infer\" \ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"Recognition handwritten digits recognition.\"\nentry = \"./mnist_model.pb\" \ntype = \"inference\" \nvirtual_type = \"tensorflow\" \n\n[input]\n[input.input1] \nname = \"Input\" \ntype = \"float\" \n\n[output]\n[output.output1] \nname = \"Output\" \ntype = \"float\" "
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_infer/train.py",
    "content": "import sys\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom PIL import Image\nimport numpy as np\n\nTEST_IMG_DIR=\"mnist-image\"\nTRAIN_DIR=\"mnist-train\"\n\nprint(\"begin train mnist:\")\n\n# load mnist data set\nprint(\"load dataset:\")\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# scale the values to 0.0 to 1.0\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# reshape for feeding into the model\ntrain_images = train_images.reshape(train_images.shape[0], 28, 28, 1)\ntest_images = test_images.reshape(test_images.shape[0], 28, 28, 1)\n\nprint('train_images.shape: {}, of {}'.format(train_images.shape, train_images.dtype))\nprint('test_images.shape: {}, of {}'.format(test_images.shape, test_images.dtype))\n\nmodel = keras.Sequential([\n  keras.layers.Conv2D(input_shape=(28,28,1), filters=8, kernel_size=3, \n                      strides=2, activation='relu', name='Conv1'),\n  keras.layers.Flatten(),\n  keras.layers.Dense(10, name='Dense')\n])\nmodel.summary()\n\ntesting = False\nepochs = 5\n\nmodel.compile(optimizer='adam', \n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=[keras.metrics.SparseCategoricalAccuracy()])\nmodel.fit(train_images, train_labels, epochs=epochs)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('\\nTest accuracy: {}'.format(test_acc))\n\n# Fetch the Keras session and save the model\n# The signature definition is defined by the input and output tensors,\n# and stored with the default serving key\ntf.keras.models.save_model(\n    model,\n    TRAIN_DIR,\n    overwrite=True,\n    include_optimizer=True,\n    save_format=None,\n    signatures=None,\n    options=None\n)\n\nprint('\\nSaved model in ', TRAIN_DIR)"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_infer/train.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language \n\nCURRDIR=$(pwd)\n\nsetup_var() {\n    TRAIN_DIR=\"${CURRDIR}/mnist-train\"\n    OS_ARCH=$(uname -m)\n    mkdir ${TRAIN_DIR} -p\n    if [ $? -ne 0 ]; then\n        echo \"create train dir failed\"\n        return 1\n    fi\n    TRAIN_FILE=\"${CURRDIR}/train.py\"\n}\n\ndownload_package() {\n    pip list |grep \"tensorflow \" >/dev/null 2>&1\n    if [ $? -ne 0 ]; then\n        $SUDO pip install tensorflow tensorflow-datasets\n        if [ $? -ne 0 ]; then\n            echo \"install python train package failed\"\n            return 1\n        fi\n    fi\n\n    return 0\n}\n\ntrain() {\n    python ${TRAIN_FILE}\n    if [ $? -ne 0 ]; then\n            echo \"train failed\"\n            return 1\n    fi\n\n    return 0\n}\n\nexport_model() {\n    cp ${TRAIN_DIR}/saved_model.pb ${CURRDIR}/mnist_model.pb\n    return $?\n}\n\n\nmain() {\n    setup_var\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    echo \"working directory: ${TRAIN_DIR}\"\n\n    download_package\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    train\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    export_model\n    if [ $? -ne 0 ]; then\n            return 1\n    fi\n\n    rm -fr \"${TRAIN_DIR}\"\n\n    echo \"train success.\"\n    echo \"quick start: https://www.tensorflow.org/tfx/tutorials/serving/rest_simple\"\n}\n\nmain\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_preprocess/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"mnist_preprocess\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_MNIST_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_preprocess/mnist_preprocess.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport _flowunit as modelbox\nimport numpy as np\nimport base64\nimport json\nimport cv2\n\nclass MnistPreprocess(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data = data_context.input(\"in_data\")\n        out_data = data_context.output(\"out_data\")\n\n        for buffer in in_data:\n            # get image from request body\n            request_body = json.loads(buffer.as_object().strip(chr(0)))\n            \n            if  request_body.get(\"image_base64\"):\n                img_base64 = request_body[\"image_base64\"]\n                img_file = base64.b64decode(img_base64)\n\n                # reshape img\n                img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_GRAYSCALE)\n                img = cv2.resize(img, (28, 28))\n                infer_data = np.array([255 - img], dtype=np.float32)\n                infer_data = np.reshape(infer_data, (784,)) / 255.\n                \n                # build buffer\n                add_buffer = modelbox.Buffer(self.get_bind_device(), infer_data)\n                out_data.push_back(add_buffer)\n            else:\n                error_msg = \"wrong key of request_body\"\n                
modelbox.error(error_msg)\n                add_buffer = modelbox.Buffer(self.get_bind_device(), \"\")\n                add_buffer.set_error(\"MnistPreprocess.BadRequest\", error_msg)\n                out_data.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_preprocess/mnist_preprocess.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_preprocess\"\ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"mnist preprocess\"\nentry = \"mnist_preprocess@MnistPreprocess\" \ntype = \"python\" \n\nstream = false\ncondition  = false\ncollapse = false \ncollapse_all = false \nexpand = false \n\n[config]\n\n[input]\n[input.input1] \nname = \"in_data\"\n\n[output]\n[output.output1] \nname = \"out_data\" "
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_response/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(FLOWUNIT_NAME \"mnist_response\")\n\nset(FLOWUNIT_PATH ${CMAKE_CURRENT_BINARY_DIR}/${FLOWUNIT_NAME})\nset(FLOWUNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.py)\nset(FLOWUNIT_CONFIG ${CMAKE_CURRENT_SOURCE_DIR}/${FLOWUNIT_NAME}.toml)\n\nconfigure_file(${FLOWUNIT_FILE} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.py @ONLY)\nconfigure_file(${FLOWUNIT_CONFIG} ${FLOWUNIT_PATH}/${FLOWUNIT_NAME}.toml @ONLY)\n \ninstall(DIRECTORY\n    ${FLOWUNIT_PATH}\n    DESTINATION ${DEMO_MNIST_FLOWUNIT_DIR}\n    COMPONENT demo\n)\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_response/mnist_response.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport _flowunit as modelbox\nimport numpy as np\nimport json\n\nclass MnistResponseFlowUnit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_context):\n        in_data = data_context.input(\"in_data\")\n        out_data = data_context.output(\"out_data\")\n\n        for buffer in in_data:\n            result_str = ''\n            if buffer.has_error():\n                error_msg = buffer.get_error_msg()\n                result = {\n                    \"error_msg\": str(error_msg)\n                }\n            else:\n                max_index = np.argmax(buffer.as_object())\n                result = {\n                    \"predict_result\": str(max_index)\n                }\n\n            result_str = (json.dumps(result) + chr(0)).encode('utf-8').strip()\n            add_buffer = modelbox.Buffer(self.get_bind_device(), result_str)\n            out_data.push_back(add_buffer)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_context):\n        return modelbox.Status()\n\n    def data_post(self, data_context):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/demo/mnist/flowunit/mnist_response/mnist_response.toml",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n[base]\nname = \"mnist_response\" \ndevice = \"cpu\" \nversion = \"1.0.0\" \ndescription = \"mnist_response\" \nentry = \"mnist_response@MnistResponseFlowUnit\" \ntype = \"python\" \n\nstream = false \ncondition  = false\ncollapse = false \ncollapse_all = false \nexpand = false \nexception_visible = true\n\n[config]\n\n[input]\n[input.input1] \nname = \"in_data\"\n\n[output]\n[output.output1] \nname = \"out_data\""
  },
  {
    "path": "src/demo/mnist/graph/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nset(DEMO_GRAPH ${CMAKE_CURRENT_BINARY_DIR}/mnist.toml)\nset(TEST_IMAGE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/mnist-image.tar.gz)\nset(TEST_MNIST_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/test_mnist.py)\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/mnist.toml.in ${DEMO_GRAPH} @ONLY)\n\ninstall(FILES \n    ${TEST_IMAGE_FILE} \n    DESTINATION ${DEMO_IMAGE_DIR}\n    COMPONENT demo\n)\n\ninstall(FILES \n    ${DEMO_GRAPH}  \n    ${TEST_MNIST_SCRIPT} \n    DESTINATION ${DEMO_MNIST_GRAPH_DIR}\n    COMPONENT demo\n)"
  },
  {
    "path": "src/demo/mnist/graph/mnist.toml.in",
    "content": "[driver]\ndir = [\"@DEMO_MNIST_FLOWUNIT_DIR@\"]\n[profile]\nprofile=false\ntrace=false\ndir=\"\"\n[flow]\nname = \"MNIST\"\ndesc = \"MNIST detection for image\"\n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph mnist_sample {\n    node [shape=Mrecord]\n    httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, time_out_ms=5000, endpoint=\"http://0.0.0.0:8190\", max_requests=100]\n    mnist_preprocess[type=flowunit, flowunit=mnist_preprocess, device=cpu]\n    mnist_infer[type=flowunit, flowunit=mnist_infer, device=cpu, deviceid=0, batch_size=1]\n    mnist_response[type=flowunit, flowunit=mnist_response, device=cpu]\n    httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu]\n\n    httpserver_sync_receive:out_request_info -> mnist_preprocess:in_data\n    mnist_preprocess:out_data -> mnist_infer:Input\n    mnist_infer:Output -> mnist_response:in_data\n    mnist_response:out_data -> httpserver_sync_reply:in_reply_info\n}\n'''\n\n"
  },
  {
    "path": "src/demo/mnist/graph/test_mnist.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport base64\nimport http.client\nimport json\nimport sys\nimport os\nimport random\nimport argparse\nfrom pathlib import Path\nfrom PIL import Image\nfrom urllib.parse import urlparse\nimport urllib.request\n\nsource_path = Path(__file__).resolve()\nsource_dir = source_path.parent\nmnist_image_path = str(source_dir) + \"/mnist-image\"\n\ndef DisplayImage(image_path):\n    # pass the image as command line argument\n    img = Image.open(image_path)\n\n    # resize the image\n    width, height = img.size\n    aspect_ratio = height/width\n    new_width = 32\n    new_height = aspect_ratio * new_width * 0.45\n    img = img.resize((new_width, int(new_height)))\n\n    # convert image to greyscale format\n    img = img.convert('L')\n\n    pixels = img.getdata()\n\n    # replace each pixel with a character from array\n    chars = [\"B\",\"S\",\"#\",\"&\",\"@\",\"$\",\"%\",\"*\",\"!\",\":\",\" \"]\n    new_pixels = [chars[pixel//25] for pixel in pixels]\n    new_pixels = ''.join(new_pixels)\n\n    # split string of chars into multiple strings of length equal to new width and create a list\n    new_pixels_count = len(new_pixels)\n    ascii_image = [new_pixels[index:index + new_width] for index in range(0, new_pixels_count, new_width)]\n    ascii_image = \"\\n\".join(ascii_image)\n    sys.stdout.write(ascii_image + \"\\n\")\n\nclass 
HttpConfig:\n    def __init__(self, img_base64_str):\n        self.httpMethod = \"POST\"\n        self.requstURL = \"/v1/mnist_test\"\n\n        self.headerdata = {\n            \"Content-Type\": \"application/json\"\n        }\n\n        self.test_data = {\n            \"image_base64\": img_base64_str\n        }\n\n        self.body = json.dumps(self.test_data)\n\ndef DoMnistInfer(host, img_path, PrintRequest =  False):\n    o = urlparse('//' + host)\n\n    with open(img_path, 'rb') as fp:\n        base64_data = base64.b64encode(fp.read())\n        img_base64_str = str(base64_data, encoding='utf8')\n\n    http_config = HttpConfig(img_base64_str)\n\n    http_config.hostIP = o.hostname\n    http_config.Port = o.port\n\n    if PrintRequest:\n        print(\"-- Request Body:\")\n        print(http_config.body)\n\n    conn = http.client.HTTPConnection(host=http_config.hostIP, port=http_config.Port)\n    try:\n        conn.request(method=http_config.httpMethod, url=http_config.requstURL, body=http_config.body,\n                headers=http_config.headerdata)\n    except  Exception as e:\n        print(\"Connect to \" + host + \" failed, reaseon: \")\n        print(e)\n        print(\"Please check the service status, or use '-host [ip:port]' to specify the address of the service.\")\n        return 1\n\n    print(\"-- Response:\")\n    response = conn.getresponse().read().decode()\n    print(response)\n    return 0\n\ndef Extract(download):\n    if os.path.exists(mnist_image_path):\n        return\n    \n    if download:\n        urllib.request.urlretrieve(\"http://download.modelbox-ai.com/test/mnist-image.tar.gz\", \"mnist-image.tar.gz\")\n\n    os.system(\"tar -C \"+ str(source_dir) + \" -xf mnist-image.tar.gz\")\n\ndef main():\n    descStr = \"This program is used to test mnist inference.\"\n    parser = argparse.ArgumentParser(description=descStr)\n    parser.add_argument('-id', dest='TestNum', help=\"Test image id.\", required=False)\n    
parser.add_argument('-download', dest='DownLoad', help=\"download full test image from modelbox.\", required=False, default=False, action='store_true')\n    parser.add_argument('-print-request', dest='PrintRequest', help=\"print request json body.\", required=False, default=False, action='store_true')\n    parser.add_argument('-host', dest='Host', default=\"127.0.0.1:8190\", help=\"set host and port.\", required=False)\n\n    args = parser.parse_args()\n    TestNum = 0\n\n    Extract(args.DownLoad)\n\n    if args.TestNum:\n        TestNum = args.TestNum\n    else:\n        list = os.listdir(mnist_image_path)\n        TestNum = random.randint(0, len(list) - 1)\n\n    img_path = mnist_image_path + \"/test_\" + str(TestNum) + \".bmp\"\n\n    if not os.path.exists(img_path):\n        print(\"image \" + img_path + \" not exists\")\n        return 1\n\n    Host = args.Host\n    PrintRequest = args.PrintRequest\n\n    print(\"-- Image ID: \" + str(TestNum) + \", Image: \" )\n    DisplayImage(img_path)\n    print(\"-- Connect to \" + Host)\n    return DoMnistInfer(Host, img_path, PrintRequest)\n\nif __name__ == \"__main__\":\n    # execute only if run as a script\n    main()\n"
  },
  {
    "path": "src/develop/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-example)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\n\n\n"
  },
  {
    "path": "src/drivers/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif (STANDALONE)\n    set(CMAKE_INSTALL_RPATH $ORIGIN)\nendif()\n\nadd_subdirectory(common)\nadd_subdirectory(virtual)\nadd_subdirectory(inference_engine)\nadd_subdirectory(devices)\nadd_subdirectory(graph_conf)\n"
  },
  {
    "path": "src/drivers/common/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-common)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nadd_subdirectory(devices)\nadd_subdirectory(libs)\nadd_subdirectory(flowunit)\nif(${PYTHONLIBS_FOUND})\n    add_subdirectory(python)\nendif()"
  },
  {
    "path": "src/drivers/common/devices/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-common-device)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/common/devices/device_stream/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-device-stream-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBRARY} ${FFMPEG_LIBRARIES})\n\nset(MODELBOX_COMMON_DEVICE_STREAM_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_DEVICE_STREAM_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/devices/device_stream/device_stream.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"device_stream.h\"\n\nnamespace modelbox {}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/devices/device_stream/device_stream.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_DEVICE_STREAM_COMMON_H_\n#define MODELBOX_FLOWUNIT_DEVICE_STREAM_COMMON_H_\n\n#include \"modelbox/base/device.h\"\n#include \"modelbox/base/device_memory.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/flowunit.h\"\n\nnamespace modelbox {\n\ntemplate <typename Memory>\nvoid TravelDevMem(\n    const std::shared_ptr<modelbox::BufferListMap> &input_buffer_list_map,\n    std::function<bool(std::shared_ptr<Memory>)> func) {\n  for (auto &port : *input_buffer_list_map) {\n    auto &buffer_list = port.second;\n    auto dev_mem_list = buffer_list->GetAllBufferDeviceMemory();\n    for (auto &dev_mem : dev_mem_list) {\n      auto target_dev_mem = std::dynamic_pointer_cast<Memory>(dev_mem);\n      if (target_dev_mem == nullptr) {\n        continue;\n      }\n\n      auto ret = func(target_dev_mem);\n      if (!ret) {\n        return;\n      }\n    }\n  }\n}\n\ntemplate <typename Stream, typename Memory>\nstd::shared_ptr<Stream> GetDevSyncStream(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto input_buffer_list_map = data_ctx->Input();\n  std::shared_ptr<Stream> first_stream;\n\n  // Get first stream\n  TravelDevMem<Memory>(input_buffer_list_map,\n                       [&first_stream](std::shared_ptr<Memory> dev_mem) {\n                         auto 
dev_stream = dev_mem->GetBindStream();\n                         if (first_stream == nullptr && dev_stream != nullptr) {\n                           first_stream = dev_stream;\n                           return false;\n                         }\n\n                         return true;\n                       });\n\n  // Bind to same stream\n  TravelDevMem<Memory>(\n      input_buffer_list_map, [&first_stream](std::shared_ptr<Memory> dev_mem) {\n        if (first_stream == nullptr) {\n          // All dev mem has no stream, will create new stream\n          auto status = dev_mem->BindStream();\n          if (status != modelbox::STATUS_OK) {\n            auto err_msg = \"bind stream failed, \" + status.WrapErrormsgs();\n            MBLOG_ERROR << err_msg;\n            return false;\n          }\n\n          first_stream = dev_mem->GetBindStream();\n          return true;\n        }\n\n        if (first_stream != dev_mem->GetBindStream()) {\n          // Sync different stream\n          auto status = dev_mem->DetachStream();\n          if (status != modelbox::STATUS_OK) {\n            auto err_msg = \"Detach stream failed, \" + status.WrapErrormsgs();\n            MBLOG_WARN << err_msg;\n          }\n\n          status = dev_mem->BindStream(first_stream);\n          if (status != modelbox::STATUS_OK) {\n            auto err_msg = \"bind stream failed, \" + status.WrapErrormsgs();\n            MBLOG_WARN << err_msg;\n          }\n        }\n\n        return true;\n      });\n\n  return first_stream;\n};\n\ntemplate <typename Stream, typename Memory>\nStatus SetDevStream(const std::shared_ptr<modelbox::DataContext> &data_ctx,\n                    const std::shared_ptr<Stream> &stream) {\n  if (stream == nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  auto output_buffer_list_map = data_ctx->Output();\n  TravelDevMem<Memory>(\n      output_buffer_list_map, [stream](std::shared_ptr<Memory> dev_mem) {\n        if (dev_mem->GetBindStream() == nullptr) {\n          
auto status = dev_mem->BindStream(stream);\n          if (status != modelbox::STATUS_OK) {\n            auto err_msg = \"bind stream failed, \" + status.WrapErrormsgs();\n            MBLOG_WARN << err_msg;\n          }\n        }\n\n        return true;\n      });\n\n  return modelbox::STATUS_OK;\n};\n\ntemplate <typename Stream>\nStatus HoldMemory(const std::shared_ptr<modelbox::DataContext> &data_ctx,\n                  const std::shared_ptr<Stream> &stream) {\n  // Release input of this unit after stream stage completed\n  // No need to bind output, it will pass to next unit\n  // Refer to CudaStream/AscendStream->Bind() for detail\n  auto input_buffer_list_map = data_ctx->Input();\n  std::vector<std::shared_ptr<const DeviceMemory>> mems_to_hold;\n  for (auto &item : *input_buffer_list_map) {\n    auto &buffer_list = item.second;\n    for (auto &buffer : *buffer_list) {\n      mems_to_hold.push_back(buffer->GetDeviceMemory());\n    }\n  }\n\n  auto ret = stream->Bind(mems_to_hold);\n  if (!ret) {\n    MBLOG_ERROR << \"Bind mem for stream \" << stream->Get() << \" failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\n};  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_DEVICE_STREAM_COMMON_H_"
  },
  {
    "path": "src/drivers/common/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-common-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/common/flowunit/driver_util/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-driver-util-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_DRIVER_UTIL_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_DRIVER_UTIL_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/driver_util/driver_util.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"driver_util.h\"\n\nnamespace driverutil {\n\nstd::string string_masking(const std::string &input) {\n  std::regex url_auth_pattern(\"://[^ /]*?:[^ /]*?@\");\n  auto output = std::regex_replace(input, url_auth_pattern, \"://*:*@\");\n\n  std::regex pattern_ak(R\"(\"ak\"[ ]*?:[ ]*?\".*?\")\");\n  output = std::regex_replace(output, pattern_ak, R\"(\"ak\":\"*\")\");\n\n  std::regex pattern_sk(R\"(\"sk\"[ ]*?:[ ]*?\".*?\")\");\n  output = std::regex_replace(output, pattern_sk, R\"(\"sk\":\"*\")\");\n\n  std::regex pattern_token(R\"(\"securityToken\"[ ]*?:[ ]*?\".*?\")\");\n  output = std::regex_replace(output, pattern_token, R\"(\"securityToken\":\"*\")\");\n\n  std::regex pattern_vcn_pwd(R\"(\"vcn_stream_pwd\"[ ]*?:[ ]*?\".*?\")\");\n  output =\n      std::regex_replace(output, pattern_vcn_pwd, R\"(\"vcn_stream_pwd\":\"*\")\");\n\n  return output;\n}\n\n}  // namespace driverutil"
  },
  {
    "path": "src/drivers/common/flowunit/driver_util/driver_util.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_DRIVER_UTIL_H_\n#define MODELBOX_FLOWUNIT_DRIVER_UTIL_H_\n\n#include <modelbox/base/config.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n\nnamespace driverutil {\n\ntemplate <class T>\nmodelbox::Status GetPlugin(\n    const std::string &driver_class, std::shared_ptr<modelbox::Drivers> &drivers,\n    std::vector<std::shared_ptr<modelbox::DriverFactory>> &factories,\n    std::map<std::string, std::shared_ptr<T>> &plugins) {\n  auto driver_list = drivers->GetDriverListByClass(driver_class);\n  for (auto &driver : driver_list) {\n    auto driver_desc = driver->GetDriverDesc();\n    if (driver_desc == nullptr) {\n      continue;\n    }\n\n    auto name = driver_desc->GetName();\n    auto factory = driver->CreateFactory();\n    if (factory == nullptr) {\n      MBLOG_ERROR << \"Plugin : \" << name << \" factory create failed\";\n      continue;\n    }\n\n    auto plugin = std::dynamic_pointer_cast<T>(factory->GetDriver());\n    if (plugin == nullptr) {\n      MBLOG_ERROR << \"plugin : \" << name << \" is not derived from \"\n                  << typeid(T).name();\n      continue;\n    }\n\n    plugins[name] = plugin;\n    factories.push_back(factory);\n    MBLOG_INFO << \"Add plugin : \" << name;\n  }\n\n  return 
modelbox::STATUS_SUCCESS;\n}\n\n/**\n * @brief mask sensitive information\n * @param input input string\n * @return string with sensitive information masked\n */\nstd::string string_masking(const std::string &input);\n\n}  // namespace driverutil\n\n#endif  // MODELBOX_FLOWUNIT_DRIVER_UTIL_H_"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-driver-common-component)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nadd_subdirectory(iam_auth)\nif(OBS_FOUND)\n    add_subdirectory(obs_client)\nelse()\n    message(STATUS \"Not found obs library, disable obs source parser plugin\")\nendif()"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nif (NOT Boost_FOUND) \n    message(STATUS \"Not found boost, skip build IAMAuth\")\n    return()\nendif()\n\nif (NOT CPPREST_FOUND) \n    message(STATUS \"Not found cpprest, skip build IAMAuth\")\n    return()\nendif()\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${APIGW_CPP_INCLUDE_DIR})\ninclude_directories(${CPPREST_INCLUDE_DIR})\ninclude_directories(${OPENSSL_INCLUDE_DIR})\ninclude_directories(${Boost_INCLUDE_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-cpu-iam_auth)  \nadd_library(${LIBRARY} SHARED ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBRARY} ${CPPREST_LIBRARIES} ${Boost_LIBRARIES} ${OPENSSL_LIBRARIES})\ntarget_link_libraries(${LIBRARY} ${APIGW_CPP_LIBRARIES})\ntarget_link_libraries(${LIBRARY} ${LIBMODELBOX_SHARED})\n\ninstall(FILES \n    $<TARGET_FILE:${APIGW_CPP_LIBRARIES}> DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    COMPONENT cpu-device-flowunit\n    )\n\ninstall(TARGETS ${LIBRARY} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    
)\n\nset(MODELBOX_COMMON_IAM_AUTH_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_IAM_AUTH_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_COMMON_IAM_AUTH_LIBRARY})\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/iam_api.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"iam_api.h\"\n\n#include <iostream>\n#include <nlohmann/json.hpp>\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"signer.h\"\n\nusing namespace utility;            // NOLINT\nusing namespace web;                // NOLINT\nusing namespace web::http;          // NOLINT\nusing namespace web::http::client;  // NOLINT\n\nnamespace modelbox {\nbool IAMApi::validate_certificates_ = false;\nstd::string IAMApi::request_host_;\nstd::string IAMApi::request_token_uri_;\nstd::string IAMApi::request_credential_uri_;\nstd::string IAMApi::cert_file_ = \"/etc/ssl/certs/ca-certificates.crt\";\nstd::string IAMApi::cert_file_path_ = \"/etc/ssl/certs\";\n\nvoid SetHttpConfig(http_client_config &config, bool validate_certificates) {\n  config.set_validate_certificates(validate_certificates);\n  config.set_timeout(utility::seconds(60));\n  if (validate_certificates) {\n    config.set_ssl_context_callback([&](boost::asio::ssl::context &ctx) {\n      ctx.load_verify_file(IAMApi::cert_file_path_);\n      ctx.add_verify_path(IAMApi::cert_file_path_);\n    });\n  }\n}\n\nmodelbox::Status CreateRequestBody(const AgencyInfo &agency_info,\n                                   const ProjectInfo &project_info,\n                                   const int32_t &token_flag,\n                                   
web::json::value &request_body) {\n  try {\n    request_body[\"auth\"][\"identity\"][\"methods\"][0] =\n        web::json::value::string(U(\"assume_role\"));\n    request_body[\"auth\"][\"identity\"][\"assume_role\"][\"domain_name\"] =\n        web::json::value::string(U(agency_info.user_domain_name));\n    request_body[\"auth\"][\"identity\"][\"assume_role\"][\"agency_name\"] =\n        web::json::value::string(U(agency_info.xrole_name));\n    request_body[\"auth\"][\"identity\"][\"assume_role\"][\"duration_seconds\"] =\n        ONE_DAY_SECONDS;\n    if (token_flag == TOKEN_REQUEST) {\n      if (!project_info.project_name.empty()) {\n        request_body[\"auth\"][\"scope\"][\"project\"][\"name\"] =\n            web::json::value::string(U(project_info.project_name));\n      } else if (!project_info.project_id.empty()) {\n        request_body[\"auth\"][\"scope\"][\"project\"][\"id\"] =\n            web::json::value::string(U(project_info.project_id));\n      } else {\n        MBLOG_ERROR << \"cannot find any project info\";\n      }\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status GetSubjectTokenFromResponse(\n    UserAgencyToken &token, web::http::http_response &response_data) {\n  try {\n    auto response_headers = response_data.headers();\n    auto token_iter = response_headers.find(\"X-Subject-Token\");\n    if (token_iter != response_headers.end()) {\n      token.user_token = token_iter->second;\n    } else {\n      throw \"failed get user agency token \";\n    }\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status GetAgencyCredentialFromResponse(\n    UserAgencyCredential &user_agency_credential,\n    web::http::http_response &response_data) {\n  try {\n    auto reponse_body = response_data.extract_json().get();\n    
auto nlohmann_body = nlohmann::json::parse(reponse_body.serialize());\n    user_agency_credential.user_secure_token =\n        nlohmann_body[\"credential\"][\"securitytoken\"];\n    user_agency_credential.user_ak = nlohmann_body[\"credential\"][\"access\"];\n    user_agency_credential.user_sk = nlohmann_body[\"credential\"][\"secret\"];\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<void> IAMApi::CreateSignerRequest(\n    const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n    const ProjectInfo &project_info, int32_t token_flag) {\n  if (request_host_.empty()) {\n    MBLOG_ERROR << \"host or uri is empty, please set host and uri value\";\n    return nullptr;\n  }\n\n  if (token_flag == TOKEN_REQUEST) {\n    if (request_token_uri_.empty()) {\n      MBLOG_ERROR << \"token uri is empty, please set  uri value\";\n      return nullptr;\n    }\n  }\n\n  if (token_flag == CREDENTIAL_REQUEST) {\n    if (request_credential_uri_.empty()) {\n      MBLOG_ERROR << \"credential uri is empty, please set  uri value\";\n      return nullptr;\n    }\n  }\n\n  size_t pos = request_host_.find(\"://\", 0);\n  size_t offset = std::string(\"://\").length();\n  std::string endpoint = request_host_.substr(pos + offset);\n\n  // construct json data\n  web::json::value request_body;\n  if (modelbox::STATUS_OK !=\n      CreateRequestBody(agency_info, project_info, token_flag, request_body)) {\n    return nullptr;\n  }\n\n  std::string request_uri = request_token_uri_;\n  if (token_flag == CREDENTIAL_REQUEST) {\n    request_uri = request_credential_uri_;\n  }\n\n  std::shared_ptr<RequestParams> request_self = std::make_shared<RequestParams>(\n      \"POST\", endpoint, request_uri, \"\", U(request_body.serialize()));\n  request_self->addHeader(\"content-type\", \"application/json\");\n  request_self->addHeader(\"X-Domain-Id\", consignee_info.domain_id);\n  
request_self->addHeader(\"X-Project-Id\", consignee_info.project_id);\n\n  Signer signer(consignee_info.ak, consignee_info.sk);\n  signer.createSignature(request_self.get());\n  return request_self;\n}\n\nmodelbox::Status SendHttpRequest(const std::string &request_host,\n                                 const std::string &request_uri,\n                                 const web::http::http_request &token_request,\n                                 web::http::http_response &response_data) {\n  http_client_config config;\n  SetHttpConfig(config, IAMApi::validate_certificates_);\n  http_client token_client(U(request_host + request_uri), config);\n  try {\n    response_data = token_client.request(token_request).get();\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (response_data.status_code() != status_codes::Created) {\n    MBLOG_ERROR << \"failed to get project token, status code :\"\n                << response_data.status_code();\n    MBLOG_ERROR << \"return body: \" << response_data.extract_utf8string().get();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status IAMApi::GetAgencyProjectCredentialWithAK(\n    const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n    UserAgencyCredential &user_agency_credential) {\n  ProjectInfo project_info;\n  std::shared_ptr<RequestParams> request_self =\n      std::static_pointer_cast<RequestParams>(CreateSignerRequest(\n          consignee_info, agency_info, project_info, CREDENTIAL_REQUEST));\n  if (request_self == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  http_request token_request;\n  token_request.set_method(methods::POST);\n  token_request.set_body(U(request_self->getPayload()));\n  for (auto header : *request_self->getHeaders()) {\n    token_request.headers()[header.getKey()] = header.getValue();\n  }\n\n  web::http::http_response response_data;\n  if (modelbox::STATUS_OK != 
SendHttpRequest(request_host_,\n                                             request_credential_uri_,\n                                             token_request, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK !=\n      GetAgencyCredentialFromResponse(user_agency_credential, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status IAMApi::GetAgencyProjectCredentialWithToken(\n    const AgentToken &agent_token, const AgencyInfo &agency_info,\n    UserAgencyCredential &user_agency_credential) {\n  web::json::value request_body;\n  ProjectInfo project_info;\n  if (modelbox::STATUS_OK != CreateRequestBody(agency_info, project_info,\n                                               CREDENTIAL_REQUEST,\n                                               request_body)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  http_request token_request;\n  token_request.headers()[\"Content-Type\"] = \"application/json;charset=utf8\";\n  token_request.headers()[\"X-Auth-Token\"] = agent_token.x_subject_token_;\n  token_request.set_method(methods::POST);\n  token_request.set_body(request_body);\n  web::http::http_response response_data;\n\n  if (modelbox::STATUS_OK != SendHttpRequest(request_host_, request_token_uri_,\n                                             token_request, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK !=\n      GetAgencyCredentialFromResponse(user_agency_credential, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status IAMApi::GetAgencyProjectTokenWithAK(\n    const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n    const ProjectInfo &project_info, UserAgencyToken &project_agency_token) {\n  std::shared_ptr<RequestParams> request_self =\n      std::static_pointer_cast<RequestParams>(CreateSignerRequest(\n          consignee_info, agency_info, 
project_info, TOKEN_REQUEST));\n  if (request_self == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  http_request token_request;\n  token_request.set_method(methods::POST);\n  token_request.set_body(U(request_self->getPayload()));\n  for (auto header : *request_self->getHeaders()) {\n    token_request.headers()[header.getKey()] = header.getValue();\n  }\n\n  web::http::http_response response_data;\n  if (modelbox::STATUS_OK != SendHttpRequest(request_host_, request_token_uri_,\n                                             token_request, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK !=\n      GetSubjectTokenFromResponse(project_agency_token, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status IAMApi::GetAgencyProjectTokenWithToken(\n    const AgentToken &agent_token, const AgencyInfo &agency_info,\n    const ProjectInfo &project_info, UserAgencyToken &user_agency_token) {\n  web::json::value request_body;\n  if (modelbox::STATUS_OK != CreateRequestBody(agency_info, project_info,\n                                               TOKEN_REQUEST, request_body)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  http_request token_request;\n  token_request.headers()[\"Content-Type\"] = \"application/json;charset=utf8\";\n  token_request.headers()[\"X-Auth-Token\"] = agent_token.x_subject_token_;\n  token_request.set_method(methods::POST);\n  token_request.set_body(request_body);\n  web::http::http_response response_data;\n\n  if (modelbox::STATUS_OK != SendHttpRequest(request_host_, request_token_uri_,\n                                             token_request, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK !=\n      GetSubjectTokenFromResponse(user_agency_token, response_data)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid IAMApi::SetRequestHost(std::string request_host) {\n  
request_host_ = std::move(request_host);\n}\n\nvoid IAMApi::SetRequestTokenUri(std::string request_token_uri) {\n  request_token_uri_ = std::move(request_token_uri);\n}\n\nvoid IAMApi::SetRequestCredentialUri(std::string request_credential_uri) {\n  request_credential_uri_ = std::move(request_credential_uri);\n}\n\nvoid IAMApi::SetValidateCertificates(bool validate_certificates) {\n  validate_certificates_ = validate_certificates;\n}\n\nvoid IAMApi::SetCertFilePath(std::string cert_file,\n                             std::string cert_file_path) {\n  IAMApi::cert_file_ = std::move(cert_file);\n  IAMApi::cert_file_path_ = std::move(cert_file_path);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/iam_api.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_IAM_API_H_\n#define MODELBOX_FLOWUNIT_IAM_API_H_\n\n#include <modelbox/base/status.h>\n#include <cpprest/http_client.h>\n#include <cpprest/json.h>\n\n#include <string>\n\n#include <modelbox/token_header.h>\n#define TOKEN_REQUEST 0\n#define CREDENTIAL_REQUEST 1\n#define ONE_DAY_SECONDS 86400\n\nnamespace modelbox {\nclass IAMApi {\n public:\n  /*\n   * @brief  Get agency credential with ak/sk\n   * @param  consignee_info - in,  consignee information\n   * @param  AgencyInfo - in, agency information\n   * @param  user_agency_credential - out, user credential\n   * @return successful or fault\n   */\n  static modelbox::Status GetAgencyProjectCredentialWithAK(\n      const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n      UserAgencyCredential &user_agency_credential);\n  /*\n   * @brief  Get agency credential with token\n   * @param  agent_token - in,  agent token\n   * @param  AgencyInfo - in, agency information\n   * @param  user_agency_credential - out, user credential\n   * @return successful or fault\n   */\n  static modelbox::Status GetAgencyProjectCredentialWithToken(\n      const AgentToken &agent_token, const AgencyInfo &agency_info,\n      UserAgencyCredential &user_agency_credential);\n\n  /*\n   * @brief  Get agency credential\n   * @param  consignee_info - 
in,  consignee information\n   * @param  AgencyInfo - in, agency information\n   * @param  project_info - in, project information: project_name and project_id\n   * @param  user_agency_token - out, user token\n   * @return successful or fault\n   */\n  static modelbox::Status GetAgencyProjectTokenWithAK(\n      const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n      const ProjectInfo &project_info, UserAgencyToken &project_agency_token);\n\n  /*\n   * @brief  Get agency token\n   * @param  token - in,  vas token\n   * @param  agency_info - in, agency information\n   * @param  project_info - in, project information: project_name and project_id\n   * @param  user_agency_token - out, user token\n   * @return successful or fault\n   */\n  static modelbox::Status GetAgencyProjectTokenWithToken(\n      const AgentToken &agent_token, const AgencyInfo &agency_info,\n      const ProjectInfo &project_info, UserAgencyToken &user_agency_token);\n\n  /*\n   * @brief  Set iam host address\n   * @param  request_host - in,  iam host address\n   */\n  static void SetRequestHost(std::string request_host);\n\n  /*\n   * @brief  Set token request uri\n   * @param  request_token_uri - in,   token request uri\n   */\n  static void SetRequestTokenUri(std::string request_token_uri);\n\n  /*\n   * @brief  Set credential request uri\n   * @param  request_credential_uri - in,   credential request uri\n   */\n  static void SetRequestCredentialUri(std::string request_credential_uri);\n\n  /*\n   * @brief  Set validate certificates flag\n   * @param  validate_certificates_ - in,   Indicates whether to validate the\n   * certificate.\n   */\n  static void SetValidateCertificates(bool validate_certificates);\n  /*\n   * @brief  Set certificate file and path\n   * @param  cert_file - in,   certificate file name\n   * @param  cert_file_path - in, certificate file\n   */\n  static void SetCertFilePath(std::string cert_file,\n                              std::string 
cert_file_path);\n  /*\n   * @brief  Create a signer request\n   * @param  consignee_info - in,  consignee info\n   * @param  agency_info - in, agency info\n   * @param  project_info - project info(id and name)\n   * @param  token_type - token or credential\n   * @return return request\n   */\n  static std::shared_ptr<void> CreateSignerRequest(\n      const ConsigneeInfo &consignee_info, const AgencyInfo &agency_info,\n      const ProjectInfo &project_info, int32_t token_flag);\n\n  static std::string request_host_;\n  static std::string request_credential_uri_;\n  static std::string request_token_uri_;\n  static std::string cert_file_path_;\n  static std::string cert_file_;\n  static bool validate_certificates_;\n};\n}  // namespace modelbox\n#endif  // MODELBOX_FLOWUNIT_IAM_API_H_"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/iam_auth.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/iam_auth.h>\n\n#include \"token_manager.h\"\nnamespace modelbox {\n\nstatic std::shared_ptr<TokenManager> token_manager_ =\n    std::make_shared<TokenManager>();\n\nstd::shared_ptr<IAMAuth> IAMAuth::GetInstance() {\n  static std::shared_ptr<IAMAuth> instance(new IAMAuth());\n  return instance;\n}\n\nIAMAuth::~IAMAuth() = default;\n\nIAMAuth::IAMAuth() = default;\n\nmodelbox::Status IAMAuth::SetConsigneeInfo(const std::string &service_ak,\n                                           const std::string &service_sk,\n                                           const std::string &domain_id,\n                                           const std::string &project_id) {\n  return token_manager_->SetConsigneeInfo(service_ak, service_sk, domain_id,\n                                          project_id);\n}\n\nmodelbox::Status IAMAuth::GetUserAgencyProjectCredential(\n    UserAgencyCredential &agency_user_credential, const AgencyInfo &agency_info,\n    const std::string &user_id) {\n  if (agency_info.user_domain_name.empty()) {\n    if (modelbox::STATUS_OK != token_manager_->GetPersistUserAgencyCredential(\n                                   agency_user_credential, user_id)) {\n      MBLOG_ERROR << \"failed to get user credential info, user_id: \" << user_id;\n      return 
modelbox::STATUS_FAULT;\n    }\n\n    return modelbox::STATUS_OK;\n  }\n\n  modelbox::Status code =\n      token_manager_->RequestAgencyProjectCredential(agency_info, false);\n  if (code != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"failed request agency project credential\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK != token_manager_->GetAgencyProjectCredential(\n                                 agency_info, agency_user_credential)) {\n    MBLOG_ERROR << \"failed get agency project credential\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status IAMAuth::GetUserAgencyProjectToken(\n    UserAgencyToken &agency_user_token, const AgencyInfo &agency_info,\n    const ProjectInfo &project_info) {\n  modelbox::Status code = token_manager_->RequestAgencyProjectToken(\n      agency_info, project_info, false);\n  if (code != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"failed request agency project token\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (modelbox::STATUS_OK !=\n      token_manager_->GetAgencyProjectToken(agency_info, project_info,\n                                            agency_user_token)) {\n    MBLOG_ERROR << \"failed get agency project token\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid IAMAuth::ExpireUserAgencyProjectCredential(const AgencyInfo &agency_info) {\n  token_manager_->DeleteUserAgencyCredential(agency_info);\n}\n\nvoid IAMAuth::ExpireUserAgencyProjectToken(const AgencyInfo &agency_info) {\n  token_manager_->DeleteUserAgencyToken(agency_info);\n}\n\nvoid IAMAuth::SetIAMHostAddress(const std::string &host) {\n  token_manager_->SetHostAddress(host);\n}\n\nmodelbox::Status IAMAuth::Init() { return token_manager_->Init(); }\n\nvoid IAMAuth::SetUserAgencyCredential(const UserAgencyCredential &credential) {\n  token_manager_->SetPersistUserAgencyCredential(credential);\n}\n\nvoid IAMAuth::RemoveUserAgencyCredential(const 
std::string &userId) {\n  token_manager_->RemovePersistUserAgencyCredential(userId);\n}\n\nvoid IAMAuth::SetAgentToken(const AgentToken &token) {\n  token_manager_->SetAgentToken(token);\n}\n\nvoid IAMAuth::SetUpdateAgentTokenCallBack(std::function<void()> &callback) {\n  token_manager_->SetUpdateAgentTokenRequestCallback(callback);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/iam_auth.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IAM_AUTH_H_\n#define MODELBOX_FLOWUNIT_IAM_AUTH_H_\n\n#include <modelbox/base/status.h>\n\n#include <memory>\n\n#include \"token_manager.h\"\nnamespace modelbox {\n/**\n * @brief\n * User: means algorithm user\n * Service: means algorithm developer or platform developer\n */\nclass IAMAuth {\n public:\n  /*\n   * @brief get iamauth instance\n   * @return iamauth instance\n   */\n  static std::shared_ptr<IAMAuth> GetInstance();\n\n  /**\n   * @brief initilize timer\n   * @return successful or fault\n   */\n  modelbox::Status Init();\n\n  /**\n   * @brief set iam host address\n   * @param host - in, iam host address\n   */\n  void SetIAMHostAddress(const std::string &host);\n\n  /**\n   * @brief Set Consignee info: ak, sk, domain_id and project_id\n   * @param ak - in, access key for vas\n   * @param sk - in, secret key for vas\n   * @param domain_id - in, domain id\n   * @param project_id - in, project id\n   * @return successful or fault\n   */\n  modelbox::Status SetConsigneeInfo(const std::string &service_ak,\n                                    const std::string &service_sk,\n                                    const std::string &domain_id,\n                                    const std::string &project_id);\n  /**\n   * @brief If service cert has been set, then you can get\n   * 
user agency Project credential to access user cloud resource\n   * @param agency_user_credential - out, agency credential\n   * @param agency_info - in, agency info\n   * @return successful or fault\n   */\n  modelbox::Status GetUserAgencyProjectCredential(\n      UserAgencyCredential &agency_user_credential,\n      const AgencyInfo &agency_info, const std::string &user_id = \"\");\n\n  /**\n   * @brief If service cert has been set, then you can get\n   * user agency Project token to access user cloud resource\n   * @param agency_project_token - out, agency project token\n   * @param agency_info - in, agency info\n   * @param project_info - in, project info\n   * @return successful or fault\n   */\n  modelbox::Status GetUserAgencyProjectToken(UserAgencyToken &agency_user_token,\n                                             const AgencyInfo &agency_info,\n                                             const ProjectInfo &project_info);\n\n  /**\n   * @brief If user agency Project credential expires, notify me\n   * @param agency_info - in, agency info\n   */\n  void ExpireUserAgencyProjectCredential(const AgencyInfo &agency_info);\n\n  /**\n   * @brief If user agency Project token expires, notify me\n   * @param agency_info - in, agency info\n   */\n  void ExpireUserAgencyProjectToken(const AgencyInfo &agency_info);\n\n  /**\n   * @brief Save agency project credential\n   * @param credential - in, credential token\n   */\n  void SetUserAgencyCredential(const UserAgencyCredential &credential);\n\n  /**\n   * @brief Remove agency project credential\n   * @param userId - in, user id\n   */\n  void RemoveUserAgencyCredential(const std::string &userId);\n\n  /**\n   * @brief Save vas token\n   * @param xrole_name - in\n   * @param token - in, vas token from iva\n   */\n  void SetAgentToken(const AgentToken &token);\n\n  /**\n   * @brief Set update token callback function\n   * @param callback - in\n   */\n  void SetUpdateAgentTokenCallBack(std::function<void()> &callback);\n\n  
virtual ~IAMAuth();\n\n private:\n  IAMAuth();\n\n  std::shared_ptr<TokenManager> token_manager_{\n      std::make_shared<TokenManager>()};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_IAM_AUTH_H_"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/token_manager.cc",
    "content": "#include \"token_manager.h\"\n\n#include <cpprest/http_client.h>\n#include <cpprest/json.h>\n\n#include <future>\n#include <nlohmann/json.hpp>\n#include <string>\n#include <thread>\n\n#include \"iam_api.h\"\n#include \"modelbox/base/log.h\"\n#include \"signer.h\"\n\nusing namespace web;                   // NOLINT\nusing namespace web::http;             // NOLINT\nusing namespace web::http::client;     // NOLINT\nusing namespace utility;               // NOLINT\nusing namespace concurrency::streams;  // NOLINT\n\n#define INTERVAL_TIME (3600 * 1000)\n\nnamespace modelbox {\nTokenManager::TokenManager()\n    : request_credential_uri_(\"/v3.0/OS-CREDENTIAL/securitytokens\"),\n      request_token_uri_(\"/v3/auth/tokens\") {}\n\nTokenManager::~TokenManager() = default;\n\nmodelbox::Status TokenManager::Init() {\n  if (!init_flag_) {\n    timer_ = std::make_shared<modelbox::TimerTask>();\n    timer_->Callback(&TokenManager::OnTimer, this);\n\n    TimerGlobal::Schedule(timer_, 1000, INTERVAL_TIME, false);\n    init_flag_ = true;\n  }\n  return modelbox::STATUS_OK;\n}\n\nvoid TokenManager::OnTimer() {\n  if (++async_count > 1) {\n    async_count--;\n    return;\n  }\n  Defer { async_count--; };\n  RequestAgencyToken();\n}\n\nvoid TokenManager::RequestAgencyToken() {\n  std::unique_lock<std::mutex> lock_credential(credential_lock_);\n  auto origin_user_credential_map = user_credential_map_;\n  lock_credential.unlock();\n  for (auto &credential_item : origin_user_credential_map) {\n    auto status = RequestAgencyProjectCredential(credential_item.first, true);\n    if (status != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"failed to get project credential, user name : \"\n                  << credential_item.first.user_domain_name;\n    }\n  }\n\n  std::unique_lock<std::mutex> lock_token(token_lock_);\n  auto origin_user_token_map = user_token_map_;\n  lock_token.unlock();\n  for (const auto &user_item : origin_user_token_map) {\n    const auto &project_map 
= user_item.second;\n    for (const auto &project_item : project_map) {\n      auto status =\n          RequestAgencyProjectToken(user_item.first, project_item.first, true);\n      if (status != modelbox::STATUS_OK) {\n        MBLOG_ERROR << \"failed to get project credential, user name : \"\n                    << user_item.first.user_domain_name;\n      }\n    }\n  }\n}\n\nmodelbox::Status TokenManager::SetConsigneeInfo(const std::string &ak,\n                                                const std::string &sk,\n                                                const std::string &domain_id,\n                                                const std::string &project_id) {\n  consignee_info_.ak = ak;\n  consignee_info_.sk = sk;\n  consignee_info_.domain_id = domain_id;\n  consignee_info_.project_id = project_id;\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status TokenManager::SetHostAddress(const std::string &host) {\n  request_host_ = host;\n  IAMApi::SetRequestHost(host);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::RequestAgencyProjectToken(\n    const AgencyInfo &agency_info, const ProjectInfo &project_info,\n    bool force) {\n  if (!force) {\n    if (modelbox::STATUS_EXIST ==\n        FindUserAgencyToken(agency_info, project_info)) {\n      return modelbox::STATUS_OK;\n    }\n  }\n\n  IAMApi::SetRequestTokenUri(request_token_uri_);\n  IAMApi::SetRequestCredentialUri(request_credential_uri_);\n  UserAgencyToken token;\n  if (IsExpire(agent_token_.expires_time_) && token_update_callback_) {\n    token_update_callback_();\n  }\n  if (modelbox::STATUS_OK !=\n      IAMApi::GetAgencyProjectTokenWithToken(agent_token_, agency_info,\n                                             project_info, token)) {\n    if (modelbox::STATUS_OK !=\n        IAMApi::GetAgencyProjectTokenWithAK(consignee_info_, agency_info,\n                                            project_info, token)) {\n      return modelbox::STATUS_FAULT;\n    }\n  }\n  if 
(modelbox::STATUS_OK !=\n      SaveUserAgencyToken(agency_info, project_info, token)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::GetAgencyProjectToken(\n    const AgencyInfo &agency_info, const ProjectInfo &project_info,\n    UserAgencyToken &agency_token) {\n  std::lock_guard<std::mutex> lock(token_lock_);\n  auto item = user_token_map_.find(agency_info);\n  if (item == user_token_map_.end()) {\n    return modelbox::STATUS_FAULT;\n  }\n  auto token_item = item->second.find(project_info);\n  if (token_item == item->second.end()) {\n    return modelbox::STATUS_FAULT;\n  }\n  agency_token = token_item->second;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::RequestAgencyProjectCredential(\n    const AgencyInfo &agency_info, bool force) {\n  if (!force) {\n    if (modelbox::STATUS_EXIST == FindUserAgencyCredential(agency_info)) {\n      return modelbox::STATUS_OK;\n    }\n  }\n\n  IAMApi::SetRequestCredentialUri(request_credential_uri_);\n  ProjectInfo project_info;\n  UserAgencyCredential credential;\n\n  if (modelbox::STATUS_OK != IAMApi::GetAgencyProjectCredentialWithAK(\n                                 consignee_info_, agency_info, credential)) {\n    if (IsExpire(agent_token_.expires_time_) && token_update_callback_) {\n      token_update_callback_();\n      return modelbox::STATUS_FAULT;\n    }\n    if (modelbox::STATUS_OK != IAMApi::GetAgencyProjectCredentialWithToken(\n                                   agent_token_, agency_info, credential)) {\n      return modelbox::STATUS_FAULT;\n    }\n  }\n  if (modelbox::STATUS_OK !=\n      SaveUserAgencyCredential(agency_info, credential)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::GetAgencyProjectCredential(\n    const AgencyInfo &agency_info, UserAgencyCredential &agency_credential) {\n  std::lock_guard<std::mutex> lock(credential_lock_);\n  auto iter = 
user_credential_map_.find(agency_info);\n  if (iter == user_credential_map_.end()) {\n    return modelbox::STATUS_FAULT;\n  }\n  agency_credential = iter->second;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::SaveUserAgencyCredential(\n    const AgencyInfo &agency_info, UserAgencyCredential &credential) {\n  std::lock_guard<std::mutex> lock(credential_lock_);\n  user_credential_map_[agency_info] = credential;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::SaveUserAgencyToken(\n    const AgencyInfo &agency_info, const ProjectInfo &project_info,\n    UserAgencyToken &agency_token) {\n  std::lock_guard<std::mutex> lock(token_lock_);\n  user_token_map_[agency_info][project_info] = agency_token;\n  return modelbox::STATUS_OK;\n}\n\nvoid TokenManager::DeleteUserAgencyCredential(const AgencyInfo &agency_info) {\n  std::lock_guard<std::mutex> lock(credential_lock_);\n  auto iter = user_credential_map_.find(agency_info);\n  if (iter != user_credential_map_.end()) {\n    user_credential_map_.erase(iter);\n  }\n}\n\nvoid TokenManager::DeleteUserAgencyToken(const AgencyInfo &agency_info) {\n  std::lock_guard<std::mutex> lock(token_lock_);\n  auto iter = user_token_map_.find(agency_info);\n  if (iter != user_token_map_.end()) {\n    user_token_map_.erase(iter);\n  }\n}\n\nmodelbox::Status TokenManager::FindUserAgencyCredential(\n    const AgencyInfo &agency_info) {\n  std::lock_guard<std::mutex> lock(credential_lock_);\n  auto iter = user_credential_map_.find(agency_info);\n  if (iter == user_credential_map_.end()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n  return modelbox::STATUS_EXIST;\n}\n\nmodelbox::Status TokenManager::FindUserAgencyToken(\n    const AgencyInfo &agency_info, const ProjectInfo &project_info) {\n  std::lock_guard<std::mutex> lock(token_lock_);\n  auto iter = user_token_map_.find(agency_info);\n  if (iter == user_token_map_.end()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  auto token_iter = 
iter->second.find(project_info);\n  if (token_iter == iter->second.end()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n  return modelbox::STATUS_EXIST;\n}\n\nvoid TokenManager::SetPersistUserAgencyCredential(\n    const UserAgencyCredential &credential) {\n  std::lock_guard<std::mutex> lock(persist_credential_lock_);\n  persist_credential_[credential.user_id] = credential;\n}\n\nvoid TokenManager::RemovePersistUserAgencyCredential(\n    const std::string &userId) {\n  std::lock_guard<std::mutex> lock(persist_credential_lock_);\n  if (persist_credential_.find(userId) == persist_credential_.end()) {\n    MBLOG_WARN << \"RemovePersistUserAgencyCredential: \" << userId\n               << \" presist credential info isn't exist\";\n    return;\n  }\n  persist_credential_.erase(userId);\n}\n\nmodelbox::Status TokenManager::GetPersistUserAgencyCredential(\n    UserAgencyCredential &credential, const std::string &userId) {\n  std::lock_guard<std::mutex> lock(persist_credential_lock_);\n  if (persist_credential_.find(userId) == persist_credential_.end()) {\n    MBLOG_WARN << userId << \" presist credential info isn't exist\";\n    return modelbox::STATUS_FAULT;\n  }\n  UserAgencyCredential userCredential = persist_credential_[userId];\n  if (userCredential.user_ak.empty()) {\n    MBLOG_ERROR\n        << \"presist credential is empty, please set presist credential first.\";\n    return modelbox::STATUS_FAULT;\n  }\n  credential.user_id = userCredential.user_id;\n  credential.user_ak = userCredential.user_ak;\n  credential.user_sk = userCredential.user_sk;\n  credential.user_secure_token = userCredential.user_secure_token;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TokenManager::SetAgentToken(const AgentToken &token) {\n  agent_token_ = token;\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid TokenManager::SetUpdateAgentTokenRequestCallback(\n    std::function<void()> &callback) {\n  token_update_callback_ = callback;\n}\n\nbool TokenManager::IsExpire(const 
std::string &expire) const {\n  auto expire_tp = datetime::from_string(expire, datetime::ISO_8601);\n  if (!expire_tp.is_initialized()) {\n    auto msg = std::string(\"expire not ISO_8601 format.\") +\n               std::string(\" expire:\") + expire;\n    return true;\n  }\n\n  auto now_tp = datetime::utc_now() + datetime::from_minutes(30);\n  if (!now_tp.is_initialized()) {\n    auto msg = std::string(\"expire not ISO_8601 format.\") +\n               std::string(\" expire:\") + expire;\n    return true;\n  }\n\n  if (expire_tp.to_interval() < now_tp.to_interval()) {\n    auto msg = std::string(\"expire timeout.\") + std::string(\" expire:\") +\n               expire + std::string(\" expire_tp:\") +\n               now_tp.to_string(datetime::ISO_8601);\n    MBLOG_WARN << msg;\n    return true;\n  }\n\n  return false;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/iam_auth/token_manager.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef GET_TOKEN_H_\n#define GET_TOKEN_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <map>\n\n#include <modelbox/token_header.h>\n\nnamespace modelbox {\nclass TokenManager {\n public:\n  TokenManager();\n\n  virtual ~TokenManager();\n\n  /**\n   * @brief   Initializing a Scheduled Task\n   * @return  Successful or not\n   */\n  modelbox::Status Init();\n\n  /**\n   * @brief   Save Consiginee information and AK/SK\n   * @param   ak - in, access_key\n   * @param   sk - in, secret_key\n   * @param   domian_id - in, consignee's domain id\n   * @param   project_id - in, current project id\n   * @return  Successful or not\n   */\n  modelbox::Status SetConsigneeInfo(const std::string &ak,\n                                    const std::string &sk,\n                                    const std::string &domain_id,\n                                    const std::string &project_id);\n\n  /**\n   * @brief   Save IAM host address\n   * @param   host - in, iam host address\n   * @return  Successful or not\n   */\n  modelbox::Status SetHostAddress(const std::string &host);\n\n  /**\n   * @brief   request agency credential\n   * @param   agency_info - in, agency info\n   * @param   force - in,\n   *                  true: request credential from host,\n   *                  false: 
check whether credential exists in cache before request\n   * @return  Successful or not\n   */\n  modelbox::Status RequestAgencyProjectCredential(const AgencyInfo &agency_info,\n                                                  bool force);\n\n  /**\n   * @brief   get agency credential\n   * @param   agency_info - in, agency information\n   * @param   agency_credential - out, credential information\n   * @return  Successful or not\n   */\n  modelbox::Status GetAgencyProjectCredential(\n      const AgencyInfo &agency_info, UserAgencyCredential &agency_credential);\n\n  /**\n   * @brief   request agency token\n   * @param   agency_info - in, agency info\n   * @param   force - in,\n   *                  true: request token from host,\n   *                  false: check whether token exists in cache before request\n   * @return  Successful or not\n   */\n  modelbox::Status RequestAgencyProjectToken(const AgencyInfo &agency_info,\n                                             const ProjectInfo &project_info,\n                                             bool force);\n\n  /**\n   * @brief   get agency token\n   * @param   agency_info - in, agency information\n   * @param   agency_token - out, token information\n   * @return  Successful or not\n   */\n  modelbox::Status GetAgencyProjectToken(const AgencyInfo &agency_info,\n                                         const ProjectInfo &project_info,\n                                         UserAgencyToken &agency_token);\n\n  /**\n   * @brief   delete agency credential\n   * in cache\n   * @param   agency_info - in, agency\n   * information\n   * @return  Successful or not\n   */\n  void DeleteUserAgencyCredential(const AgencyInfo &agency_info);\n\n  /**\n   * @brief   delete agency token in\n   * cache\n   * @param   agency_info - in, agency\n   * information\n   * @return  Successful or not\n   */\n  void DeleteUserAgencyToken(const AgencyInfo &agency_info);\n\n  /**\n   * @brief   save agency\n   * credential in cache\n   * @param   
credential - in,\n   * credential information\n   */\n  void SetPersistUserAgencyCredential(const UserAgencyCredential &credential);\n\n  /**\n   * @brief   remove agency\n   * remove credential in cache\n   * @param   userId - in,\n   * remove credential information\n   */\n  void RemovePersistUserAgencyCredential(const std::string &userId);\n\n  /**\n   * @brief   get agency credential in\n   * cache\n   * @param   credential - out,\n   * credential information\n   */\n  modelbox::Status GetPersistUserAgencyCredential(\n      UserAgencyCredential &credential, const std::string &userId = \"\");\n\n  modelbox::Status SetAgentToken(const AgentToken &token);\n  /*\n\n  */\n  void SetUpdateAgentTokenRequestCallback(std::function<void()> &callback);\n\n private:\n  void OnTimer();\n\n  modelbox::Status SaveUserAgencyToken(const AgencyInfo &agency_info,\n                                       const ProjectInfo &project_info,\n                                       UserAgencyToken &agency_token);\n\n  modelbox::Status SaveUserAgencyCredential(const AgencyInfo &agency_info,\n                                            UserAgencyCredential &credential);\n\n  modelbox::Status FindUserAgencyCredential(const AgencyInfo &agency_info);\n\n  modelbox::Status FindUserAgencyToken(const AgencyInfo &agency_info,\n                                       const ProjectInfo &project_info);\n\n  void RequestAgencyToken();\n\n  bool IsExpire(const std::string &expire) const;\n\n  bool init_flag_{false};\n  std::string request_credential_uri_;\n  std::string request_token_uri_;\n  std::string request_host_;\n  std::string request_method_;\n  ConsigneeInfo consignee_info_;\n  std::map<AgencyInfo, UserAgencyCredential> user_credential_map_;\n  std::map<AgencyInfo, std::map<ProjectInfo, UserAgencyToken> > user_token_map_;\n  std::mutex credential_lock_;\n  std::mutex token_lock_;\n  std::mutex persist_credential_lock_;\n  std::map<std::string, UserAgencyCredential> persist_credential_;\n  
AgentToken agent_token_;\n  std::function<void(void)> token_update_callback_;\n  std::atomic<int32_t> async_count{0};\n  std::shared_ptr<modelbox::TimerTask> timer_;\n};\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/obs_client/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_COMPONENT_SOURCE MODELBOX_COMPONENT_TEST_SOURCE \"_test.c*\" ${SOURCES})\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\n\nset(LIBRARY modelbox-unit-cpu-obs_client)\nadd_library(${LIBRARY} SHARED ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBRARY} ${OBS_LIBRARIES})\ntarget_link_libraries(${LIBRARY} ${MODELBOX_COMMON_IAM_AUTH_LIBRARY})\n\ninstall(TARGETS ${LIBRARY} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\nset(MODELBOX_COMMON_OBS_CLIENT_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_OBS_CLIENT_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/hw_components/obs_client/obs_client.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/obs_client.h>\n#include <securec.h>\n#include <modelbox/iam_auth.h>\n#include <modelbox/base/utils.h>\n\n\n#define OBS_SDK_MAX_KEYS 1000\n#define MAX_RETRY_COUNTS 3\n#define OBJECTS_LIST_MARKER_SIZE 4096\n\nnamespace modelbox {\n\nstd::mutex ObsClient::obs_client_lock_;\n\n// callbacks for OBS SDK\nobs_status ResponsePropertiesCallback(const obs_response_properties *properties,\n                                      void *callback_data);\nvoid ListObjectCompleteCallback(obs_status status,\n                                const obs_error_details *error,\n                                void *callback_data);\nobs_status ListObjectsCallback(int is_truncated, const char *next_marker,\n                               int contents_count,\n                               const obs_list_objects_content *contents,\n                               int common_prefixes_count,\n                               const char **common_prefixes,\n                               void *callback_data);\nobs_status GetPropertiesCallback(const obs_response_properties *properties,\n                                 void *callback_data);\nvoid GetObjectCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data);\nobs_status 
GetObjectDataCallback(int buffer_size, const char *buffer,\n                                 void *callback_data);\nvoid PutBufferCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data);\nint PutBufferDataCallback(int buffer_size, char *buffer, void *callback_data);\nobs_status GetObjectSizeCallback(const obs_response_properties *properties,\n                                 void *callback_data);\n\nvoid GetObjectSizeCompleteCallback(obs_status status,\n                                   const obs_error_details *error,\n                                   void *callback_data);\n\nobs_status GetBufferCallback(int buffer_size, const char *buffer,\n                             void *callback_data);\n\nvoid GetBufferCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data);\n\n// data struct for OBS SDK callbacks\nusing GetObjectListData = struct {\n  int is_truncated;\n  char next_marker[OBJECTS_LIST_MARKER_SIZE];\n  std::vector<std::string> object_keys_list;\n  obs_status ret_status;\n};\n\nusing GetObjectCallbackData = struct {\n  std::shared_ptr<FILE> out_file;\n  obs_status ret_status;\n};\n\nusing GetBufferCallbackData = struct {\n  unsigned char *get_buffer;\n  uint64_t buffer_size;\n  obs_status ret_status;\n};\n\nusing GetObejectSizeCallbackData = struct {\n  uint64_t content_length;\n  obs_status ret_status;\n};\n\nusing PutBufferObjectCallbackData = struct {\n  char *put_buffer;\n  uint64_t buffer_size;\n  uint64_t cur_offset;\n  obs_status ret_status;\n};\n\nstd::shared_ptr<ObsClient> ObsClient::GetInstance() {\n  std::lock_guard<std::mutex> lock(obs_client_lock_);\n  static std::shared_ptr<ObsClient> obs_client = nullptr;\n  if (nullptr == obs_client) {\n    obs_client = std::shared_ptr<ObsClient>(new ObsClient());\n    if (nullptr == obs_client) {\n      MBLOG_ERROR 
<< \"Failed to construct obs client!\";\n      return nullptr;\n    }\n  }\n\n  static bool is_initialized = false;\n  if (true == is_initialized) {\n    return obs_client;\n  }\n  auto ret = obs_client->InitObsSdk();\n  if (modelbox::STATUS_OK != ret.Code()) {\n    MBLOG_ERROR << ret.Errormsg();\n    return nullptr;\n  }\n  is_initialized = true;\n\n  return obs_client;\n}\n\nObsClient::ObsClient() = default;\nObsClient::~ObsClient() { DeInitObsSdk(); };\n\nmodelbox::Status ObsClient::InitObsSdk() {\n  obs_status ret_status = OBS_STATUS_BUTT;\n  ret_status = obs_initialize(OBS_INIT_ALL);\n  if (OBS_STATUS_OK != ret_status) {\n    const auto *obs_err = obs_get_status_name(ret_status);\n    std::string err_msg = \"Failed to initialize OBS SDK\";\n    if (obs_err) {\n      err_msg += \", \";\n      err_msg += obs_err;\n    }\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nvoid ObsClient::DeInitObsSdk() { obs_deinitialize(); }\n\nmodelbox::Status ObsClient::GetAuthInfo(const std::string &domain_name,\n                                        const std::string &xrole_name,\n                                        const std::string &user_id,\n                                        std::string &access_key,\n                                        std::string &secret_key,\n                                        std::string &security_token) {\n  std::string err_msg;\n\n  modelbox::AgencyInfo agent_info;\n  agent_info.user_domain_name = domain_name;\n  agent_info.xrole_name = xrole_name;\n\n  auto hw_auth = modelbox::IAMAuth::GetInstance();\n  if (hw_auth == nullptr) {\n    err_msg = \"Failed to get hw_auth instance!\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  modelbox::UserAgencyCredential credential;\n  auto ret =\n      hw_auth->GetUserAgencyProjectCredential(credential, agent_info, user_id);\n  if (ret != modelbox::STATUS_OK) {\n    err_msg = \"Failed to get credential info!\";\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n\n  access_key = credential.user_ak;\n  secret_key = credential.user_sk;\n  security_token = credential.user_secure_token;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsClient::GetUpdatedAuthInfo(const std::string &domain_name,\n                                               const std::string &xrole_name,\n                                               const std::string &user_id,\n                                               std::string &access_key,\n                                               std::string &secret_key,\n                                               std::string &security_token) {\n  modelbox::AgencyInfo agency_info;\n  agency_info.user_domain_name = domain_name;\n  agency_info.xrole_name = xrole_name;\n\n  std::string err_msg;\n  auto hw_auth = modelbox::IAMAuth::GetInstance();\n  if (hw_auth == nullptr) {\n    err_msg = \"Failed to get hw_auth instance!\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  hw_auth->ExpireUserAgencyProjectCredential(agency_info);\n\n  modelbox::UserAgencyCredential user_credential;\n  auto ret = hw_auth->GetUserAgencyProjectCredential(user_credential,\n                                                     agency_info, user_id);\n  if (ret != modelbox::STATUS_OK) {\n    err_msg = \"Failed to get the renewed credential info!\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  access_key = user_credential.user_ak;\n  secret_key = user_credential.user_sk;\n  security_token = user_credential.user_secure_token;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsClient::NotifyToUpdateAuthInfo(\n    const std::string &domain_name, const std::string &xrole_name) {\n  modelbox::AgencyInfo agency_info;\n  agency_info.user_domain_name = domain_name;\n  agency_info.xrole_name = xrole_name;\n\n  std::string err_msg;\n  auto hw_auth = modelbox::IAMAuth::GetInstance();\n  if (hw_auth == nullptr) {\n    err_msg = \"Failed to update Auth info\";\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n  hw_auth->ExpireUserAgencyProjectCredential(agency_info);\n\n  return modelbox::STATUS_OK;\n}\n\nbool ObsClient::IsValidOptionExceptPath(const ObsOptions &opt) {\n  if (opt.end_point.empty() || opt.bucket.empty()) {\n    return false;\n  }\n  return true;\n}\n\nbool ObsClient::IsValidOptionIncludingPath(const ObsOptions &opt) {\n  if (!IsValidOptionExceptPath(opt) || opt.path.empty()) {\n    return false;\n  }\n  return true;\n}\n\nvoid ObsClient::SetObsOption(const ObsOptions &src, const std::string &ak,\n                             const std::string &sk,\n                             const std::string &security_token,\n                             obs_options &dst) {\n  dst.bucket_options.host_name = const_cast<char *>(src.end_point.c_str());\n  dst.bucket_options.bucket_name = const_cast<char *>(src.bucket.c_str());\n  dst.bucket_options.access_key = const_cast<char *>(ak.c_str());\n  dst.bucket_options.secret_access_key = const_cast<char *>(sk.c_str());\n  dst.bucket_options.token = const_cast<char *>(security_token.c_str());\n}\n\nstd::shared_ptr<FILE> ObsClient::OpenLocalFile(\n    const std::string &full_file_path) {\n  if (full_file_path.empty()) {\n    return nullptr;\n  }\n\n  std::string path = full_file_path.substr(0, full_file_path.rfind('/'));\n\n  if (modelbox::CreateDirectory(path) != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to create folder for obs object (\" << full_file_path\n                << \"), error info: \" << modelbox::StrError(errno);\n    return nullptr;\n  }\n\n  auto path_name = modelbox::PathCanonicalize(full_file_path);\n  FILE *out_file = fopen(path_name.c_str(), \"wb\");\n  if (!out_file) {\n    MBLOG_ERROR << \"Failed to create file \" << full_file_path\n                << \", because: \" << modelbox::StrError(errno);\n    return nullptr;\n  }\n  auto file_ptr = std::shared_ptr<FILE>(out_file, [](FILE *file) {\n    fflush(file);\n    fclose(file);\n  });\n  return 
file_ptr;\n}\n\nbool ObsClient::NeedUpdateAuthInfo(obs_status status) {\n  switch (status) {\n    case OBS_STATUS_InvalidAccessKeyId:\n    case OBS_STATUS_NoToken:\n    case OBS_STATUS_ExpiredToken:\n    case OBS_STATUS_InvalidToken:\n    case OBS_STATUS_TokenRefreshRequired:\n      return true;\n    default:\n      return false;\n  }\n}\n\nbool ObsClient::NeedTryAgain(obs_status status) {\n  switch (status) {\n    case OBS_STATUS_EntityTooSmall:\n    case OBS_STATUS_EntityTooLarge:\n    case OBS_STATUS_InlineDataTooLarge:\n    case OBS_STATUS_NoSuchBucket:\n    case OBS_STATUS_NoSuchKey:\n    case OBS_STATUS_OK:\n      return false;\n    default:\n      return true;\n  }\n}\n\nmodelbox::Status ObsClient::GetObjectsList(\n    const ObsOptions &opt, std::vector<std::string> &object_list) {\n  if (!IsValidOptionExceptPath(opt)) {\n    std::string err_msg = \"Invalid parameters!\";\n    return {modelbox::STATUS_INVALID, err_msg};\n  }\n\n  // get Authorization info\n  std::string ak;\n  std::string sk;\n  std::string security_token;\n  auto ret = GetAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, ak, sk,\n                         security_token);\n  if (modelbox::STATUS_OK != ret) {\n    return ret;\n  }\n\n  // create and initialize the obs option\n  obs_options option;\n  init_obs_options(&option);\n  SetObsOption(opt, ak, sk, security_token, option);\n\n  // set callbacks\n  obs_list_objects_handler list_bucket_objects_handler = {\n      {&ResponsePropertiesCallback, &ListObjectCompleteCallback},\n      &ListObjectsCallback};\n\n  // user-defined callback data\n  GetObjectListData data;\n  memset_s(&data, sizeof(GetObjectListData), 0, sizeof(GetObjectListData));\n\n  int retry_count = 0;\n  char next_marker[OBJECTS_LIST_MARKER_SIZE] = {0};\n\n  // list objects\n  while (retry_count < MAX_RETRY_COUNTS) {\n    if (opt.path.empty()) {\n      list_bucket_objects(&option, nullptr, next_marker, nullptr,\n                          OBS_SDK_MAX_KEYS, 
&list_bucket_objects_handler,\n                          &data);\n    } else {\n      list_bucket_objects(&option, opt.path.c_str(), next_marker, nullptr,\n                          OBS_SDK_MAX_KEYS, &list_bucket_objects_handler,\n                          &data);\n    }\n\n    if (OBS_STATUS_OK == data.ret_status) {\n      retry_count = 0;  // reset\n      // successfully get complete list\n      if (!data.is_truncated) {\n        break;\n      }\n      auto len =\n          snprintf_s(next_marker, OBJECTS_LIST_MARKER_SIZE,\n                     OBJECTS_LIST_MARKER_SIZE - 1, \"%s\", data.next_marker);\n      if (len < 0 || len >= OBJECTS_LIST_MARKER_SIZE - 1) {\n        MBLOG_WARN << \"marker is too long: \" << next_marker;\n        return {modelbox::STATUS_INVALID, \"marker too long\"};\n      }\n\n      continue;\n    }\n\n    ++retry_count;\n    if (NeedUpdateAuthInfo(data.ret_status)) {\n      MBLOG_WARN << \"Auth info expire and need to be updated.\";\n      auto ret = GetUpdatedAuthInfo(opt.domain_name, opt.xrole_name,\n                                    opt.user_id, ak, sk, security_token);\n      if (modelbox::STATUS_OK != ret) {\n        MBLOG_WARN << \"Failed to update auth info!! obs_ret_status: \"\n                   << obs_get_status_name(data.ret_status)\n                   << \", try count: \" << retry_count;\n        continue;\n      }\n      MBLOG_WARN << \"Auth info is updated successfully. obs_ret_status: \"\n                 << obs_get_status_name(data.ret_status)\n                 << \", try count: \" << retry_count;\n      SetObsOption(opt, ak, sk, security_token, option);\n      continue;\n    }\n    MBLOG_ERROR << \"Failed to list objects! 
obs_ret_status: \"\n                << obs_get_status_name(data.ret_status)\n                << \", try count: \" << retry_count;\n  }\n\n  std::string err_msg;\n  if (retry_count >= MAX_RETRY_COUNTS) {\n    err_msg = \"Failed to list objects after \" + std::to_string(retry_count) +\n              \" tries!\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  object_list = std::move(data.object_keys_list);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsClient::GetObject(const ObsOptions &opt,\n                                      const std::string &file_local_path) {\n  std::string err_msg;\n  if (!IsValidOptionIncludingPath(opt) || file_local_path.empty()) {\n    err_msg = \"Failed to download obs object: Invalid parameters! file key: \" +\n              file_local_path;\n    return {modelbox::STATUS_INVALID, err_msg};\n  }\n\n  // get Authorization info\n  std::string ak;\n  std::string sk;\n  std::string security_token;\n  auto ret = GetAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, ak, sk,\n                         security_token);\n  if (modelbox::STATUS_OK != ret) {\n    return ret;\n  }\n\n  // Initialize the download option\n  obs_options option;\n  init_obs_options(&option);\n  SetObsOption(opt, ak, sk, security_token, option);\n\n  obs_object_info object_info = {nullptr};\n  object_info.key = const_cast<char *>(opt.path.c_str());\n\n  GetObjectCallbackData data;\n  data.ret_status = OBS_STATUS_BUTT;\n  data.out_file = OpenLocalFile(file_local_path);\n  if (nullptr == data.out_file) {\n    err_msg =\n        \"Failed to download obs object: can't open local file to accept \"\n        \"data: \" +\n        file_local_path;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  // define the download range; 0 indicates to download the whole object.\n  obs_get_conditions get_condition = {0};\n  init_get_properties(&get_condition);\n  get_condition.start_byte = 0;  // the start position of the object\n  get_condition.byte_count =\n  
    0;  // download length, default is 0, up to the end of the object\n  obs_get_object_handler get_object_handler = {\n      {&GetPropertiesCallback, &GetObjectCompleteCallback},\n      &GetObjectDataCallback};\n\n  // download\n  get_object(&option, &object_info, &get_condition, nullptr,\n             &get_object_handler, &data);\n\n  if (NeedUpdateAuthInfo(data.ret_status)) {\n    MBLOG_WARN\n        << \"Denied to access OBS. Maybe Auth info expired. Try to update.\";\n    ret = GetUpdatedAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, ak,\n                             sk, security_token);\n    if (modelbox::STATUS_OK != ret) {\n      MBLOG_WARN << \"Failed to update hw_auth info.\";\n    } else {\n      // try to get object again.\n      SetObsOption(opt, ak, sk, security_token, option);\n      get_object(&option, &object_info, &get_condition, nullptr,\n                 &get_object_handler, &data);\n    }\n  }\n\n  if (OBS_STATUS_OK != data.ret_status) {\n    const auto *obs_status_name = obs_get_status_name(data.ret_status);\n    if (obs_status_name == nullptr) {\n      obs_status_name = \"null\";\n    }\n    err_msg = \"Failed to download obs object: [\" + opt.bucket + \"] - \" +\n              opt.path + \" err_msg: (\" + std::to_string(data.ret_status) +\n              \": \" + obs_status_name + \").\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsClient::GetBuffer(ObsOptions &opt, unsigned char *buf,\n                                      uint64_t size, uint64_t offset) {\n  std::string err_msg;\n\n  obs_object_info object_info = {nullptr};\n  object_info.key = const_cast<char *>(opt.path.c_str());\n\n  GetBufferCallbackData data;\n  data.ret_status = OBS_STATUS_BUTT;\n  data.get_buffer = buf;\n  data.buffer_size = 0;\n\n  // get Authorization info\n  if (opt.ak.empty() || opt.sk.empty()) {\n    auto ret = GetAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, opt.ak,\n            
               opt.sk, opt.token);\n    if (modelbox::STATUS_OK != ret) {\n      return ret;\n    }\n  }\n\n  // Initialize the download option\n  obs_options option;\n  init_obs_options(&option);\n  SetObsOption(opt, opt.ak, opt.sk, opt.token, option);\n\n  // define the download range; 0 indicates to download the whole object.\n  obs_get_conditions get_condition = {0};\n  init_get_properties(&get_condition);\n  get_condition.start_byte = offset;  // the start position of the object\n  get_condition.byte_count =\n      size;  // download length, default is 0, up to the end of the object\n  obs_get_object_handler get_object_handler = {\n      {&GetPropertiesCallback, &GetBufferCompleteCallback}, &GetBufferCallback};\n\n  // download\n  get_object(&option, &object_info, &get_condition, nullptr,\n             &get_object_handler, &data);\n\n  if (NeedUpdateAuthInfo(data.ret_status)) {\n    MBLOG_WARN\n        << \"Denied to access OBS. Maybe Auth info expired. Try to update.\";\n    auto ret = GetUpdatedAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id,\n                                  opt.ak, opt.sk, opt.token);\n    if (modelbox::STATUS_OK != ret) {\n      MBLOG_WARN << \"Failed to update hw_auth info.\";\n    } else {\n      // try to get object again.\n      SetObsOption(opt, opt.ak, opt.sk, opt.token, option);\n      get_object(&option, &object_info, &get_condition, nullptr,\n                 &get_object_handler, &data);\n    }\n  }\n\n  if (OBS_STATUS_OK != data.ret_status) {\n    err_msg = \"Failed to get buffer from obs data, bucket: \" + opt.bucket +\n              \", file key: \" + opt.path +\n              \", buffer size: \" + std::to_string(size) +\n              \", obs status: \" + std::to_string(data.ret_status) + \" (\" +\n              obs_get_status_name(data.ret_status) + \").\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nuint64_t ObsClient::GetObjectSize(ObsOptions &opt) {\n  std::string 
err_msg;\n\n  obs_object_info object_info = {nullptr};\n  object_info.key = const_cast<char *>(opt.path.c_str());\n\n  GetObejectSizeCallbackData data;\n  data.ret_status = OBS_STATUS_BUTT;\n  data.content_length = 0;\n\n  // get Authorization info\n  if (opt.ak.empty() || opt.sk.empty()) {\n    auto ret = GetAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, opt.ak,\n                           opt.sk, opt.token);\n    if (modelbox::STATUS_OK != ret) {\n      return 0;\n    }\n  }\n\n  // Initialize the download option\n  obs_options option;\n  init_obs_options(&option);\n  SetObsOption(opt, opt.ak, opt.sk, opt.token, option);\n\n  obs_response_handler response_handler = {&GetObjectSizeCallback,\n                                           &GetObjectSizeCompleteCallback};\n\n  get_object_metadata(&option, &object_info, nullptr, &response_handler, &data);\n\n  if (NeedUpdateAuthInfo(data.ret_status)) {\n    MBLOG_WARN\n        << \"Denied to access OBS. Maybe Auth info expired. Try to update.\";\n    auto ret = GetUpdatedAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id,\n                                  opt.ak, opt.sk, opt.token);\n    if (modelbox::STATUS_OK != ret) {\n      MBLOG_WARN << \"Failed to update hw_auth info.\";\n    } else {\n      // try to get object again.\n      SetObsOption(opt, opt.ak, opt.sk, opt.token, option);\n      get_object_metadata(&option, &object_info, nullptr, &response_handler,\n                          &data);\n    }\n  }\n\n  if (OBS_STATUS_OK != data.ret_status) {\n    err_msg = \"Failed to get obs object size, bucket: \" + opt.bucket +\n              \", file key: \" + opt.path +\n              \", obs status: \" + std::to_string(data.ret_status) + \" (\" +\n              obs_get_status_name(data.ret_status) + \").\";\n    MBLOG_ERROR << err_msg;\n  }\n\n  return data.content_length;\n}\n\nmodelbox::Status ObsClient::PutObject(const ObsOptions &opt, const char *data,\n                                      size_t 
data_size) {\n  std::string err_msg;\n  if (!IsValidOptionIncludingPath(opt)) {\n    err_msg = \"Failed to output obs data: Invalid obs options!\";\n    return {modelbox::STATUS_INVALID, err_msg};\n  }\n\n  if (data == nullptr || data_size == 0) {\n    err_msg = \"Failed to output obs data: Invalid data!\";\n    return {modelbox::STATUS_INVALID, err_msg};\n  }\n\n  // get Authorization info\n  std::string ak;\n  std::string sk;\n  std::string security_token;\n  auto ret = GetAuthInfo(opt.domain_name, opt.xrole_name, opt.user_id, ak, sk,\n                         security_token);\n  if (modelbox::STATUS_OK != ret) {\n    return ret;\n  }\n\n  // initialize obs option\n  obs_options option;\n  init_obs_options(&option);\n  SetObsOption(opt, ak, sk, security_token, option);\n\n  // initialize put properties\n  obs_put_properties put_properties;\n  init_put_properties(&put_properties);\n\n  // initialize put\n  PutBufferObjectCallbackData data_to_put = {nullptr};\n  data_to_put.put_buffer = const_cast<char *>(data);\n  data_to_put.buffer_size = data_size;\n\n  // set callback functions\n  obs_put_object_handler putobjectHandler = {\n      {&ResponsePropertiesCallback, &PutBufferCompleteCallback},\n      &PutBufferDataCallback};\n\n  put_object(&option, const_cast<char *>(opt.path.c_str()), data_size,\n             &put_properties, nullptr, &putobjectHandler, &data_to_put);\n\n  if (NeedUpdateAuthInfo(data_to_put.ret_status)) {\n    MBLOG_WARN\n        << \"Access obs denied. Maybe Auth info is expired. Try to update.\";\n    ret = NotifyToUpdateAuthInfo(opt.domain_name, opt.xrole_name);\n    if (modelbox::STATUS_OK != ret) {\n      MBLOG_ERROR << ret.Errormsg();\n    }\n    return {modelbox::STATUS_AGAIN,\n            \"Failed to output obs data. 
Try again please.\"};\n  }\n\n  if (NeedTryAgain(data_to_put.ret_status)) {\n    err_msg = \"Failed to output obs data, bucket: \" + opt.bucket +\n              \", file key: \" + opt.path +\n              \", data size: \" + std::to_string(data_size) +\n              \", obs status: \" + std::to_string(data_to_put.ret_status) + \" (\" +\n              obs_get_status_name(data_to_put.ret_status) + \").\";\n    return {modelbox::STATUS_AGAIN, err_msg};\n  }\n\n  if (OBS_STATUS_OK != data_to_put.ret_status) {\n    err_msg = \"Failed to output obs data, bucket: \" + opt.bucket +\n              \", file key: \" + opt.path +\n              \", data size: \" + std::to_string(data_size) +\n              \", obs status: \" + std::to_string(data_to_put.ret_status) + \" (\" +\n              obs_get_status_name(data_to_put.ret_status) + \").\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nobs_status ResponsePropertiesCallback(const obs_response_properties *properties,\n                                      void *callback_data) {\n  return OBS_STATUS_OK;\n}\n\nvoid ListObjectCompleteCallback(obs_status status,\n                                const obs_error_details *error,\n                                void *callback_data) {\n  auto *data = (GetObjectListData *)callback_data;\n  data->ret_status = status;\n\n  if (data->ret_status == OBS_STATUS_OK && error && error->message) {\n    // sometimes there would be an error, eventhough the ret_status is OK.\n    data->ret_status = OBS_STATUS_InternalError;\n  }\n}\n\nobs_status ListObjectsCallback(int is_truncated, const char *next_marker,\n                               int contents_count,\n                               const obs_list_objects_content *contents,\n                               int common_prefixes_count,\n                               const char **common_prefixes,\n                               void *callback_data) {\n  if (contents_count < 0) {\n    MBLOG_WARN << 
\"Illegal contents count: \" << contents_count;\n    return OBS_STATUS_AbortedByCallback;\n  }\n\n  auto *data = (GetObjectListData *)callback_data;\n\n  data->is_truncated = is_truncated;\n  // This is tricky.  S3 doesn't return the NextMarker if there is no\n  // delimiter.  Why, I don't know, since it's still useful for paging\n  // through results.  We want NextMarker to be the last content in the\n  // list, so set it to that if necessary.\n  if ((!next_marker || !next_marker[0]) && contents_count) {\n    next_marker = contents[contents_count - 1].key;\n  }\n  if (next_marker) {\n    auto ret = snprintf_s(data->next_marker, OBJECTS_LIST_MARKER_SIZE,\n                          OBJECTS_LIST_MARKER_SIZE - 1, \"%s\", next_marker);\n    if (ret < 0 || ret >= OBJECTS_LIST_MARKER_SIZE - 1) {\n      MBLOG_WARN << \"marker is too long: \" << next_marker;\n      return OBS_STATUS_AbortedByCallback;\n    }\n  } else {\n    data->next_marker[0] = 0;\n  }\n\n  for (uint32_t i = 0; i < (uint32_t)contents_count; i++) {\n    std::string strFilePath = contents[i].key;\n    std::string lastchar = strFilePath.substr(strFilePath.length() - 1);\n\n    // skip empty directories.\n    if (\"/\" == lastchar) {\n      continue;\n    }\n\n    data->object_keys_list.push_back(strFilePath);\n  }\n\n  return OBS_STATUS_OK;\n}\n\nobs_status GetPropertiesCallback(const obs_response_properties *properties,\n                                 void *callback_data) {\n  return OBS_STATUS_OK;\n}\n\nvoid GetObjectCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data) {\n  if (nullptr != error->message) {\n    MBLOG_WARN << \"OBS error message: \" << error->message;\n  }\n  if (nullptr != error->resource) {\n    MBLOG_WARN << \"OBS error resource: \" << error->resource;\n  }\n  if (nullptr != error->further_details) {\n    MBLOG_WARN << \"OBS error further detail: \" << error->further_details;\n  }\n  
if (OBS_STATUS_OK != status) {\n    MBLOG_WARN << \"OBS status: \" << status;\n  }\n  auto *data = (GetObjectCallbackData *)callback_data;\n  if (nullptr == data) {\n    return;\n  }\n  data->ret_status = status;\n}\n\nobs_status GetObjectDataCallback(int buffer_size, const char *buffer,\n                                 void *callback_data) {\n  auto *data = (GetObjectCallbackData *)callback_data;\n  if (nullptr == data || nullptr == data->out_file) {\n    return OBS_STATUS_AbortedByCallback;\n  }\n  size_t wrote = fwrite(buffer, buffer_size, 1, data->out_file.get());\n  return ((wrote < (size_t)1) ? OBS_STATUS_AbortedByCallback : OBS_STATUS_OK);\n}\n\nvoid PutBufferCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data) {\n  auto *data = (PutBufferObjectCallbackData *)callback_data;\n  data->ret_status = status;\n}\n\nint PutBufferDataCallback(int buffer_size, char *buffer, void *callback_data) {\n  auto *data = (PutBufferObjectCallbackData *)callback_data;\n\n  int toRead = 0;\n  if (data->buffer_size) {\n    toRead =\n        ((data->buffer_size > (unsigned)buffer_size) ? 
(unsigned)buffer_size\n                                                     : data->buffer_size);\n    auto ret = memcpy_s(buffer, buffer_size,\n                        data->put_buffer + data->cur_offset, toRead);\n    if (EOK != ret) {\n      MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \" << toRead\n                  << \", dest size \" << buffer_size;\n      return 0;\n    }\n  }\n\n  data->buffer_size -= toRead;\n  data->cur_offset += toRead;\n\n  return toRead;\n}\n\nobs_status GetObjectSizeCallback(const obs_response_properties *properties,\n                                 void *callback_data) {\n  auto *data = (GetObejectSizeCallbackData *)callback_data;\n  data->content_length = properties->content_length;\n  return OBS_STATUS_OK;\n}\n\nvoid GetObjectSizeCompleteCallback(obs_status status,\n                                   const obs_error_details *error,\n                                   void *callback_data) {\n  auto *data = (GetObejectSizeCallbackData *)callback_data;\n  data->ret_status = status;\n}\n\nobs_status GetBufferCallback(int buffer_size, const char *buffer,\n                             void *callback_data) {\n  auto *data = (GetBufferCallbackData *)callback_data;\n  if (nullptr == data || nullptr == data->get_buffer) {\n    return OBS_STATUS_AbortedByCallback;\n  }\n  auto ret = memcpy_s(data->get_buffer + data->buffer_size, buffer_size, buffer,\n                      buffer_size);\n  if (EOK != ret) {\n    MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \"\n                << buffer_size << \", dest size \" << buffer_size;\n    return OBS_STATUS_InternalError;\n  }\n  data->buffer_size += buffer_size;\n  return OBS_STATUS_OK;\n}\n\nvoid GetBufferCompleteCallback(obs_status status,\n                               const obs_error_details *error,\n                               void *callback_data) {\n  auto *data = (GetBufferCallbackData *)callback_data;\n  data->ret_status = status;\n}\n\n}  // 
namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/flowunit/image_process/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nif (ACL_FOUND)\n  add_definitions(-DACL_ENABLE)\nendif ()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${ACL_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-image-process-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBRARY} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${LIBRARY} ${ACL_LIBRARIES})\n\nset(MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/image_process/image_process.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_process.h\"\n\n#include <functional>\n#include <map>\n#include <set>\n\nnamespace imageprocess {\nint32_t align_up(int32_t num, int32_t align) {\n  if (align == 0 || num == 0) {\n    return 0;\n  }\n\n  return ((num - 1) / align + 1) * align;\n}\n\nstatic const std::set<std::string> pix_fmt_set = {\"rgb\", \"bgr\", \"nv12\", \"nv21\"};\n\nstatic size_t RGBBytesCalc(size_t pix_num) { return pix_num * 3; }\n\nstatic size_t NVBytesCalc(size_t pix_num) { return pix_num * 3 / 2; }\n\nstatic const std::map<std::string, std::function<size_t(size_t)>>\n    img_bytes_calc_map = {{\"rgb\", RGBBytesCalc},\n                          {\"bgr\", RGBBytesCalc},\n                          {\"nv12\", NVBytesCalc},\n                          {\"nv21\", NVBytesCalc}};\n\nmodelbox::Status GetImageBytes(const std::string &pix_fmt, size_t pix_num,\n                               size_t &img_bytes) {\n  auto item = img_bytes_calc_map.find(pix_fmt);\n  if (item == img_bytes_calc_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"pix_fmt \" + pix_fmt + \" is not support\"};\n  }\n\n  img_bytes = item->second(pix_num);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status GetImageBytes(const std::string &pix_fmt, int32_t width,\n                               int32_t height, size_t &img_bytes) {\n  
return GetImageBytes(pix_fmt, (size_t)width * height, img_bytes);\n}\n\nstatic int32_t RGBWidthStrideCalc(int32_t width) { return width * 3; }\n\nstatic int32_t NVWidthStrideCalc(int32_t width) { return width; }\n\nstatic const std::map<std::string, std::function<int32_t(int32_t)>>\n    img_width_stride_calc_map = {{\"rgb\", RGBWidthStrideCalc},\n                                 {\"bgr\", RGBWidthStrideCalc},\n                                 {\"nv12\", NVWidthStrideCalc},\n                                 {\"nv21\", NVWidthStrideCalc}};\n\nmodelbox::Status GetWidthStride(const std::string &pix_fmt, int32_t width,\n                                int32_t &width_stride) {\n  auto item = img_width_stride_calc_map.find(pix_fmt);\n  if (item == img_width_stride_calc_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"pix_fmt \" + pix_fmt + \" is not support\"};\n  }\n\n  width_stride = item->second(width);\n  return modelbox::STATUS_OK;\n}\n\nstatic size_t RGBBytesCalcByStride(int32_t width_stride,\n                                   int32_t height_stride) {\n  return (size_t)width_stride * height_stride;\n}\n\nstatic size_t NVBytesCalcByStride(int32_t width_stride, int32_t height_stride) {\n  return (size_t)width_stride * height_stride * 3 / 2;\n}\n\nstatic const std::map<std::string, std::function<size_t(int32_t, int32_t)>>\n    img_bytes_calc_map2 = {{\"rgb\", RGBBytesCalcByStride},\n                           {\"bgr\", RGBBytesCalcByStride},\n                           {\"nv12\", NVBytesCalcByStride},\n                           {\"nv21\", NVBytesCalcByStride}};\n\nmodelbox::Status GetImageBytesByStride(const std::string &pix_fmt,\n                                       int32_t width_stride,\n                                       int32_t height_stride,\n                                       size_t &img_bytes) {\n  auto item = img_bytes_calc_map2.find(pix_fmt);\n  if (item == img_bytes_calc_map2.end()) {\n    return 
{modelbox::STATUS_NOTSUPPORT,\n            \"pix_fmt \" + pix_fmt + \" is not support\"};\n  }\n\n  img_bytes = item->second(width_stride, height_stride);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status GetImgParam(const std::shared_ptr<modelbox::Buffer> &img,\n                             std::string &pix_fmt, int32_t &img_width,\n                             int32_t &img_height, int32_t &img_width_stride,\n                             int32_t &img_height_stride) {\n  auto b_ret = img->Get(\"pix_fmt\", pix_fmt);\n  if (!b_ret) {\n    return {modelbox::STATUS_INVALID, \"pix_fmt not in input image meta\"};\n  }\n\n  b_ret = img->Get(\"width\", img_width);\n  if (!b_ret) {\n    return {modelbox::STATUS_INVALID, \"width not in input image meta\"};\n  }\n\n  b_ret = img->Get(\"height\", img_height);\n  if (!b_ret) {\n    return {modelbox::STATUS_INVALID, \"height not in input image meta\"};\n  }\n\n  b_ret = img->Get(\"width_stride\", img_width_stride);\n  if (!b_ret) {\n    return {modelbox::STATUS_INVALID, \"width stride not in input image meta\"};\n  }\n\n  b_ret = img->Get(\"height_stride\", img_height_stride);\n  if (!b_ret) {\n    return {modelbox::STATUS_INVALID, \"height stride not in input image meta\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CheckImageStride(const std::string &pix_fmt,\n                                  int32_t img_width_stride,\n                                  int32_t expect_w_align,\n                                  int32_t img_height_stride,\n                                  int32_t expect_h_align, size_t img_size) {\n  if (expect_w_align == 0 || expect_h_align == 0) {\n    return {modelbox::STATUS_NOTSUPPORT, \"divisor is zero\"};\n  }\n  if (img_width_stride % expect_w_align != 0) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"img_width_stride must align to \" + std::to_string(expect_w_align)};\n  }\n\n  if (img_height_stride % expect_h_align != 0) {\n    return {modelbox::STATUS_NOTSUPPORT, 
\"img_height_stride must align to \" +\n                                             std::to_string(expect_h_align)};\n  }\n\n  size_t expect_size = 0;\n  auto ret = GetImageBytesByStride(pix_fmt, img_width_stride, img_height_stride,\n                                   expect_size);\n  if (!ret) {\n    return ret;\n  }\n\n  if (img_size != expect_size) {\n    return {modelbox::STATUS_INVALID,\n            \"img_size[ \" + std::to_string(img_size) + \" ] not right, pix_fmt[\" +\n                pix_fmt + \"], width_stride[\" +\n                std::to_string(img_width_stride) + \"], height_stride[\" +\n                std::to_string(img_height_stride) + \"]\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool CheckRoiBoxVaild(const RoiBox *bbox, int32_t image_width,\n                      int32_t image_height) {\n  if (bbox->x < 0 || bbox->y < 0 || bbox->w < 0 || bbox->h < 0 ||\n      bbox->x + bbox->w > image_width || bbox->y + bbox->h > image_height) {\n    MBLOG_ERROR << \"crop bbox roi is invaild: x:\" << bbox->x << \" y:\" << bbox->y\n                << \" w:\" << bbox->w << \" h:\" << bbox->h\n                << \", image width: \" << image_width\n                << \", image height:\" << image_height;\n    return false;\n  }\n  return true;\n}\n\n#ifdef ACL_ENABLE\n\nmodelbox::Status InitDvppChannel(\n    std::shared_ptr<acldvppChannelDesc> &chan_desc) {\n  auto *chan_desc_ptr = acldvppCreateChannelDesc();\n  if (chan_desc_ptr == nullptr) {\n    return {modelbox::STATUS_FAULT, \"acldvppCreateChannelDesc return null\"};\n  }\n\n  auto acl_ret = acldvppCreateChannel(chan_desc_ptr);\n  if (acl_ret != ACL_SUCCESS) {\n    acldvppDestroyChannelDesc(chan_desc_ptr);\n    return {modelbox::STATUS_FAULT,\n            \"acldvppCreateChannel failed, ret \" + std::to_string(acl_ret)};\n  }\n\n  chan_desc.reset(chan_desc_ptr, [](acldvppChannelDesc *ptr) {\n    acldvppDestroyChannel(ptr);\n    acldvppDestroyChannelDesc(ptr);\n  });\n\n  return 
modelbox::STATUS_SUCCESS;\n}\n\nstatic std::map<std::string, acldvppPixelFormat> acl_fmt_trans_map = {\n    {\"nv12\", PIXEL_FORMAT_YUV_SEMIPLANAR_420},\n    {\"nv21\", PIXEL_FORMAT_YVU_SEMIPLANAR_420},\n    {\"rgb\", PIXEL_FORMAT_RGB_888},\n    {\"bgr\", PIXEL_FORMAT_BGR_888},\n};\n\nstd::shared_ptr<acldvppPicDesc> CreateImgDesc(size_t img_size,\n                                              const std::string &pix_fmt,\n                                              const ImageShape &shape,\n                                              ImgDescDestroyFlag flag) {\n  void *buffer = nullptr;\n  auto acl_ret = acldvppMalloc(&buffer, img_size);\n  if (acl_ret != ACL_SUCCESS) {\n    modelbox::StatusError = {\n        modelbox::STATUS_FAULT,\n        \"acldvppMalloc failed, code \" + std::to_string(acl_ret)};\n    return nullptr;\n  }\n\n  auto img_desc = CreateImgDesc(img_size, buffer, pix_fmt, shape, flag);\n  if (img_desc == nullptr) {\n    acldvppFree(buffer);\n    return nullptr;\n  }\n\n  modelbox::StatusError = modelbox::STATUS_OK;\n  return img_desc;\n}\n\nstd::shared_ptr<acldvppPicDesc> CreateImgDesc(size_t img_size, void *img_buffer,\n                                              const std::string &pix_fmt,\n                                              const ImageShape &shape,\n                                              ImgDescDestroyFlag flag) {\n  auto ret =\n      CheckImageStride(pix_fmt, shape.width_stride, ASCEND_WIDTH_ALIGN,\n                       shape.height_stride, ASCEND_HEIGHT_ALIGN, img_size);\n  if (!ret) {\n    modelbox::StatusError = ret;\n    return nullptr;\n  }\n\n  auto *img_desc_ptr = acldvppCreatePicDesc();\n  if (img_desc_ptr == nullptr) {\n    modelbox::StatusError = {modelbox::STATUS_FAULT,\n                             \"acldvppCreatePicDesc return null\"};\n    return nullptr;\n  }\n\n  auto format_item = acl_fmt_trans_map.find(pix_fmt);\n  if (format_item == acl_fmt_trans_map.end()) {\n    acldvppDestroyPicDesc(img_desc_ptr);\n  
  modelbox::StatusError = {modelbox::STATUS_NOTSUPPORT,\n                             \"pix_fmt \" + pix_fmt + \" is not support\"};\n    return nullptr;\n  }\n\n  acldvppSetPicDescSize(img_desc_ptr, img_size);\n  if (img_buffer != nullptr) {\n    acldvppSetPicDescData(img_desc_ptr, img_buffer);\n  }\n  acldvppSetPicDescFormat(img_desc_ptr, format_item->second);\n  acldvppSetPicDescWidth(img_desc_ptr, shape.width);\n  acldvppSetPicDescHeight(img_desc_ptr, shape.height);\n  acldvppSetPicDescWidthStride(img_desc_ptr, shape.width_stride);\n  acldvppSetPicDescHeightStride(img_desc_ptr, shape.height_stride);\n\n  std::shared_ptr<acldvppPicDesc> img_desc(\n      img_desc_ptr, [flag](acldvppPicDesc *ptr) {\n        if (flag == ImgDescDestroyFlag::NONE) {\n          return;\n        }\n\n        auto *data = acldvppGetPicDescData(ptr);\n        if (data != nullptr && flag != ImgDescDestroyFlag::DESC_ONLY) {\n          acldvppFree(data);\n        }\n\n        acldvppDestroyPicDesc(ptr);\n      });\n\n  modelbox::StatusError = modelbox::STATUS_OK;\n  return img_desc;\n}\n\nmodelbox::Status FillImgDescData(\n    std::shared_ptr<acldvppPicDesc> &img_desc,\n    std::shared_ptr<modelbox::Buffer> &image_buffer, aclrtStream stream) {\n  auto acl_ret = aclrtSynchronizeStream(stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSynchronizeStream failed, err \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto *input_buf = acldvppGetPicDescData(img_desc.get());\n  if (input_buf == nullptr) {\n    return {modelbox::STATUS_FAULT, \"acldvppGetPicDescData failed\"};\n  }\n\n  acl_ret = aclrtMemcpy(input_buf, image_buffer->GetBytes(),\n                        image_buffer->ConstData(), image_buffer->GetBytes(),\n                        aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_DEVICE);\n  if (acl_ret != ACL_SUCCESS) {\n    std::string err_msg =\n        \"aclrtMemcpyAsync failed, dest_ptr:\" +\n        std::to_string((uintptr_t)input_buf) +\n        
\",dest_size:\" + std::to_string(image_buffer->GetBytes()) +\n        \",src_ptr:\" + std::to_string((uintptr_t)image_buffer->ConstData()) +\n        \",src_size:\" + std::to_string(image_buffer->GetBytes()) +\n        \",type:device_to_device\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status SetOutImgMeta(std::shared_ptr<modelbox::Buffer> &out_image,\n                               const std::string &out_pix_fmt,\n                               std::shared_ptr<acldvppPicDesc> &out_img_desc) {\n  auto *img_desc_ptr = out_img_desc.get();\n  if (img_desc_ptr == nullptr) {\n    return {modelbox::STATUS_FAULT, \"out_img_desc is null\"};\n  }\n\n  int32_t width = acldvppGetPicDescWidth(img_desc_ptr);\n  int32_t height = acldvppGetPicDescHeight(img_desc_ptr);\n  int32_t width_stride = acldvppGetPicDescWidthStride(img_desc_ptr);\n  int32_t height_stride = acldvppGetPicDescHeightStride(img_desc_ptr);\n  out_image->Set(\"width\", width);\n  out_image->Set(\"height\", height);\n  out_image->Set(\"width_stride\", width_stride);\n  out_image->Set(\"height_stride\", height_stride);\n  out_image->Set(\"channel\", (int32_t)1);\n  out_image->Set(\"pix_fmt\", out_pix_fmt);\n  out_image->Set(\"layout\", std::string(\"hwc\"));\n  out_image->Set(\"shape\", std::vector<size_t>{(size_t)height_stride * 3 / 2,\n                                              (size_t)width_stride, 1});\n  out_image->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  return modelbox::STATUS_OK;\n}\n\nclass DvppChanMgr : public std::enable_shared_from_this<DvppChanMgr> {\n public:\n  std::shared_ptr<acldvppChannelDesc> Get(int32_t device_id) {\n    std::shared_ptr<acldvppChannelDesc> chan;\n    std::weak_ptr<DvppChanMgr> mgr_ref = shared_from_this();\n    auto free_func = [mgr_ref, device_id](acldvppChannelDesc *ptr) {\n      auto mgr = mgr_ref.lock();\n      if (mgr == nullptr) {\n        acldvppDestroyChannel(ptr);\n        
acldvppDestroyChannelDesc(ptr);\n        return;\n      }\n\n      mgr->Put(ptr, device_id);\n    };\n\n    {\n      std::lock_guard<std::mutex> lock(chan_list_lock_);\n      auto &chan_list = device_chan_list_[device_id];\n      if (!chan_list.empty()) {\n        auto *ch_ptr = chan_list.front();\n        chan_list.pop_front();\n        chan.reset(ch_ptr, free_func);\n        return chan;\n      }\n    }\n\n    auto *ch_ptr = acldvppCreateChannelDesc();\n    if (ch_ptr == nullptr) {\n      MBLOG_ERROR << \"acldvppCreateChannelDesc return null\";\n      return nullptr;\n    }\n\n    auto acl_ret = acldvppCreateChannel(ch_ptr);\n    if (acl_ret != ACL_SUCCESS) {\n      acldvppDestroyChannelDesc(ch_ptr);\n      MBLOG_ERROR << \"acldvppCreateChannel failed, acl ret \" << acl_ret;\n      return nullptr;\n    }\n\n    alloc_count_++;\n    chan.reset(ch_ptr, free_func);\n    return chan;\n  }\n\n  void Put(acldvppChannelDesc *desc, int32_t device_id) {\n    std::lock_guard<std::mutex> lock(chan_list_lock_);\n    auto &chan_list = device_chan_list_[device_id];\n    chan_list.push_back(desc);\n  }\n\n  virtual ~DvppChanMgr() {\n    for (auto &chan_list_item : device_chan_list_) {\n      for (auto *ptr : chan_list_item.second) {\n        acldvppDestroyChannel(ptr);\n        acldvppDestroyChannelDesc(ptr);\n      }\n    }\n  }\n\n private:\n  std::mutex chan_list_lock_;\n  std::unordered_map<int32_t, std::list<acldvppChannelDesc *>>\n      device_chan_list_;\n  std::atomic_uint64_t alloc_count_{0};\n};\n\nstatic std::shared_ptr<DvppChanMgr> g_dvpp_chan_mgr =\n    std::make_shared<DvppChanMgr>();\nstd::shared_ptr<acldvppChannelDesc> GetDvppChannel(int32_t device_id) {\n  return g_dvpp_chan_mgr->Get(device_id);\n}\n\n#endif  // ACL_ENABLE\n\n};  // namespace imageprocess"
  },
  {
    "path": "src/drivers/common/flowunit/image_process/image_process.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_PROCESS_COMMON_H_\n#define MODELBOX_FLOWUNIT_IMAGE_PROCESS_COMMON_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/flowunit.h>\n\n#include <string>\n\n#ifdef ACL_ENABLE\n\n#ifndef ENABLE_DVPP_INTERFACE\n#define ENABLE_DVPP_INTERFACE\n#endif\n\n#include <acl/ops/acl_dvpp.h>\n\n#endif  // ACL_ENABLE\n\nnamespace imageprocess {\n\ntypedef struct RoiBox {\n  int32_t x, y, w, h;\n} RoiBox;\n\nclass ImageShape {\n public:\n  ImageShape(int32_t img_width, int32_t img_height, int32_t img_width_stride,\n             int32_t img_height_stride)\n      : width{img_width},\n        height{img_height},\n        width_stride{img_width_stride},\n        height_stride{img_height_stride} {}\n\n  virtual ~ImageShape() = default;\n\n  int32_t width{0};\n  int32_t height{0};\n  int32_t width_stride{0};\n  int32_t height_stride{0};\n};\n\nint32_t align_up(int32_t num, int32_t align);\n\nmodelbox::Status GetImageBytes(const std::string &pix_fmt, size_t pix_num,\n                               size_t &img_bytes);\n\nmodelbox::Status GetImageBytes(const std::string &pix_fmt, int32_t width,\n                               int32_t height, size_t &img_bytes);\n\nmodelbox::Status GetWidthStride(const std::string &pix_fmt, int32_t width,\n                                int32_t 
&width_stride);\n\nmodelbox::Status GetImageBytesByStride(const std::string &pix_fmt,\n                                       int32_t width_stride,\n                                       int32_t height_stride,\n                                       size_t &img_bytes);\n\nmodelbox::Status GetImgParam(const std::shared_ptr<modelbox::Buffer> &img,\n                             std::string &pix_fmt, int32_t &img_width,\n                             int32_t &img_height, int32_t &img_width_stride,\n                             int32_t &img_height_stride);\n\nmodelbox::Status CheckImageStride(const std::string &pix_fmt,\n                                  int32_t img_width_stride,\n                                  int32_t expect_w_align,\n                                  int32_t img_height_stride,\n                                  int32_t expect_h_align, size_t img_size);\n\nbool CheckRoiBoxVaild(const RoiBox *bbox, int32_t image_width,\n                                    int32_t image_height);\n\n#ifdef ACL_ENABLE\n\nconst int32_t ASCEND_WIDTH_ALIGN = 16;\nconst int32_t ASCEND_HEIGHT_ALIGN = 2;\n\nmodelbox::Status InitDvppChannel(\n    std::shared_ptr<acldvppChannelDesc> &chan_desc);\n\nenum class ImgDescDestroyFlag { DESC_AND_BUFFER, DESC_ONLY, NONE };\n\nstd::shared_ptr<acldvppPicDesc> CreateImgDesc(\n    size_t img_size, const std::string &pix_fmt, const ImageShape &shape,\n    ImgDescDestroyFlag flag = ImgDescDestroyFlag::DESC_AND_BUFFER);\n\nstd::shared_ptr<acldvppPicDesc> CreateImgDesc(\n    size_t img_size, void *img_buffer, const std::string &pix_fmt,\n    const ImageShape &shape,\n    ImgDescDestroyFlag flag = ImgDescDestroyFlag::DESC_AND_BUFFER);\n\nmodelbox::Status FillImgDescData(\n    std::shared_ptr<acldvppPicDesc> &img_desc,\n    std::shared_ptr<modelbox::Buffer> &image_buffer, aclrtStream stream);\n\nmodelbox::Status SetOutImgMeta(std::shared_ptr<modelbox::Buffer> &out_image,\n                               const std::string &out_pix_fmt,\n           
                    std::shared_ptr<acldvppPicDesc> &out_img_desc);\n\nstd::shared_ptr<acldvppChannelDesc> GetDvppChannel(int32_t device_id);\n\n#endif  // ACL_ENABLE\n\n};  // namespace imageprocess\n\n#endif"
  },
  {
    "path": "src/drivers/common/flowunit/image_rotate/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\ngroup_source_test_files(IMAGE_ROTATE_SOURCE MODELBOX_UNIT_TEST_SOURCE \"test_base.c*\" ${SOURCES})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-image-rotate-object)\nadd_library(${LIBRARY} STATIC ${IMAGE_ROTATE_SOURCE})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nlist(APPEND TEST_INCLUDE ${INCLUDE})\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_IMAGE_ROTATE_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_IMAGE_ROTATE_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/image_rotate/image_rotate_base.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_rotate_base.h\"\n\n#include <securec.h>\n\n#include <sstream>\n#include <string>\n\nImageRotateFlowUnitBase::ImageRotateFlowUnitBase() = default;\nImageRotateFlowUnitBase::~ImageRotateFlowUnitBase() = default;\n\nmodelbox::Status ImageRotateFlowUnitBase::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  has_rotate_angle_ = opts->Contain(\"rotate_angle\");\n  if (has_rotate_angle_) {\n    rotate_angle_ = opts->GetInt32(\"rotate_angle\", 0);\n    auto ret = CheckRotateAngle(rotate_angle_);\n    if (ret != modelbox::STATUS_OK) {\n      return ret;\n    }\n  }\n  MBLOG_DEBUG << \"has  rotate_angle\" << has_rotate_angle_ << \", rotate_angle\"\n              << rotate_angle_;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageRotateFlowUnitBase::Close() {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageRotateFlowUnitBase::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_bufs = data_ctx->Input(\"in_image\");\n  auto output_bufs = data_ctx->Output(\"out_image\");\n  if (input_bufs->Size() <= 0) {\n    auto errMsg = \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  for (auto &buffer : *input_bufs) {\n    if (CheckImageType(buffer) != 
modelbox::STATUS_OK) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    int32_t rotate_angle(0);\n    if (has_rotate_angle_) {\n      rotate_angle = rotate_angle_;\n    } else if (!buffer->Get(\"rotate_angle\", rotate_angle)) {\n      MBLOG_ERROR << \"get buffer meta rotate_angle failed.\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    if (rotate_angle == 0) {\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n    auto check_ret = CheckRotateAngle(rotate_angle);\n    if (check_ret != modelbox::STATUS_OK) {\n      return check_ret;\n    }\n\n    int32_t width = 0;\n    int32_t height = 0;\n    buffer->Get(\"width\", width);\n    buffer->Get(\"height\", height);\n\n    int32_t output_width(width);\n    int32_t output_height(height);\n    if (rotate_angle == 90 || rotate_angle == 270) {\n      output_width = height;\n      output_height = width;\n    }\n\n    // rotate\n    auto output_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n    RotateOneImage(buffer, output_buffer, rotate_angle, width, height);\n\n    output_buffer->CopyMeta(buffer);\n    output_buffer->Set(\"width\", output_width);\n    output_buffer->Set(\"height\", output_height);\n    output_buffer->Set(\"width_stride\", output_width);\n    output_buffer->Set(\"height_stride\", output_height);\n    output_buffer->Set(\"rotate_angle\", 0);\n    output_bufs->PushBack(output_buffer);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageRotateFlowUnitBase::CheckImageType(\n    const std::shared_ptr<modelbox::Buffer> &input_buffer) {\n  auto input_type = modelbox::ModelBoxDataType::MODELBOX_TYPE_INVALID;\n  if (!input_buffer->Get(\"type\", input_type) ||\n      input_type != modelbox::ModelBoxDataType::MODELBOX_UINT8) {\n    MBLOG_ERROR << \"input image buffer type must be MODELBOX_UINT8\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string input_layout;\n  if (!input_buffer->Get(\"layout\", input_layout) || input_layout != \"hwc\") {\n    MBLOG_ERROR << 
\"input image buffer layout must be hwc\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageRotateFlowUnitBase::CheckRotateAngle(\n    const int32_t &rotate_angle) {\n  if (rotate_value_.find(rotate_angle) == rotate_value_.end()) {\n    MBLOG_ERROR << \"rotate_angle is invalid, configure is :\" +\n                       std::to_string(rotate_angle);\n    std::stringstream err_msg;\n    err_msg << \"Valid rotate_angle is: \";\n    for (auto value : rotate_value_) {\n      err_msg << std::to_string(value) << \" \";\n    }\n    MBLOG_ERROR << err_msg.str();\n    return {modelbox::STATUS_BADCONF, err_msg.str()};\n  }\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/drivers/common/flowunit/image_rotate/image_rotate_base.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_ROTATE_BASE_H_\n#define MODELBOX_FLOWUNIT_IMAGE_ROTATE_BASE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include <set>\n\nclass ImageRotateFlowUnitBase : public modelbox::FlowUnit {\n public:\n  ImageRotateFlowUnitBase();\n  ~ImageRotateFlowUnitBase() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  virtual modelbox::Status RotateOneImage(\n      std::shared_ptr<modelbox::Buffer> input_buffer,\n      std::shared_ptr<modelbox::Buffer> output_buffer, int32_t rotate_angle,\n      int32_t width, int32_t height) = 0;\n\n private:\n  modelbox::Status CheckImageType(\n      const std::shared_ptr<modelbox::Buffer> &input_buffer);\n  modelbox::Status CheckRotateAngle(const int32_t &rotate_angle);\n\n  std::set<int32_t> rotate_value_{90, 180, 270};\n  bool has_rotate_angle_{false};\n  int32_t rotate_angle_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_IMAGE_ROTATE_BASE_H_"
  },
  {
    "path": "src/drivers/common/flowunit/image_rotate/image_rotate_test_base.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_rotate_test_base.h\"\n\n#include <opencv2/opencv.hpp>\n\nnamespace modelbox {\n\nstd::shared_ptr<MockFlow> ImageRotateFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus ImageRotateFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"test_0_1_rotate\", {}, {\"out_1\"});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n    auto open_func = [=](const std::shared_ptr<Configuration>& opts,\n                         const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n      auto ext_data = mock_flowunit->CreateExternalData();\n      std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n      auto output_buf = ext_data->CreateBufferList();\n      modelbox::TensorList output_tensor_list(output_buf);\n      output_tensor_list.BuildFromHost<uchar>({1, {gimg_path.size() + 1}},\n                                              (void*)gimg_path.data(),\n                                              gimg_path.size() + 1);\n\n      auto status = ext_data->Send(output_buf);\n      if (!status) {\n        MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n        return status;\n      }\n\n      status = ext_data->Close();\n      if (!status) {\n        
MBLOG_ERROR << \"external data close failed:\" << status;\n        return status;\n      }\n\n      return modelbox::STATUS_OK;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      MBLOG_INFO << \"test_0_1_rotate process\";\n\n      auto external = data_ctx->External();\n      std::string input_path = std::string((char*)(*external)[0]->ConstData());\n      cv::Mat input_img = cv::imread(input_path);\n\n      MBLOG_INFO << \"gimage col \" << input_img.cols << \"  grow \"\n                 << input_img.rows << \" gchannel:\" << input_img.channels();\n\n      auto output_bufs = data_ctx->Output(\"out_1\");\n\n      for (int &i : test_rotate_angle_) {\n        auto output_buffer =\n            std::make_shared<modelbox::Buffer>(mock_flowunit->GetBindDevice());\n        output_buffer->Build(input_img.total() * input_img.elemSize());\n        auto *output_data = static_cast<uchar *>(output_buffer->MutableData());\n        auto ret =\n            memcpy_s(output_data, output_buffer->GetBytes(), input_img.data,\n                     input_img.total() * input_img.elemSize());\n        if (ret != EOK) {\n          MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret;\n          return modelbox::STATUS_FAULT;\n        }\n        output_buffer->Set(\"width\", (int32_t)input_img.cols);\n        output_buffer->Set(\"height\", (int32_t)input_img.rows);\n        output_buffer->Set(\"layout\", std::string(\"hwc\"));\n        output_buffer->Set(\"type\", ModelBoxDataType::MODELBOX_UINT8);\n\n        output_buffer->Set(\"rotate_angle\", i);\n        output_bufs->PushBack(output_buffer);\n      }\n\n      return modelbox::STATUS_OK;\n    };\n\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterOpenFunc(open_func);\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        
mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"test_1_0_rotate\", {\"in_origin\", \"in_rotate\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      MBLOG_INFO << \"test_1_0_rotate process\";\n      auto origin_buf = data_ctx->Input(\"in_origin\");\n      auto rotate_buf = data_ctx->Input(\"in_rotate\");\n      int32_t width = 0;\n      int32_t height = 0;\n      int32_t channels = 0;\n      int32_t rotate_angle = 0;\n\n      for (size_t i = 0; i < rotate_buf->Size(); ++i) {\n        rotate_buf->At(i)->Get(\"width\", width);\n        rotate_buf->At(i)->Get(\"height\", height);\n        rotate_buf->At(i)->Get(\"channel\", channels);\n        origin_buf->At(i)->Get(\"rotate_angle\", rotate_angle);\n        const auto *input_data =\n            static_cast<const uchar *>(rotate_buf->ConstBufferData(i));\n\n        cv::Mat img_data(cv::Size(width, height), CV_8UC3);\n        auto ret =\n            memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                     input_data, rotate_buf->At(i)->GetBytes());\n        if (ret != EOK) {\n          MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret;\n          return modelbox::STATUS_FAULT;\n        }\n\n        MBLOG_INFO << \"output image col \" << img_data.cols << \"  row \"\n                   << img_data.rows << \" channel:\" << img_data.channels();\n\n        std::string name = std::string(TEST_DATA_DIR) + \"/rotate_result_\" +\n                           std::to_string(rotate_angle) + \".jpg\";\n\n        cv::imwrite(name, img_data);\n      }\n\n      return modelbox::STATUS_STOP;\n    };\n\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    
driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/flowunit/image_rotate/image_rotate_test_base.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_ROTATE_TEST_BASE_H_\n#define MODELBOX_FLOWUNIT_IMAGE_ROTATE_TEST_BASE_H_\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass ImageRotateFlowUnitTest : public testing::Test {\n public:\n  ImageRotateFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n  std::vector<int32_t> test_rotate_angle_{90, 180, 270};\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_IMAGE_ROTATE_TEST_BASE_H_"
  },
  {
    "path": "src/drivers/common/flowunit/inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\nSET(HEADER ${CMAKE_CURRENT_LIST_DIR}/model_decrypt_interface.h)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\ninstall(FILES ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox/inference \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBRARY modelbox-common-inference-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_INFERENCE_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_INFERENCE_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/common/flowunit/inference/model_decrypt.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"model_decrypt.h\"\n\n#include <dirent.h>\n#include <dlfcn.h>\n#include <limits.h>\n#include <sys/stat.h>\n\n#include <cstdint>\n#include <fstream>\n\n#include \"model_decrypt_header.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n\nModelDecryption::~ModelDecryption() {\n  if (fmodel_.is_open()) {\n    fmodel_.close();\n  }\n}\n\nmodelbox::Status ModelDecryption::Init(\n    const std::string& model_path,\n    const std::shared_ptr<modelbox::Drivers>& drivers_ptr,\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  model_state_ = MODEL_STATE_ERROR;\n  header_offset_ = 0;\n\n  Defer {\n    if (model_state_ == MODEL_STATE_ERROR && fmodel_.is_open()) {\n      fmodel_.close();\n    }\n  };\n\n  fmodel_.open(model_path, std::ios::binary);\n  if (fmodel_.fail() || !fmodel_.is_open()) {\n    std::string errmsg = \"open model '\";\n    errmsg += model_path + \"' failed, \" + modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  fmodel_.seekg(0, std::ios::end);\n  fsize_ = fmodel_.tellg();\n  if (fsize_ <= 0) {\n    std::string errmsg = \"empty model file: \" + model_path;\n    MBLOG_ERROR << errmsg;\n    return {modelbox::STATUS_BADCONF, errmsg};\n  }\n\n  auto plugin_name = 
config->GetString(\"encryption.plugin_name\");\n  auto plugin_version = config->GetString(\"encryption.plugin_version\");\n  if (plugin_name.empty()) {\n    GetInfoFromHeader(plugin_name, plugin_version, config);\n  }\n\n  if (!plugin_name.empty()) {\n    if (drivers_ptr == nullptr) {\n      MBLOG_ERROR << \"drivers_ptr is null\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    auto plugin_driver = drivers_ptr->GetDriver(\n        DRIVER_CLASS_MODEL_DECRYPT, DRIVER_TYPE, plugin_name, plugin_version);\n    if (plugin_driver == nullptr) {\n      std::string errmsg = \"Can not find decrytp drivers: \" + plugin_name;\n      MBLOG_ERROR << errmsg;\n      // fclose will call when ~ModelDecryption\n      return {modelbox::STATUS_BADCONF, errmsg};\n    }\n\n    cur_factory_ = plugin_driver->CreateFactory();\n    if (cur_factory_ == nullptr) {\n      MBLOG_ERROR << \"Plugin : \" << plugin_name << \" factory create failed\";\n      return {modelbox::STATUS_FAULT, \"decrypt driver create failed\"};\n    }\n\n    cur_plugin_ = std::dynamic_pointer_cast<IModelDecryptPlugin>(\n        cur_factory_->GetDriver());\n    if (cur_plugin_ == nullptr) {\n      MBLOG_ERROR << \"plugin : \" << plugin_name\n                  << \" is not derived from IModelDecryptPlugin\";\n      return {modelbox::STATUS_FAULT, \"decrypt driver create failed\"};\n    }\n\n    auto ret = cur_plugin_->Init(model_path, config);\n    if (ret == modelbox::STATUS_SUCCESS) {\n      model_state_ = MODEL_STATE_ENCRYPT;\n    } else {\n      MBLOG_ERROR << \"drivers Init Error\";\n      return ret;\n    }\n  } else {\n    model_state_ = MODEL_STATE_PLAIN;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid ModelDecryption::GetInfoFromHeader(\n    std::string& plugin_name, std::string& plugin_version,\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  struct PrefixInfo model_info;\n  fmodel_.seekg(0, std::ios::beg);\n  fmodel_.read(reinterpret_cast<char*>(&model_info), sizeof(PrefixInfo));\n\n  
std::string magic_str(model_info.magic, MAGIC_SIZE);\n  if (fmodel_.gcount() != sizeof(PrefixInfo) || magic_str != MAGIC_FLAG) {\n    // here is plain model , not err , so do not log\n    plugin_name = \"\";\n    plugin_version = \"\";\n    return;\n  }\n\n  plugin_name = std::string(model_info.plugin_name);\n  plugin_version = std::to_string(model_info.ver_major) + \".\" +\n                   std::to_string(model_info.ver_minor) + \".\" +\n                   std::to_string(model_info.ver_patch);\n  config->SetProperty(\"encryption.header_reserve\", (uint8_t)model_info.reserve);\n  header_offset_ = (int32_t)(sizeof(PrefixInfo));\n}\n\nuint8_t* ModelDecryption::GetModelBuffer(int64_t& model_len) {\n  model_len = 0;\n  if (model_state_ == MODEL_STATE_ERROR) {\n    MBLOG_ERROR << \"model_state is error\";\n    modelbox::StatusError = {modelbox::STATUS_INVALID, \"model_state is error\"};\n    return nullptr;\n  }\n\n  // tensorflow TF_Buffer seems a c-style code, here use std::malloc\n  auto* model_buf = static_cast<uint8_t*>(malloc(fsize_));\n  if (!model_buf) {\n    MBLOG_ERROR << \"memory alloc fail with size =.\" << fsize_;\n    modelbox::StatusError = {modelbox::STATUS_NOMEM, \"Read file fail.\"};\n    return nullptr;\n  }\n\n  fmodel_.seekg(0, std::ios::beg);\n  fmodel_.read((char*)model_buf, fsize_);\n  if (fmodel_.gcount() != fsize_) {\n    MBLOG_ERROR << \"Read file fail.\";\n    free(model_buf);\n    modelbox::StatusError = {modelbox::STATUS_INVALID, \"Read file fail.\"};\n    return nullptr;\n  }\n\n  if (model_state_ == MODEL_STATE_ENCRYPT && cur_plugin_ != nullptr) {\n    int64_t raw_len = fsize_ - header_offset_;\n    int64_t plain_len = raw_len + EVP_MAX_BLOCK_LENGTH + 1;\n    auto* plain_buf = static_cast<uint8_t*>(malloc(raw_len));\n    auto ret = cur_plugin_->ModelDecrypt(model_buf + header_offset_, raw_len,\n                                         plain_buf, plain_len);\n    free(model_buf);\n\n    if (ret != modelbox::STATUS_SUCCESS) {\n      
MBLOG_ERROR << \"ModelDecrypt fail.\";\n      model_len = 0;\n      free(plain_buf);\n      modelbox::StatusError = {ret, \"ModelDecrypt fail.\"};\n      return nullptr;\n    }\n    model_len = plain_len;\n    return plain_buf;\n  }\n  model_len = fsize_;\n  return model_buf;\n}\n\nstd::shared_ptr<uint8_t> ModelDecryption::GetModelSharedBuffer(\n    int64_t& model_len) {\n  uint8_t* ret_buf = GetModelBuffer(model_len);\n  if (ret_buf) {\n    std::shared_ptr<uint8_t> retShare(ret_buf, [](uint8_t* p) { free(p); });\n    return retShare;\n  }\n  return nullptr;\n}\n"
  },
  {
    "path": "src/drivers/common/flowunit/inference/model_decrypt.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MODEL_DECRYPT_H_\n#define MODELBOX_FLOWUNIT_MODEL_DECRYPT_H_\n\n#include <fstream>\n#include <memory>\n#include <string>\n\n#include \"model_decrypt_interface.h\"\n\nclass ModelDecryption {\n public:\n  typedef enum {\n    MODEL_STATE_ENCRYPT,\n    MODEL_STATE_PLAIN,\n    MODEL_STATE_ERROR\n  } MODEL_STATE;\n  ModelDecryption() = default;\n  virtual ~ModelDecryption();\n\n  /**\n   * @brief init funciton\n   * @param model_path a model filename path\n   * @param drivers_ptr drivers point to get plugin\n   * @param config a toml config\n   * @return Success if pass\n   */\n  modelbox::Status Init(const std::string& model_path,\n                        const std::shared_ptr<modelbox::Drivers>& drivers_ptr,\n                        const std::shared_ptr<modelbox::Configuration>& config);\n                        \n  /**\n   * @brief model decrypt implement\n   * @param model_len a return value: the plain model buffer length\n   * @return plain buffer，note ,call free for this buffer by yourself!\n   */\n  uint8_t* GetModelBuffer(int64_t& model_len);\n\n  /**\n   * @brief model decrypt implement\n   * @param model_path model file path name\n   * @param model_len a return value: the plain model buffer length\n   * @return plain buffer smart point, recommand to call this function\n   */\n  
std::shared_ptr<uint8_t> GetModelSharedBuffer(int64_t& model_len);\n\n  /**\n   * @brief call it to know whether it's an encrypted model\n   * @return MODEL_STATE enum\n   */\n  inline MODEL_STATE GetModelState() { return model_state_; }\n\n private:\n  ModelDecryption(const ModelDecryption&) = delete;\n  ModelDecryption& operator=(const ModelDecryption&) = delete;\n  void GetInfoFromHeader(std::string& plugin_name, std::string& plugin_version,\n                         const std::shared_ptr<modelbox::Configuration>& config);\n                         \n  int64_t fsize_ = 0;\n  int32_t header_offset_ = 0;\n  std::ifstream fmodel_;\n  std::shared_ptr<modelbox::DriverFactory> cur_factory_ = nullptr;\n  std::shared_ptr<IModelDecryptPlugin> cur_plugin_ = nullptr;\n  MODEL_STATE model_state_ = MODEL_STATE_ERROR;\n};\n\n#endif  // MODELBOX_FLOWUNIT_MODEL_DECRYPT_H_"
  },
  {
    "path": "src/drivers/common/flowunit/inference/model_decrypt_header.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MODEL_DECRYPT_HEADER_H_\n#define MODELBOX_FLOWUNIT_MODEL_DECRYPT_HEADER_H_\n\nconstexpr const char* DRIVER_TYPE = \"cpu\";\nconstexpr const char* DRIVER_CLASS_MODEL_DECRYPT = \"DRIVER-MODEL-DECRYPT\";\n\n#endif  // MODELBOX_FLOWUNIT_MODEL_DECRYPT_HEADER_H_"
  },
  {
    "path": "src/drivers/common/flowunit/inference/model_decrypt_interface.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MODEL_DECRYPT_INTERFACE_H_\n#define MODELBOX_FLOWUNIT_MODEL_DECRYPT_INTERFACE_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/status.h>\n\n#include <cstdint>\n\nconstexpr const char* MAGIC_FLAG = \"MB_EnModel\";\n\n#define MAGIC_SIZE 10  // len of MB_EnModel\n#define PLUGIN_SIZE 50\n\nstruct PrefixInfo {\n  char magic[MAGIC_SIZE];        /* magicnumber */\n  uint8_t ver_major;             /* version number X*/\n  uint8_t ver_minor;             /* version number Y*/\n  uint8_t ver_patch;             /* version number Z*/\n  uint8_t reserve;               /* reserve flag*/\n  char plugin_name[PLUGIN_SIZE]; /* plugin name*/\n} __attribute__((packed, aligned(1)));\n\nclass IModelDecryptPlugin : public modelbox::Driver {\n public:\n  /**\n   * @brief model decrypt Init\n   * @param fname model file path name\n   * @param config encryption.rootkey and encryption.passwd will be passed here if\n   * passwd is delivered by the config toml file\n   * @return Success or not\n   */\n  virtual modelbox::Status Init(\n      const std::string& fname,\n      std::shared_ptr<modelbox::Configuration> config) = 0;\n\n  /**\n   * @brief model decrypt implement\n   * @param raw_buf model encrypted buffer\n   * @param raw_len model encrypted buffer len\n   * @param plain_buf model plain buffer, 
plain_len will pass the max plain_buf len\n   * @param plain_len set the real len for plain buffer \n   * @return Success or not\n   */\n  virtual modelbox::Status ModelDecrypt(uint8_t* raw_buf, int64_t raw_len,\n                                        uint8_t* plain_buf,\n                                        int64_t& plain_len) = 0;\n};\n\n#endif  // MODELBOX_FLOWUNIT_MODEL_DECRYPT_INTERFACE_H_"
  },
  {
    "path": "src/drivers/common/flowunit/mean/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-mean-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_MEAN_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_MEAN_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/mean/mean_flowunit_base.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"mean_flowunit_base.h\"\n\nMeanFlowUnitBase::MeanFlowUnitBase() = default;\nMeanFlowUnitBase::~MeanFlowUnitBase() = default;\n\nmodelbox::Status MeanFlowUnitBase::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"mean\")) {\n    MBLOG_ERROR << \"mean flow unit does not contain mean param\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto input_params = opts->GetDoubles(\"mean\");\n  if (input_params.size() != CHANNEL_NUM) {\n    MBLOG_ERROR << \"mean param error\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  params_.means_.assign(input_params.begin(), input_params.end());\n  return modelbox::STATUS_OK;\n}\n\nbool MeanFlowUnitBase::CheckBufferListValid(\n    const std::shared_ptr<modelbox::BufferList> &buffer_list) {\n  if (buffer_list == nullptr) {\n    MBLOG_ERROR << \"mean flowunit input is null\";\n    return false;\n  }\n\n  if (buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"mean flowunit input size is 0\";\n    return false;\n  }\n\n  return true;\n}\n\nbool BuildOutputBufferList(\n    const std::shared_ptr<modelbox::BufferList> &input_bufs,\n    std::shared_ptr<modelbox::BufferList> &output_bufs) {\n  std::vector<size_t> shape;\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    modelbox::ModelBoxDataType type = 
modelbox::MODELBOX_TYPE_INVALID;\n    if (!input_bufs->At(i)->Get(\"type\", type)) {\n      MBLOG_FATAL << \"mean flowunit can not get input type from meta\";\n      return false;\n    }\n\n    if ((type != modelbox::ModelBoxDataType::MODELBOX_FLOAT) &&\n        (type != modelbox::ModelBoxDataType::MODELBOX_UINT8)) {\n      MBLOG_FATAL << \"mean flowunit input type error, type is \" << type;\n      return false;\n    }\n\n    size_t size = 0;\n    if (type == modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n      size = input_bufs->At(i)->GetBytes();\n    } else {\n      size = (input_bufs->At(i)->GetBytes() / sizeof(uint8_t)) * sizeof(float);\n    }\n\n    shape.emplace_back(size);\n  }\n\n  output_bufs->Build(shape);\n  return true;\n}\n\nmodelbox::Status MeanFlowUnitBase::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status MeanFlowUnitBase::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MeanFlowUnitBase::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/drivers/common/flowunit/mean/mean_flowunit_base.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_MEAN_BASE_H_\n#define MODELBOX_FLOWUNIT_MEAN_BASE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr uint32_t SHAPE_SIZE = 3;\nconstexpr uint32_t CHANNEL_NUM = 3;\n\nclass MeanParams {\n public:\n  std::vector<double> means_;\n};\n\nclass MeanFlowUnitBase : public modelbox::FlowUnit {\n public:\n  MeanFlowUnitBase();\n  ~MeanFlowUnitBase() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override = 0;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n protected:\n  bool CheckBufferListValid(\n      const std::shared_ptr<modelbox::BufferList> 
&buffer_list);\n  MeanParams params_;\n};\n\nbool BuildOutputBufferList(\n    const std::shared_ptr<modelbox::BufferList> &input_bufs,\n    std::shared_ptr<modelbox::BufferList> &output_bufs);\n\n#endif  // MODELBOX_FLOWUNIT_MEAN_BASE_H_"
  },
  {
    "path": "src/drivers/common/flowunit/normalize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-normalize-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_NORMALIZE_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_NORMALIZE_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/normalize/normalize_flowunit_base.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"normalize_flowunit_base.h\"\n\nNormalizeFlowUnitBase::NormalizeFlowUnitBase() = default;\nNormalizeFlowUnitBase::~NormalizeFlowUnitBase() = default;\n\nmodelbox::Status NormalizeFlowUnitBase::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"standard_deviation_inverse\")) {\n    MBLOG_ERROR << \"normalize flow unit does not contain normalize param\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto input_params = opts->GetDoubles(\"standard_deviation_inverse\");\n  if (input_params.size() != CHANNEL_NUM) {\n    MBLOG_ERROR << \"normalize param error\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  params_.normalizes_.assign(input_params.begin(), input_params.end());\n  return modelbox::STATUS_OK;\n}\n\nbool NormalizeFlowUnitBase::CheckBufferListValid(\n    const std::shared_ptr<modelbox::BufferList> &buffer_list) {\n  if (buffer_list == nullptr) {\n    MBLOG_ERROR << \"normalize flowunit input is null\";\n    return false;\n  }\n\n  if (buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"normalize flowunit input size is 0\";\n    return false;\n  }\n\n  return true;\n}\n\nbool BuildOutputBufferList(\n    const std::shared_ptr<modelbox::BufferList> &input_bufs,\n    std::shared_ptr<modelbox::BufferList> &output_bufs) {\n  std::vector<size_t> shape;\n  for (size_t i 
= 0; i < input_bufs->Size(); ++i) {\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    if (!input_bufs->At(i)->Get(\"type\", type)) {\n      MBLOG_FATAL << \"normalize flowunit can not get input type from meta\";\n      return false;\n    }\n\n    if ((type != modelbox::ModelBoxDataType::MODELBOX_FLOAT) &&\n        (type != modelbox::ModelBoxDataType::MODELBOX_UINT8)) {\n      MBLOG_FATAL << \"normalize flowunit input type error, type is \" << type;\n      return false;\n    }\n\n    size_t size = 0;\n    if (type == modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n      size = input_bufs->At(i)->GetBytes();\n    } else {\n      size = (input_bufs->At(i)->GetBytes() / sizeof(uint8_t)) * sizeof(float);\n    }\n\n    shape.emplace_back(size);\n  }\n\n  output_bufs->Build(shape);\n  return true;\n}\n\nmodelbox::Status NormalizeFlowUnitBase::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status NormalizeFlowUnitBase::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NormalizeFlowUnitBase::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/common/flowunit/normalize/normalize_flowunit_base.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_NORMALIZE_BASE_H_\n#define MODELBOX_FLOWUNIT_NORMALIZE_BASE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr uint32_t SHAPE_SIZE = 3;\nconstexpr uint32_t CHANNEL_NUM = 3;\n\nclass NormalizeParams {\n public:\n  std::vector<double> normalizes_;\n};\n\nclass NormalizeFlowUnitBase : public modelbox::FlowUnit {\n public:\n  NormalizeFlowUnitBase();\n  ~NormalizeFlowUnitBase() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override = 0;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n protected:\n  bool CheckBufferListValid(\n      const 
std::shared_ptr<modelbox::BufferList> &buffer_list);\n  NormalizeParams params_;\n};\n\nbool BuildOutputBufferList(\n    const std::shared_ptr<modelbox::BufferList> &input_bufs,\n    std::shared_ptr<modelbox::BufferList> &output_bufs);\n#endif  // MODELBOX_FLOWUNIT_NORMALIZE_BASE_H_"
  },
  {
    "path": "src/drivers/common/flowunit/safe_http/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nif (NOT CPPREST_FOUND) \n    message(STATUS, \"not build safe http\")\n    return()\nendif()\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-safe-http-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_SAFE_HTTP_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_SAFE_HTTP_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/safe_http/http_util.cc",
    "content": "#include \"http_util.h\"\n\nuint64_t HttpRequestLimiter::max_request_;\nstd::atomic_size_t HttpRequestLimiter::request_count_;\nstd::mutex HttpRequestLimiter::request_mutex_;\n\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status) {\n  auto resp = web::http::http_response(status);\n  resp.headers().add(U(\"Referrer-Policy\"),\n                     U(\"strict-origin-when-cross-origin\"));\n  resp.headers().add(\n      U(\"Content-Security-Policy\"),\n      U(\"default-src 'self'  data: 'unsafe-inline' 'unsafe-eval'; \"\n        \"object-src 'none'; \"\n        \"frame-ancestors 'none'\"));\n  resp.headers().add(U(\"X-Frame-Options\"), U(\"DENY\"));\n  request.reply(resp).then([](pplx::task<void> t) { HandleError(t); });\n}\n\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status, const utf8string &body_data) {\n  auto resp = web::http::http_response(status);\n  resp.set_body(body_data);\n  resp.headers().add(U(\"Referrer-Policy\"),\n                     U(\"strict-origin-when-cross-origin\"));\n  resp.headers().add(\n      U(\"Content-Security-Policy\"),\n      U(\"default-src 'self'  data: 'unsafe-inline' 'unsafe-eval'; \"\n        \"object-src 'none'; \"\n        \"frame-ancestors 'none'\"));\n  resp.headers().add(U(\"X-Frame-Options\"), U(\"DENY\"));\n  request.reply(resp).then([](pplx::task<void> t) { HandleError(t); });\n}\n\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status,\n               const concurrency::streams::istream &body_data,\n               const utility::string_t &content_type) {\n  auto resp = web::http::http_response(status);\n  resp.set_body(body_data, content_type);\n  resp.headers().add(U(\"Referrer-Policy\"),\n                     U(\"strict-origin-when-cross-origin\"));\n  resp.headers().add(\n      U(\"Content-Security-Policy\"),\n      U(\"default-src 'self'  data: 
'unsafe-inline' 'unsafe-eval'; \"\n        \"object-src 'none'; \"\n        \"frame-ancestors 'none'\"));\n  resp.headers().add(U(\"X-Frame-Options\"), U(\"DENY\"));\n  request.reply(resp).then([](pplx::task<void> t) { HandleError(t); });\n}\n\nutility::string_t GetSupportedMethods() {\n  utility::string_t allowed;\n  std::vector<web::http::method> methods = {\n      web::http::methods::POST, web::http::methods::GET,\n      web::http::methods::DEL, web::http::methods::PUT};\n  bool first = true;\n  for (auto &method : methods) {\n    if (!first) {\n      allowed += U(\", \");\n    } else {\n      first = false;\n    }\n    allowed += method;\n  }\n  return allowed;\n}\n\nvoid HandleError(pplx::task<void> &t) {\n  try {\n    t.get();\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"http error\" << e.what();\n  }\n}\n\nvoid HandleUnSupportMethod(const web::http::http_request &request) {\n  web::http::http_response response(web::http::status_codes::MethodNotAllowed);\n  response.headers().add(U(\"Allow\"), GetSupportedMethods());\n  request.reply(response);\n}\n\nvoid HandleHealthCheck(const web::http::http_request &request) {\n  auto health_value = web::json::value::object();\n  health_value[\"status\"] = web::json::value(200);\n  health_value[\"message\"] = web::json::value::string(\"success\");\n  auto resp_body = health_value.serialize();\n  auto resp_status = web::http::status_codes::OK;\n  SafeReply(request, resp_status, resp_body);\n}\n\nHttpRequestLimiter::HttpRequestLimiter() = default;\n\nHttpRequestLimiter::~HttpRequestLimiter() { --request_count_; };\n\nstd::shared_ptr<HttpRequestLimiter> HttpRequestLimiter::GetInstance() {\n  std::lock_guard<std::mutex> lock(request_mutex_);\n  if (request_count_ < max_request_) {\n    ++request_count_;\n    return std::make_shared<HttpRequestLimiter>();\n  }\n\n  return nullptr;\n}"
  },
  {
    "path": "src/drivers/common/flowunit/safe_http/http_util.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_HTTP_UTIL_H_\n#define MODELBOX_FLOWUNIT_HTTP_UTIL_H_\n#include \"modelbox/base/log.h\"\n#include \"cpprest/http_listener.h\"\n\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status);\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status, const utf8string &body_data);\nvoid SafeReply(const web::http::http_request &request,\n               web::http::status_code status,\n               const concurrency::streams::istream &body_data,\n               const utility::string_t &content_type);\n\nvoid HandleError(pplx::task<void> &t);\n\nutility::string_t GetSupportedMethods();\n\nvoid HandleUnSupportMethod(const web::http::http_request &request);\n\nvoid HandleHealthCheck(const web::http::http_request &request);\n\nclass HttpRequestLimiter {\n  public:\n   HttpRequestLimiter(HttpRequestLimiter &&) = delete;\n   HttpRequestLimiter &operator=(HttpRequestLimiter &&) = delete;\n   HttpRequestLimiter(const HttpRequestLimiter &) = delete;\n   HttpRequestLimiter &operator=(const HttpRequestLimiter &) = delete;\n\n   HttpRequestLimiter();\n   virtual ~HttpRequestLimiter();\n\n   static std::shared_ptr<HttpRequestLimiter> GetInstance();\n   static uint64_t max_request_;\n   static std::atomic_size_t 
request_count_;\n\n  private:\n   static std::mutex request_mutex_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_HTTP_UTIL_H_"
  },
  {
    "path": "src/drivers/common/flowunit/source_context/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-source-context-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_SOURCE_CONTEXT_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${MODELBOX_COMMON_SOURCE_CONTEXT_INCLUDE})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/source_context/source_context.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"source_context.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nSourceContext::SourceContext(std::shared_ptr<DataSourceParserPlugin> plugin,\n                             std::string plugin_name)\n    : plugin_(std::move(plugin)), plugin_name_(std::move(plugin_name)) {}\n\nSourceContext::~SourceContext() = default;\n\nstd::shared_ptr<std::string> SourceContext::GetSourceURL() {\n  std::shared_ptr<std::string> uri;\n  std::string uri_str;\n  DestroyUriFunc destroy_uri_func;\n\n  auto ret = plugin_->Parse(session_context_, session_config_, data_source_cfg_,\n                            uri_str, destroy_uri_func);\n  if (!ret) {\n    MBLOG_ERROR << \"Parse config failed, source uri is empty\";\n    return nullptr;\n  }\n\n  uri = std::shared_ptr<std::string>(new std::string(uri_str),\n                                     [destroy_uri_func](std::string *ptr) {\n                                       if (destroy_uri_func) {\n                                         destroy_uri_func(*ptr);\n                                       }\n                                       delete ptr;\n                                     });\n\n  return uri;\n}\n\nRetryStatus SourceContext::NeedRetry() {\n  retry_context_.RetryTimesInc();\n\n  if (last_status_ == modelbox::STATUS_NODATA && stream_type_ == \"file\") {\n   
 return modelbox::RETRY_STOP;\n  }\n\n  if (plugin_->GetRetryEnabled() &&\n      ((retry_context_.GetRetryTimes() <= plugin_->GetRetryTimes()) ||\n       (plugin_->GetRetryTimes() == -1))) {\n    return modelbox::RETRY_NEED;\n  }\n  MBLOG_INFO << \"retry_enable_: \" << plugin_->GetRetryEnabled()\n             << \" retry_times: \" << retry_context_.GetRetryTimes()\n             << \" retry_max_times_: \" << plugin_->GetRetryTimes();\n  return modelbox::RETRY_NONEED;\n}\n\nvoid SourceContext::SetLastProcessStatus(const modelbox::Status &status) {\n  last_status_ = status;\n  if (status == modelbox::STATUS_SUCCESS) {\n    retry_context_.ResetRetryTimes();\n  }\n}\n\nvoid SourceContext::SetStreamType(std::string type) {\n  stream_type_ = std::move(type);\n  MBLOG_DEBUG << \"plugin_name: \" << plugin_name_\n              << \"  stream_type: \" << stream_type_;\n}\n\nvoid SourceContext::SetRetryParam(int32_t retry_enable, int32_t retry_interval,\n                                  int32_t retry_times) {\n  retry_context_.SetMaxRetryTimes(retry_times);\n  retry_context_.SetRetryInterval(retry_interval);\n  retry_context_.SetRetryEnable(retry_enable);\n  MBLOG_DEBUG << \"plugin_name: \" << plugin_name_\n              << \"  retry_enable: \" << retry_context_.GetRetryEnable()\n              << \"  retry_interval:\" << retry_context_.GetRetryInterval()\n              << \"  retry_times: \" << retry_context_.GetMaxRetryTimes();\n}\n\nvoid SourceContext::SetDataSourceCfg(std::string data_source_cfg) {\n  data_source_cfg_ = std::move(data_source_cfg);\n}\n\nvoid SourceContext::SetSessionContext(\n    const std::shared_ptr<modelbox::SessionContext> &session_context) {\n  session_context_ = session_context;\n}\n\nvoid SourceContext::SetSessionConfig(\n    const std::shared_ptr<modelbox::Configuration> &session_config) {\n  session_config_ = session_config;\n}\n\nint32_t SourceContext::GetRetryInterval() {\n  return retry_context_.GetRetryInterval();\n}\n\n}  // namespace 
modelbox\n"
  },
  {
    "path": "src/drivers/common/flowunit/source_context/source_context.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SOURCE_CONTEXT_H_\n#define MODELBOX_SOURCE_CONTEXT_H_\n\n#include <modelbox/base/config.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\n#include \"modelbox/data_context.h\"\n\nnamespace modelbox {\n\nenum RetryStatus { RETRY_NONEED = 0, RETRY_NEED = 1, RETRY_STOP = 2 };\n\nclass RetryContext {\n public:\n  void SetRetryEnable(int32_t retry_enabled) {\n    retry_enabled_ = retry_enabled;\n  };\n\n  void SetMaxRetryTimes(int32_t max_retry_times) {\n    max_retry_times_ = max_retry_times;\n  };\n\n  void SetRetryInterval(int32_t retry_interval) {\n    retry_interval_ = retry_interval;\n  };\n\n  void ResetRetryTimes() { retry_times_ = 0; };\n\n  int32_t GetMaxRetryTimes() { return max_retry_times_; };\n  int32_t GetRetryTimes() { return retry_times_; };\n  int32_t GetRetryInterval() { return retry_interval_; };  // millisecond\n  int32_t GetRetryEnable() { return retry_enabled_; };\n  void RetryTimesInc() { retry_times_++; };\n\n private:\n  int32_t retry_enabled_;    // retry or not\n  int32_t retry_interval_;   // retry interval millisecond\n  int32_t max_retry_times_;  // max retry times\n  int32_t retry_times_{0};   // current retry times\n};\n\nclass SourceContext {\n public:\n  
SourceContext(std::shared_ptr<DataSourceParserPlugin> plugin,\n                std::string plugin_name);\n\n  virtual ~SourceContext();\n\n  void SetStreamType(std::string type);\n  std::shared_ptr<std::string> GetSourceURL();\n  void SetLastProcessStatus(const modelbox::Status& status);\n  void SetDataSourceCfg(std::string data_source_cfg);\n  void SetSessionContext(\n      const std::shared_ptr<modelbox::SessionContext>& session_context);\n  void SetSessionConfig(\n      const std::shared_ptr<modelbox::Configuration>& session_config);\n\n  RetryStatus NeedRetry();\n  int32_t GetRetryInterval();\n  void SetRetryParam(int32_t retry_enable, int32_t retry_interval,\n                     int32_t retry_times);\n\n private:\n  std::shared_ptr<modelbox::SessionContext> session_context_;\n  std::shared_ptr<modelbox::Configuration> session_config_;\n  std::string data_source_cfg_;\n  std::shared_ptr<DataSourceParserPlugin> plugin_;\n  modelbox::Status last_status_{modelbox::STATUS_SUCCESS};\n  std::string stream_type_;\n  RetryContext retry_context_;\n  std::string plugin_name_;\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/drivers/common/flowunit/video_decode/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif (NOT FFMPEG_FOUND) \n    message(STATUS \"Not found ffmpeg, disable video decode common\")\n    return()\nendif()\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-video-decode-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBRARY} ${FFMPEG_LIBRARIES})\n\nset(MODELBOX_COMMON_VIDEO_DECODE_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_VIDEO_DECODE_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/flowunit/video_decode/ffmpeg_color_converter.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_color_converter.h\"\n\n#include <modelbox/base/log.h>\n#include <video_decode_common.h>\n\nmodelbox::Status FfmpegColorConverter::CvtColor(\n    const std::shared_ptr<AVFrame> &src_frame, uint8_t *out_frame_data,\n    AVPixelFormat out_pix_fmt) {\n  if (!SupportCvtPixFmt(out_pix_fmt)) {\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto &width = src_frame->width;\n  auto &height = src_frame->height;\n  if (width_ != width || height != height_) {\n    width_ = width;\n    height_ = height;\n    auto ret = InitSwsCtx(width, height, (AVPixelFormat)src_frame->format,\n                          out_pix_fmt);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return ret;\n    }\n  }\n\n  int32_t linesize[4];\n  GetLineSize(out_pix_fmt, width, linesize, 4);\n  uint8_t *data[4] = {nullptr};\n  data[0] = out_frame_data;\n  if (out_pix_fmt == AVPixelFormat::AV_PIX_FMT_NV12) {\n    data[1] = out_frame_data + width * height;  // For UV plane\n  } else if (out_pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV420P) {\n    data[1] = out_frame_data + width * height;  // For U plane\n    data[2] = data[1] + width * height / 4;     // For V plane\n  }\n\n  auto ffmpeg_ret = sws_scale(sws_ctx_.get(), src_frame->data,\n                              src_frame->linesize, 0, height, data, linesize);\n  if (ffmpeg_ret < 0) 
{\n    GET_FFMPEG_ERR(ffmpeg_ret, ffmpeg_err_str);\n    MBLOG_ERROR << \"sws_scale failed, detail:\" << ffmpeg_err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nbool FfmpegColorConverter::SupportCvtPixFmt(AVPixelFormat pix_fmt) {\n  if (pix_fmt == AVPixelFormat::AV_PIX_FMT_BGR24 ||\n      pix_fmt == AVPixelFormat::AV_PIX_FMT_RGB24 ||\n      pix_fmt == AVPixelFormat::AV_PIX_FMT_NV12 ||\n      pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV420P) {\n    return true;\n  }\n\n  return false;\n}\n\nmodelbox::Status FfmpegColorConverter::InitSwsCtx(int32_t width, int32_t height,\n                                                  AVPixelFormat src_pix_fmt,\n                                                  AVPixelFormat dest_pix_fmt) {\n  auto *sws_ctx = sws_getContext(width, height, src_pix_fmt, width, height,\n                                 dest_pix_fmt, 0, nullptr, nullptr, nullptr);\n  if (sws_ctx == nullptr) {\n    auto fmt_name = std::to_string(dest_pix_fmt);\n    const auto *name_c = av_get_pix_fmt_name(dest_pix_fmt);\n    if (name_c) {\n      fmt_name = name_c;\n    }\n    const char *pix_fmt_name = av_get_pix_fmt_name(src_pix_fmt);\n    if (pix_fmt_name == nullptr) {\n      pix_fmt_name = \"unknown\";\n    }\n\n    MBLOG_ERROR << \"Failed to create sws_ctx for [f:\" << fmt_name\n                << \" w:\" << width << \" h:\" << height << \"]->[f:\" << pix_fmt_name\n                << \" w:\" << width << \" h:\" << height << \"]\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  sws_ctx_.reset(sws_ctx, [](SwsContext *ctx) { sws_freeContext(ctx); });\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegColorConverter::GetLineSize(AVPixelFormat pix_fmt,\n                                                   int32_t width,\n                                                   int32_t linesize[4],\n                                                   int32_t linesize_size) {\n  linesize[1] = 0;\n  linesize[2] = 0;\n  
linesize[3] = 0;\n  switch (pix_fmt) {\n    case AVPixelFormat::AV_PIX_FMT_NV12:\n      linesize[0] = width;\n      linesize[1] = width;\n      break;\n    case AVPixelFormat::AV_PIX_FMT_RGB24:\n    case AVPixelFormat::AV_PIX_FMT_BGR24:\n      linesize[0] = width * 3;\n      break;\n    case AVPixelFormat::AV_PIX_FMT_YUV420P:\n      linesize[0] = width;\n      linesize[1] = width / 2;\n      linesize[2] = width / 2;\n      break;\n    default:\n      if (av_get_pix_fmt_name(pix_fmt)) {\n        MBLOG_ERROR << \"Not support pix fmt \" << av_get_pix_fmt_name(pix_fmt);\n      } else {\n        MBLOG_ERROR << \"Not support pix fmt \" << pix_fmt;\n      }\n      return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/common/flowunit/video_decode/ffmpeg_color_converter.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_COLOR_CONVERTER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_COLOR_CONVERTER_H_\n\n#include <modelbox/base/status.h>\n#include <vector>\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavutil/imgutils.h>\n#include <libswscale/swscale.h>\n}\n\nclass FfmpegColorConverter {\n public:\n  modelbox::Status CvtColor(const std::shared_ptr<AVFrame> &src_frame,\n                          uint8_t *out_frame_data, AVPixelFormat out_pix_fmt);\n\n private:\n  bool SupportCvtPixFmt(AVPixelFormat pix_fmt);\n\n  modelbox::Status InitSwsCtx(int32_t width, int32_t height,\n                            AVPixelFormat src_pix_fmt, AVPixelFormat dest_pix_fmt);\n\n  modelbox::Status GetLineSize(AVPixelFormat pix_fmt, int32_t width,\n                             int32_t linesize[4], int32_t linesize_size);\n\n  modelbox::Status AllocFrame(std::shared_ptr<AVFrame> &frame, int32_t *line_size,\n                            int32_t width, int32_t height,\n                            AVPixelFormat pix_fmt);\n\n  std::shared_ptr<SwsContext> sws_ctx_;\n  int32_t width_{0};\n  int32_t height_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_COLOR_CONVERTER_H_"
  },
  {
    "path": "src/drivers/common/flowunit/video_decode/video_decode_common.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"video_decode_common.h\"\n\n#include <modelbox/base/log.h>\n\n#include <functional>\n#include <map>\n\nnamespace videodecode {\n\nsize_t NV12BufferSize(int32_t width, int32_t height) {\n  return width * height * 3 / 2;\n}\n\nsize_t RGBBufferSize(int32_t width, int32_t height) {\n  return width * height * 3;\n}\n\nstd::map<std::string, std::function<size_t(int32_t width, int32_t height)>>\n    g_pix_fmt_to_buffer_size = {{\"nv12\", NV12BufferSize},\n                                {\"rgb\", RGBBufferSize},\n                                {\"bgr\", RGBBufferSize}};\n\nconst std::set<std::string> g_supported_pix_fmt = {\"nv12\", \"rgb\", \"bgr\"};\nconst std::map<std::string, AVPixelFormat> g_av_pix_fmt_map = {\n    {\"nv12\", AVPixelFormat::AV_PIX_FMT_NV12},\n    {\"rgb\", AVPixelFormat::AV_PIX_FMT_RGB24},\n    {\"bgr\", AVPixelFormat::AV_PIX_FMT_BGR24}};\n\nmodelbox::Status GetBufferSize(int32_t width, int32_t height,\n                             const std::string &pix_fmt, size_t &size) {\n  auto iter = g_pix_fmt_to_buffer_size.find(pix_fmt);\n  if (iter == g_pix_fmt_to_buffer_size.end()) {\n    MBLOG_ERROR << \"Not support pix fmt \" << pix_fmt;\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  size = iter->second(width, height);\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid 
UpdateStatsInfo(std::shared_ptr<modelbox::DataContext> &data_ctx, int32_t width,\n                     int32_t height) {\n  auto stats = data_ctx->GetStatistics();\n  stats->AddItem(\"frame_width\", width, true);\n  stats->AddItem(\"frame_height\", height, true);\n  uint64_t one_frame = 1;\n  stats->IncreaseValue(\"frame_count\", one_frame);\n}\n\n}  // namespace videodecode"
  },
  {
    "path": "src/drivers/common/flowunit/video_decode/video_decode_common.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_DECODE_COMMON_H_\n#define MODELBOX_FLOWUNIT_VIDEO_DECODE_COMMON_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/data_context.h>\n\n#include <map>\n#include <set>\n#include <string>\n\n#include \"ffmpeg_color_converter.h\"\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavutil/error.h>\n}\n\nnamespace videodecode {\n\nextern const std::set<std::string> g_supported_pix_fmt;\nextern const std::map<std::string, AVPixelFormat> g_av_pix_fmt_map;\n\n#define GET_FFMPEG_ERR(err_num, var_name)        \\\n  char var_name[AV_ERROR_MAX_STRING_SIZE] = {0}; \\\n  av_make_error_string(var_name, AV_ERROR_MAX_STRING_SIZE, err_num);\n\nmodelbox::Status GetBufferSize(int32_t width, int32_t height,\n                             const std::string &pix_fmt, size_t &size);\n\nvoid UpdateStatsInfo(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                     int32_t width, int32_t height);\n}  // namespace videodecode\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_DECODE_COMMON_H_"
  },
  {
    "path": "src/drivers/common/libs/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-comm-lib)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nset(HEADER ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${HEADER})\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\ninstall(DIRECTORY ${HEADER}/modelbox\n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_DRIVER_COMMON_LIB_INCLUDE ${HEADER} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${HEADER})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/libs/file_requester/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\n\nif (NOT CPPREST_FOUND) \n    message(STATUS \"Not found cpprest, disable file_requester library\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_COMMON_LIB_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${CPPREST_INCLUDE_DIR})\ninclude_directories(${INCLUDE})\n\nset(MODELBOX_COMMON_LIB_SHARED modelbox-drivers-common-filerequester)\nset(MODELBOX_COMMON_LIB_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_COMMON_LIB_SHARED} SHARED ${MODELBOX_COMMON_LIB_SOURCE})\n\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FILE_REQUESTER ${MODELBOX_COMMON_LIB_SHARED})\nset_target_properties(${MODELBOX_COMMON_LIB_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} 
pthread)\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} rt)\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${CPPREST_LIBRARIES})\n\ninstall(TARGETS ${MODELBOX_COMMON_LIB_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FILE_REQUESTER ${MODELBOX_COMMON_LIB_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FILE_REQUESTER_INCLUDE ${MODELBOX_COMMON_LIB_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FILE_REQUESTER_SOURCES ${MODELBOX_COMMON_LIB_SOURCE} CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_COMMON_LIB_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_COMMON_LIB_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/common/libs/file_requester/file_requester.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/drivers/common/file_requester.h\"\n\n#include <algorithm>\n\n#include \"cpprest/producerconsumerstream.h\"\n#include \"cpprest/uri.h\"\n#include \"modelbox/base/log.h\"\n\nconst int MAX_BLOCK_SIZE = 1 * 1024 * 1024;\nconst int MAX_READ_SIZE = 40;\n\nnamespace modelbox {\n\nFileRequester::~FileRequester() {\n  try {\n    listener_->close().wait();\n  } catch (const std::exception &e) {\n    MBLOG_INFO << \"close file request failed, \" << e.what();\n  }\n}\n\nstd::once_flag FileRequester::file_requester_init_flag_;\n\nstd::shared_ptr<FileRequester> FileRequester::GetInstance() {\n  static std::shared_ptr<FileRequester> server(new FileRequester());\n  std::call_once(\n      file_requester_init_flag_,\n      [](std::shared_ptr<FileRequester> server) {\n        auto ret = server->Init();\n        if (STATUS_FAULT == ret) {\n          server = nullptr;\n        }\n      },\n      server);\n  return server;\n}\n\nStatus FileRequester::Init() {\n  utility::string_t address = _XPLATSTR(DEFAULT_FILE_REQUEST_URI);\n  web::uri_builder uri(address);\n  auto addr = uri.to_uri().to_string();\n  listener_ =\n      std::make_shared<web::http::experimental::listener::http_listener>(addr);\n  listener_->support(web::http::methods::GET,\n                     [this](const web::http::http_request &request) {\n   
                    this->HandleFileGet(request);\n                     });\n  try {\n    listener_->open().wait();\n    MBLOG_INFO << \"File requester start to listen : \" << addr;\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return STATUS_FAULT;\n  }\n  pool_ = std::make_shared<ThreadPool>(0, 8);\n  pool_->SetName(\"File-Requester\");\n  return STATUS_OK;\n}\n\nStatus FileRequester::RegisterUrlHandler(\n    const std::string &relative_url,\n    const std::shared_ptr<FileGetHandler> &handler) {\n  std::lock_guard<std::mutex> lock(handler_lock_);\n  auto iter = file_handlers_.find(relative_url);\n  if (iter == file_handlers_.end()) {\n    file_handlers_.emplace(relative_url, handler);\n    return STATUS_OK;\n  }\n  MBLOG_ERROR << \"Url \" << relative_url << \"has been registered!\";\n  return STATUS_EXIST;\n}\n\nvoid FileRequester::SetMaxFileReadSize(int read_size) {\n  if (read_size <= 0 || read_size > MAX_READ_SIZE) {\n    MBLOG_ERROR << \"Invalid read size, use default value for instead.\"\n                << \"Your read size:\" << read_size;\n    return;\n  }\n  max_read_size_ = read_size * MAX_BLOCK_SIZE;\n  MBLOG_INFO << \"Set max file read size to \" << max_read_size_;\n}\n\nStatus FileRequester::DeregisterUrl(const std::string &relative_url) {\n  std::lock_guard<std::mutex> lock(handler_lock_);\n  auto iter = file_handlers_.find(relative_url);\n  if (iter != file_handlers_.end()) {\n    file_handlers_.erase(iter);\n    MBLOG_INFO << \"Success to deregister url: \" << relative_url;\n    return STATUS_OK;\n  }\n  MBLOG_ERROR << \"Failed to deregister url: \" << relative_url\n              << \", url not registered!\";\n  return STATUS_NOTFOUND;\n}\n\nbool FileRequester::IsValidRequest(const web::http::http_request &request) {\n  const auto &headers = request.headers();\n  if (!headers.has(\"Range\")) {\n    MBLOG_ERROR << \"Request has no header names 'Range'. 
Request:\"\n                << request.to_string();\n    return false;\n  }\n  return true;\n}\n\nbool FileRequester::ReadRequestRange(const web::http::http_request &request,\n                                     const uint64_t file_size,\n                                     uint64_t &range_start,\n                                     uint64_t &range_end) {\n  auto headers = request.headers();\n  auto range_value = headers[\"Range\"];\n  const std::string range_prefix = \"bytes=\";\n  auto pos = range_value.find(range_prefix);\n  if (pos == std::string::npos) {\n    MBLOG_ERROR << \"Range header has no bytes range values.\";\n    return false;\n  }\n  auto range_start_end = range_value.substr(range_prefix.size());\n  auto ranges = StringSplit(range_start_end, '-');\n  if ((ranges.size() > 2) || (ranges.size() < 1)) {\n    MBLOG_ERROR << \"Range value is invalid.\"\n                << \"range_start_end: \" << range_start_end;\n    return false;\n  }\n  try {\n    range_start = std::stoull(ranges[0]);\n    if (ranges.size() == 1) {\n      range_end = file_size - 1;\n    } else {\n      range_end = std::stoull(ranges[1]);\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Convert request range to int failed, range \" << ranges[0]\n                << \", err \" << e.what();\n    return false;\n  }\n\n  if ((range_start < 0) || (range_start > file_size - 1) ||\n      (range_end < range_start) || (range_end > file_size - 1)) {\n    MBLOG_ERROR << \"Request range is invalid.\"\n                << \"Range start: \" << range_start << \",range end: \" << range_end\n                << \", file size: \" << file_size;\n    return false;\n  }\n  if (range_end > range_start + MAX_BLOCK_SIZE) {\n    range_end = std::min(\n        range_end, range_start + std::max(MAX_BLOCK_SIZE, max_read_size_));\n  }\n  return true;\n}\n\nvoid FileRequester::ProcessRequest(\n    const web::http::http_request &request,\n    const std::shared_ptr<FileGetHandler> &handler, uint64_t 
range_start,\n    uint64_t range_end) {\n  uint64_t file_size = handler->GetFileSize();\n  concurrency::streams::producer_consumer_buffer<unsigned char> rwbuf;\n  concurrency::streams::basic_istream<uint8_t> stream(rwbuf);\n  web::http::http_response response(web::http::status_codes::OK);\n  response.set_body(stream);\n  auto rangeResponseHeader = \"bytes \" + std::to_string(range_start) + \"-\" +\n                             std::to_string(range_end) + \"/\" +\n                             std::to_string(file_size);\n  response.headers().add(\"Content-Range\", rangeResponseHeader);\n  response.headers().set_content_type(U(\"application/octet-stream\"));\n  response.headers().set_content_length((size_t)(range_end - range_start + 1));\n\n  std::shared_ptr<unsigned char> raw_data(\n      new (std::nothrow) unsigned char[MAX_BLOCK_SIZE],\n      [](const unsigned char *p) { delete[] p; });\n\n  if (raw_data == nullptr) {\n    MBLOG_ERROR << \"create raw data buffer failed.\";\n    request.reply(web::http::status_codes::InternalError);\n    return;\n  }\n\n  auto rep = request.reply(response);\n  while (range_start < range_end) {\n    int read_size;\n    if (range_start + MAX_BLOCK_SIZE < range_end) {\n      read_size = MAX_BLOCK_SIZE;\n      response.set_status_code(web::http::status_codes::PartialContent);\n    } else {\n      read_size = range_end - range_start + 1;\n    }\n    auto ret = handler->Get(raw_data.get(), read_size, range_start);\n    if (STATUS_OK != ret) {\n      MBLOG_ERROR << \"Get file data failed.\";\n      request.reply(web::http::status_codes::InternalError);\n      return;\n    }\n    rwbuf.putn_nocopy(raw_data.get(), read_size).wait();\n    rwbuf.sync().wait();\n    range_start += read_size;\n  }\n\n  rwbuf.close(std::ios_base::out).wait();\n\n  rep.wait();\n}\n\nvoid FileRequester::HandleFileGet(const web::http::http_request &request) {\n  MBLOG_DEBUG << request.to_string();\n  std::string path = 
web::http::uri::decode(request.relative_uri().path());\n\n  std::unique_lock<std::mutex> lock_handler(handler_lock_);\n  auto iter = file_handlers_.find(path);\n  if (iter == file_handlers_.end()) {\n    MBLOG_ERROR << \"File \" << path << \"not found.\";\n    request.reply(web::http::status_codes::NotFound);\n    return;\n  }\n  auto file_get_handler = iter->second;\n  lock_handler.unlock();\n\n  if (!IsValidRequest(request)) {\n    MBLOG_ERROR << \"Request for file \" << path\n                << \" is invalid. Request: \" << request.to_string();\n    request.reply(web::http::status_codes::BadRequest);\n    return;\n  }\n\n  int file_size = file_get_handler->GetFileSize();\n  uint64_t range_start;\n  uint64_t range_end = 0;\n  if (!ReadRequestRange(request, file_size, range_start, range_end)) {\n    MBLOG_ERROR << \"Read request range for file \" << path << \" filed.\";\n    request.reply(web::http::status_codes::BadRequest);\n    return;\n  }\n\n  pool_->Submit(&FileRequester::ProcessRequest, this, request, file_get_handler,\n                range_start, range_end);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/libs/fuse/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\n\nif (NOT FUSE_FOUND) \n    message(STATUS \"Not found fuse, disable fuse library\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_COMMON_LIB_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${FUSE_INCLUDE_DIR})\ninclude_directories(${INCLUDE})\n\nset(MODELBOX_COMMON_LIB_SHARED modelbox-drivers-common-fuse)\nset(MODELBOX_COMMON_LIB_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_COMMON_LIB_SHARED} SHARED ${MODELBOX_COMMON_LIB_SOURCE})\n\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FUSE ${MODELBOX_COMMON_LIB_SHARED})\nset_target_properties(${MODELBOX_COMMON_LIB_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_COMMON_LIB_SHARED} ${FUSE_LIBRARIES})\n\ninstall(TARGETS ${MODELBOX_COMMON_LIB_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FUSE ${MODELBOX_COMMON_LIB_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FUSE_INCLUDE ${MODELBOX_COMMON_LIB_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DRIVER_COMMON_LIB_FUSE_SOURCES ${MODELBOX_COMMON_LIB_SOURCE} CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_COMMON_LIB_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_COMMON_LIB_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/common/libs/fuse/modelbox_fuse.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/drivers/common/modelbox_fuse.h\"\n\n#include <libgen.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/os.h>\n#include <modelbox/base/utils.h>\n#include <unistd.h>\n\n#include <ostream>\n#include <sstream>\n#include <thread>\n#include <utility>\n\nnamespace modelbox {\n\nstd::list<std::string> SplitPath(const std::string &s, char delim) {\n  std::list<std::string> result;\n  std::stringstream ss(s);\n  std::string item;\n\n  while (std::getline(ss, item, delim)) {\n    result.push_back(item);\n  }\n\n  return result;\n}\n\nstd::map<std::string, ModelBoxFuse *> ModelBoxFuseOperation::modelbox_fuses_;\nstd::mutex ModelBoxFuseOperation::modelbox_fuses_lock_;\n\nfuse_operations kModelboxFuseOperation = [] {\n  fuse_operations ops{};\n  ops.init = ModelBoxFuseOperation::FuseInit;\n  ops.destroy = ModelBoxFuseOperation::FuseDestroy;\n  ops.getattr = ModelBoxFuseOperation::GetAttr;\n  ops.access = ModelBoxFuseOperation::Access;\n  ops.statfs = ModelBoxFuseOperation::StatFS;\n\n  ops.rmdir = ModelBoxFuseOperation::RmDir;\n  ops.mkdir = ModelBoxFuseOperation::MkDir;\n  ops.opendir = ModelBoxFuseOperation::OpenDir;\n  ops.releasedir = ModelBoxFuseOperation::ReleaseDir;\n  ops.readdir = ModelBoxFuseOperation::ReadDir;\n  ops.unlink = ModelBoxFuseOperation::Unlink;\n\n  ops.create = 
ModelBoxFuseOperation::Create;\n  ops.open = ModelBoxFuseOperation::Open;\n  ops.release = ModelBoxFuseOperation::Release;\n  ops.read = ModelBoxFuseOperation::Read;\n  ops.write = ModelBoxFuseOperation::Write;\n  ops.fsync = ModelBoxFuseOperation::FSync;\n  ops.flush = ModelBoxFuseOperation::Flush;\n  return ops;\n}();\n\nvoid *ModelBoxFuseOperation::FuseInit(struct fuse_conn_info *conn) {\n  conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;\n  conn->want |= FUSE_CAP_ASYNC_READ;\n  CurrentModleBoxFuse()->FuseInit(conn);\n  return CurrentModleBoxFuse();\n}\n\nvoid ModelBoxFuseOperation::FuseDestroy(void *eh) {\n  CurrentModleBoxFuse()->FuseDestroy(eh);\n}\n\nint ModelBoxFuseOperation::GetAttr(const char *path, struct stat *stbuf) {\n  return CurrentModleBoxFuse()->GetAttr(path, stbuf);\n}\n\nint ModelBoxFuseOperation::Access(const char *path, int mask) {\n  return CurrentModleBoxFuse()->Access(path, mask);\n}\n\nint ModelBoxFuseOperation::StatFS(const char *path, struct statvfs *stbuf) {\n  return CurrentModleBoxFuse()->StatFS(path, stbuf);\n}\n\nint ModelBoxFuseOperation::RmDir(const char *path) {\n  return CurrentModleBoxFuse()->RmDir(path);\n}\n\nint ModelBoxFuseOperation::MkDir(const char *path, mode_t mode) {\n  return CurrentModleBoxFuse()->MkDir(path, mode);\n}\n\nint ModelBoxFuseOperation::OpenDir(const char *path,\n                                   struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->OpenDir(path, fi);\n}\n\nint ModelBoxFuseOperation::ReleaseDir(const char *path,\n                                      struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->ReleaseDir(path, fi);\n}\n\nint ModelBoxFuseOperation::ReadDir(const char *path, void *buff,\n                                   fuse_fill_dir_t filler, off_t offset,\n                                   struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->ReadDir(path, buff, filler, offset, fi);\n}\n\nint ModelBoxFuseOperation::Unlink(const char *path) {\n  return 
CurrentModleBoxFuse()->Unlink(path);\n}\n\nint ModelBoxFuseOperation::Open(const char *path, struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Open(path, fi);\n}\n\nint ModelBoxFuseOperation::Create(const char *path, mode_t mode,\n                                  struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Create(path, mode, fi);\n}\n\nint ModelBoxFuseOperation::Release(const char *path,\n                                   struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Release(path, fi);\n}\n\nint ModelBoxFuseOperation::Read(const char *path, char *buff, size_t size,\n                                off_t off, struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Read(path, buff, size, off, fi);\n}\n\nint ModelBoxFuseOperation::Write(const char *path, const char *buff,\n                                 size_t size, off_t off,\n                                 struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Write(path, buff, size, off, fi);\n}\n\nint ModelBoxFuseOperation::FSync(const char *path, int isdatasync,\n                                 struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->FSync(path, isdatasync, fi);\n}\nint ModelBoxFuseOperation::Flush(const char *path, struct fuse_file_info *fi) {\n  return CurrentModleBoxFuse()->Flush(path, fi);\n}\n\nModelBoxFuse *ModelBoxFuseOperation::CurrentModleBoxFuse() {\n  return static_cast<ModelBoxFuse *>(fuse_get_context()->private_data);\n}\n\nstd::shared_ptr<ModelBoxFuse> ModelBoxFuseOperation::CreateFuse(\n    const std::string &mount_path) {\n  std::shared_ptr<ModelBoxFuse> modelbox_fuse(new ModelBoxFuse);\n  fuse_unmount(mount_path.c_str(), nullptr);\n  modelbox_fuses_lock_.lock();\n  modelbox_fuses_[mount_path] = modelbox_fuse.get();\n  modelbox_fuses_lock_.unlock();\n\n  modelbox_fuse->SetMountPoint(mount_path);\n  return modelbox_fuse;\n}\n\nvoid ModelBoxFuseOperation::DestroyFuse(ModelBoxFuse *modelbox_fuse) {\n  
modelbox_fuses_lock_.lock();\n  modelbox_fuses_.erase(modelbox_fuse->GetMountPoint());\n  modelbox_fuses_lock_.unlock();\n}\n\nModelBoxFuse::ModelBoxFuse() = default;\nModelBoxFuse::~ModelBoxFuse() {\n  StopFuseLoop();\n  DestroyLowLevelFuse();\n}\n\nStatus ModelBoxFuse::InitLowLevelFuse() {\n  const char *argv[] = {\"modelbox\", \"-o\", \"nonempty\"};\n  const int argc = sizeof(argv) / sizeof(char *);\n  struct fuse_args args = FUSE_ARGS_INIT(argc, (char **)argv);\n  CreateDirectory(mount_point_);\n  struct fuse_chan *chan = fuse_mount(mount_point_.c_str(), &args);\n  if (chan == nullptr) {\n    std::string err = \"mount directory \";\n    err += mount_point_ + \" failed, \";\n    err += StrError(errno);\n    MBLOG_ERROR << err;\n    if (errno == ENOENT) {\n      return {STATUS_NOENT, err};\n    }\n\n    if (errno == EACCES) {\n      return {STATUS_PERMIT, err};\n    }\n\n    return {STATUS_FAULT, err};\n  }\n\n  auto *fuse = fuse_new(chan, &args, &kModelboxFuseOperation,\n                        sizeof(kModelboxFuseOperation), this);\n  if (fuse == nullptr) {\n    fuse_unmount(mount_point_.c_str(), chan);\n    return {STATUS_FAULT, \"new fuse failed.\"};\n  }\n\n  fuse_chan_ = chan;\n  fuse_ = fuse;\n  return STATUS_OK;\n}\n\nvoid ModelBoxFuse::DestroyLowLevelFuse() {\n  if (fuse_) {\n    fuse_destroy(fuse_);\n    fuse_ = nullptr;\n  }\n\n  if (fuse_chan_) {\n    fuse_unmount(mount_point_.c_str(), fuse_chan_);\n    fuse_chan_ = nullptr;\n  }\n\n  rmdir(mount_point_.c_str());\n}\n\nvoid ModelBoxFuse::SetMountPoint(const std::string &path) {\n  mount_point_ = path;\n}\n\nStatus ModelBoxFuse::AddFuseFile(\n    const std::shared_ptr<ModelBoxFileInode> &fuse_file) {\n  auto path = fuse_file->GetPath();\n  auto entry = root_entry_->LookUp(path);\n  if (entry) {\n    return STATUS_EXIST;\n  }\n\n  std::string parent = GetDirName(path);\n  std::string dir = GetBaseName(path);\n\n  auto parent_entry = root_entry_->LookUp(parent);\n  if (parent_entry == nullptr) {\n    
return STATUS_NOENT;\n  }\n\n  entry = std::make_shared<ModelBoxDEntry>();\n  entry->SetName(dir);\n  entry->SetInode(fuse_file);\n  parent_entry->AddChild(entry);\n  return STATUS_OK;\n}\n\nStatus ModelBoxFuse::RmvFuseFile(const std::string &path) {\n  auto entry = root_entry_->LookUp(path);\n  if (!entry) {\n    return STATUS_NOENT;\n  }\n\n  std::string parent = GetDirName(path);\n  std::string name = GetBaseName(path);\n\n  auto parent_entry = root_entry_->LookUp(parent);\n  if (parent_entry == nullptr) {\n    return STATUS_NOENT;\n  }\n\n  if (parent_entry->RmvChild(name) != 0) {\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nstd::string ModelBoxFuse::GetMountPoint() { return mount_point_; }\n\nvoid *ModelBoxFuse::FuseInit(struct fuse_conn_info *conn) { return nullptr; }\nvoid ModelBoxFuse::FuseDestroy(void *eh) {}\nint ModelBoxFuse::GetAttr(const char *path, struct stat *stbuf) {\n  auto entry = root_entry_->LookUp(path);\n  if (entry == nullptr) {\n    return -ENOENT;\n  }\n\n  auto inode = entry->GetInode();\n  FillDefaultStat(stbuf);\n  switch (inode->GetInodeType()) {\n    case MODELBOX_FUSE_INODE_TYPE_FILE:\n      stbuf->st_mode |= S_IFREG;\n      stbuf->st_mode |= 0440;\n      break;\n    case MODELBOX_FUSE_INODE_TYPE_DIR:\n      stbuf->st_mode |= S_IFDIR;\n      stbuf->st_mode |= 0750;\n      break;\n    default:\n      break;\n  }\n  inode->FillStat(stbuf);\n  stbuf->st_blocks = stbuf->st_size / 512;\n  return 0;\n}\n\nint ModelBoxFuse::Access(const char *path, int mask) { return 0; }\nint ModelBoxFuse::StatFS(const char *path, struct statvfs *stbuf) { return 0; }\n\nint ModelBoxFuse::RmDir(const char *path) {\n  MBLOG_DEBUG << \"rm dir: \" << path;\n\n  auto entry = root_entry_->LookUp(path);\n  if (!entry) {\n    return -ENOENT;\n  }\n\n  std::string parent = GetDirName(path);\n  std::string dir = GetBaseName(path);\n\n  auto parent_entry = root_entry_->LookUp(parent);\n  if (parent_entry == nullptr) {\n    return -ENOENT;\n  }\n\n  
parent_entry->RmvChild(dir);\n\n  return 0;\n}\n\nint ModelBoxFuse::MkDir(const char *path, mode_t mode) {\n  MBLOG_DEBUG << \"make dir: \" << path;\n\n  auto entry = root_entry_->LookUp(path);\n  if (entry) {\n    return -EEXIST;\n  }\n\n  std::string parent = GetDirName(path);\n  std::string dir = GetBaseName(path);\n\n  auto parent_entry = root_entry_->LookUp(parent);\n  if (parent_entry == nullptr) {\n    return -ENOENT;\n  }\n\n  auto inode = std::make_shared<ModelBoxDirInode>();\n  struct stat st;\n  FillDefaultStat(&st);\n  st.st_mode = mode;\n  st.st_size = 4096;\n  st.st_nlink = 2;\n  inode->SetStat(&st);\n  entry = std::make_shared<ModelBoxDEntry>();\n  entry->SetName(dir);\n  entry->SetInode(inode);\n  parent_entry->AddChild(entry);\n\n  return 0;\n}\n\nint ModelBoxFuse::OpenDir(const char *path, struct fuse_file_info *fi) {\n  auto entry = root_entry_->LookUp(path);\n  if (entry == nullptr) {\n    return -ENOENT;\n  }\n\n  auto *holder = new std::shared_ptr<ModelBoxDEntry>;\n  *holder = entry;\n  fi->fh = (uint64_t)holder;\n  return 0;\n}\n\nint ModelBoxFuse::ReleaseDir(const char *path, struct fuse_file_info *fi) {\n  auto *entry = (std::shared_ptr<ModelBoxDEntry> *)(fi->fh);\n  delete entry;\n  return 0;\n}\n\nint ModelBoxFuse::ReadDir(const char *path, void *buff, fuse_fill_dir_t filler,\n                          off_t offset, struct fuse_file_info *fi) {\n  auto *entry = (std::shared_ptr<ModelBoxDEntry> *)(fi->fh);\n\n  for (const auto &child : (*entry)->Children()) {\n    struct stat st;\n    auto inode = child->GetInode();\n    if (inode == nullptr) {\n      continue;\n    }\n    inode->FillStat(&st);\n    int res = filler(buff, child->GetName().c_str(), &st, 0);\n    if (res != 0) {\n      MBLOG_WARN << \"fill stat failed for \" << child->GetName();\n    }\n  }\n\n  const char *const dots[] = {\".\", \"..\"};\n\n  for (const auto *str : dots) {\n    struct stat st;\n    FillDefaultStat(&st);\n    int res = filler(buff, str, &st, 0);\n    if (res 
!= 0) {\n      MBLOG_WARN << \"fill stat failed for \" << str;\n    }\n  }\n  return 0;\n}\nint ModelBoxFuse::Unlink(const char *path) {\n  MBLOG_DEBUG << \"unlink file: \" << path;\n\n  auto ret = RmvFuseFile(path);\n\n  if (ret == STATUS_NOENT) {\n    return -ENOENT;\n  }\n\n  return 0;\n}\n\nint ModelBoxFuse::Create(const char *path, mode_t mode,\n                         struct fuse_file_info *fi) {\n  MBLOG_DEBUG << \"create file: \" << path;\n  return -ENOSYS;\n}\n\nint ModelBoxFuse::Open(const char *path, struct fuse_file_info *fi) {\n  MBLOG_DEBUG << \"open file: \" << path;\n\n  auto entry = root_entry_->LookUp(path);\n  if (entry == nullptr) {\n    return -ENOENT;\n  }\n\n  auto inode = entry->GetInode();\n  if (inode == nullptr) {\n    return -EBADFD;\n  }\n\n  if (inode->GetInodeType() != MODELBOX_FUSE_INODE_TYPE_FILE) {\n    return -EBADFD;\n  }\n\n  auto file_inode = std::dynamic_pointer_cast<ModelBoxFileInode>(inode);\n  if (file_inode == nullptr) {\n    return -EBADFD;\n  }\n\n  auto file_ops = file_inode->CreateFile();\n  if (file_ops == nullptr) {\n    return -ENOMEM;\n  }\n\n  int ret = file_ops->Open(path);\n  if (ret != 0) {\n    return ret;\n  }\n\n  auto *holder = new std::shared_ptr<ModelBoxFuseFile>;\n  *holder = file_ops;\n  fi->fh = (uint64_t)holder;\n  return 0;\n}\n\nint ModelBoxFuse::Release(const char *path, struct fuse_file_info *fi) {\n  auto *fuse_file = (std::shared_ptr<ModelBoxFuseFile> *)(fi->fh);\n  int ret = (*fuse_file)->Release();\n  delete fuse_file;\n  MBLOG_DEBUG << \"close file: \" << path;\n  return ret;\n}\n\nint ModelBoxFuse::Read(const char *path, char *buff, size_t size, off_t off,\n                       struct fuse_file_info *fi) {\n  auto *fuse_file = (std::shared_ptr<ModelBoxFuseFile> *)(fi->fh);\n  return (*fuse_file)->Read(buff, size, off);\n}\n\nint ModelBoxFuse::Write(const char *path, const char *buff, size_t size,\n                        off_t off, struct fuse_file_info *fi) {\n  auto *fuse_file = 
(std::shared_ptr<ModelBoxFuseFile> *)(fi->fh);\n  return (*fuse_file)->Write(buff, size, off);\n}\n\nint ModelBoxFuse::FSync(const char *path, int isdatasync,\n                        struct fuse_file_info *fi) {\n  auto *fuse_file = (std::shared_ptr<ModelBoxFuseFile> *)(fi->fh);\n  return (*fuse_file)->FSync(isdatasync);\n}\n\nint ModelBoxFuse::Flush(const char *path, struct fuse_file_info *fi) {\n  auto *fuse_file = (std::shared_ptr<ModelBoxFuseFile> *)(fi->fh);\n  return (*fuse_file)->Flush();\n}\n\nStatus ModelBoxFuse::Run() {\n  auto ret = InitLowLevelFuse();\n  if (!ret) {\n    return ret;\n  }\n\n  MBLOG_DEBUG << \"fuse \" << mount_point_ << \" start\";\n  is_running_ = true;\n  root_entry_ = std::make_shared<ModelBoxDEntry>();\n  root_entry_->SetName(\"/\");\n  auto inode = std::make_shared<ModelBoxDirInode>();\n\n  struct stat stbuf;\n  FillDefaultStat(&stbuf);\n  stbuf.st_size = 4096;\n  stbuf.st_mode |= S_IFDIR;\n  stbuf.st_nlink = 2;\n  inode->SetStat(&stbuf);\n  root_entry_->SetInode(inode);\n\n  loop_thread_ = std::thread(&ModelBoxFuse::FuseLoop, this);\n\n  return STATUS_OK;\n}\n\nvoid ModelBoxFuse::FillDefaultStat(struct stat *stbuf) {\n  struct timespec ts;\n  timespec_get(&ts, TIME_UTC);\n  stbuf->st_mode = 0440;\n  stbuf->st_mtim = ts;\n  stbuf->st_atim = ts;\n  stbuf->st_uid = getuid();\n  stbuf->st_gid = getgid();\n  stbuf->st_blksize = 4096;\n  stbuf->st_size = 0;\n  stbuf->st_nlink = 1;\n  stbuf->st_blocks = stbuf->st_size / 512;\n}\n\nvoid ModelBoxFuse::Stop() { StopFuseLoop(); }\n\nvoid ModelBoxFuse::FuseLoop() {\n  os->Thread->SetName(\"FuseDaemon\");\n  fuse_loop_mt(fuse_);\n}\n\nvoid ModelBoxFuse::StopFuseLoop() {\n  if (is_running_ == false) {\n    return;\n  }\n\n  MBLOG_DEBUG << \"fuse \" << mount_point_ << \" stop\";\n\n  is_running_ = false;\n  ModelBoxFuseOperation::DestroyFuse(this);\n  fuse_exit(fuse_);\n  fuse_unmount(mount_point_.c_str(), fuse_chan_);\n  fuse_chan_ = nullptr;\n\n  if (loop_thread_.joinable()) {\n    
loop_thread_.join();\n  }\n\n  DestroyLowLevelFuse();\n}\n\nModelBoxInode::ModelBoxInode() = default;\n\nModelBoxInode::~ModelBoxInode() = default;\n\nint ModelBoxInode::FillStat(struct stat *stat) { return 0; };\n\nvoid ModelBoxInode::SetDEntry(const std::shared_ptr<ModelBoxDEntry> &dentry) {\n  dentry_ = dentry;\n}\n\nstd::shared_ptr<ModelBoxDEntry> ModelBoxInode::GetDEntry() {\n  return dentry_.lock();\n}\n\nModelBoxDirInode::ModelBoxDirInode() {\n  SetInodeType(MODELBOX_FUSE_INODE_TYPE_DIR);\n};\n\nModelBoxDirInode::~ModelBoxDirInode() = default;\n\nvoid ModelBoxDirInode::SetStat(struct stat *stat) {\n  stat_ = *stat;\n  stat_.st_mode |= S_IFDIR;\n}\n\nint ModelBoxDirInode::FillStat(struct stat *stat) {\n  *stat = stat_;\n  auto dentry = GetDEntry();\n  if (dentry) {\n    stat->st_nlink += dentry->ChildDirNum();\n  }\n  return 0;\n}\n\nModelBoxFileInode::ModelBoxFileInode() {\n  SetInodeType(MODELBOX_FUSE_INODE_TYPE_FILE);\n}\nModelBoxFileInode::~ModelBoxFileInode() = default;\n\nModelBoxDEntry::ModelBoxDEntry() = default;\nModelBoxDEntry::~ModelBoxDEntry() = default;\n\nvoid ModelBoxDEntry::SetParent(const std::shared_ptr<ModelBoxDEntry> &dentry) {\n  parent_ = dentry;\n}\n\nstd::shared_ptr<ModelBoxDEntry> ModelBoxDEntry::LookUp(\n    const std::string &path) {\n  auto split_path = SplitPath(path, '/');\n  if (path == \"/\") {\n    return shared_from_this();\n  }\n\n  if (split_path.size() > 0 && split_path.front().length() == 0) {\n    split_path.pop_front();\n  }\n\n  return LookUp(split_path);\n}\n\nstd::shared_ptr<ModelBoxDEntry> ModelBoxDEntry::LookUp(\n    std::list<std::string> &names) {\n  if (names.size() <= 0) {\n    return shared_from_this();\n  }\n\n  auto &current = names.front();\n  names.pop_front();\n  if (current == \".\") {\n    return shared_from_this();\n  }\n\n  if (current == \"..\") {\n    return Parent();\n  }\n\n  std::unique_lock<std::mutex> lock(children_lock_);\n  auto itr = children_.find(current);\n  if (itr == children_.end()) 
{\n    return nullptr;\n  }\n  auto cur_entry = itr->second;\n  lock.unlock();\n\n  return cur_entry->LookUp(names);\n}\n\nint ModelBoxDEntry::AddChild(const std::shared_ptr<ModelBoxDEntry> &dentry) {\n  const auto &name = dentry->GetName();\n  std::unique_lock<std::mutex> lock(children_lock_);\n  auto itr = children_.find(name);\n  if (itr != children_.end()) {\n    return -1;\n  }\n\n  if (dentry->inode_->GetInodeType() == MODELBOX_FUSE_INODE_TYPE_DIR) {\n    dir_num_++;\n  }\n  children_[name] = dentry;\n  return 0;\n}\n\nint ModelBoxDEntry::RmvChild(const std::string &name) {\n  std::unique_lock<std::mutex> lock(children_lock_);\n  auto itr = children_.find(name);\n  if (itr == children_.end()) {\n    return -1;\n  }\n\n  if (itr->second->inode_->GetInodeType() == MODELBOX_FUSE_INODE_TYPE_DIR) {\n    dir_num_--;\n  }\n\n  children_.erase(name);\n  return 0;\n}\n\nint ModelBoxDEntry::ChildNum() {\n  std::unique_lock<std::mutex> lock(children_lock_);\n  return children_.size();\n}\n\nint ModelBoxDEntry::ChildDirNum() {\n  std::unique_lock<std::mutex> lock(children_lock_);\n  return dir_num_;\n}\n\nvoid ModelBoxDEntry::SetName(const std::string &name) { name_ = name; }\n\nconst std::string &ModelBoxDEntry::GetName() { return name_; }\n\nvoid ModelBoxDEntry::SetInode(const std::shared_ptr<ModelBoxInode> &inode) {\n  inode_ = inode;\n  inode_->SetDEntry(shared_from_this());\n}\n\nstd::shared_ptr<ModelBoxInode> ModelBoxDEntry::GetInode() { return inode_; }\n\nstd::shared_ptr<ModelBoxDEntry> ModelBoxDEntry::Parent() {\n  return parent_.lock();\n}\nstd::vector<std::shared_ptr<ModelBoxDEntry>> ModelBoxDEntry::Children() {\n  std::vector<std::shared_ptr<ModelBoxDEntry>> result;\n  std::unique_lock<std::mutex> lock(children_lock_);\n  for (auto &item : children_) {\n    result.push_back(item.second);\n  }\n  return result;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/common/libs/fuse/modelbox_fuse_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/drivers/common/modelbox_fuse.h\"\n\n#include <dirent.h>\n#include <securec.h>\n#include <sys/types.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n\nconst char *MOCK_FUSE_FILE = \"/tmp/modelbox_fuse\";\n\nclass ModelBoxFuseTest : public testing::Test {\n public:\n  ModelBoxFuseTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nStatus ModelBoxFuseTest::AddMockFlowUnit() { return STATUS_OK; }\n\nclass MockFuseFile : public modelbox::ModelBoxFuseFile {\n public:\n  virtual ~MockFuseFile() = default;\n  \n  std::string msg{\"Hello, world\"};\n  int Open(const std::string &path) override { return 0; }\n  int Release() override { 
return 0; }\n  int Read(char *buff, size_t size, off_t off) override {\n    if (off > (off_t)msg.length()) {\n      return 0;\n    }\n    snprintf_s(buff, size, size, \"%s\", msg.c_str());\n    return msg.length();\n  }\n  int Write(const char *buff, size_t size, off_t off) override { return 0; }\n  int FSync(int isdatasync) override { return 0; }\n  int Flush() override { return 0; }\n  int FileSize() { return msg.length(); }\n  std::string GetMsg() { return msg; }\n};\n\nclass MockFuseInode : public modelbox::ModelBoxFileInode {\n public:\n  MockFuseInode(const std::string &path) { path_ = path; };\n  ~MockFuseInode() override = default;\n\n  int FillStat(struct stat *stat) override {\n    auto inode = std::make_shared<MockFuseFile>();\n    stat->st_size = inode->FileSize();\n    return 0;\n  }\n\n  std::shared_ptr<modelbox::ModelBoxFuseFile> CreateFile() override {\n    return std::make_shared<MockFuseFile>();\n  }\n\n  std::string GetPath() override { return path_; }\n\n private:\n  std::string path_;\n};\n\nTEST_F(ModelBoxFuseTest, FuseStat) {\n  auto fuse = modelbox::ModelBoxFuseOperation::CreateFuse(MOCK_FUSE_FILE);\n  auto ret = fuse->Run();\n  if (ret == STATUS_NOENT || ret == STATUS_PERMIT) {\n    GTEST_SKIP();\n  }\n\n  struct stat stbuf;\n  EXPECT_EQ(0, stat(MOCK_FUSE_FILE, &stbuf));\n  EXPECT_EQ(2, stbuf.st_nlink);\n\n  std::string name = \"/file\";\n  auto inode = std::make_shared<MockFuseInode>(name);\n\n  EXPECT_EQ(0, stat(MOCK_FUSE_FILE, &stbuf));\n  EXPECT_EQ(2, stbuf.st_nlink);\n\n  name = MOCK_FUSE_FILE;\n  name += \"/dir\";\n  mkdir(name.c_str(), 0755);\n  EXPECT_EQ(0, stat(MOCK_FUSE_FILE, &stbuf));\n  EXPECT_EQ(3, stbuf.st_nlink);\n}\n\nTEST_F(ModelBoxFuseTest, FuseMountCheckFile) {\n  int expect_dir_num = 10;\n  int expect_file_num = 10;\n  int expect_total = expect_dir_num + expect_file_num + 2;\n\n  auto fuse = modelbox::ModelBoxFuseOperation::CreateFuse(MOCK_FUSE_FILE);\n  auto ret = fuse->Run();\n  if (ret == STATUS_NOENT || ret == 
STATUS_PERMIT) {\n    GTEST_SKIP();\n  }\n  ASSERT_EQ(ret, modelbox::STATUS_OK);\n  for (int i = 0; i < expect_dir_num; i++) {\n    std::string name = \"/dir\";\n    name += std::to_string(i);\n    EXPECT_EQ(fuse->MkDir(name.c_str(), 0755), 0);\n  }\n\n  for (int i = 0; i < expect_dir_num; i++) {\n    std::string name = \"/file\";\n    name += std::to_string(i);\n    auto inode = std::make_shared<MockFuseInode>(name);\n    fuse->AddFuseFile(inode);\n  }\n\n  DIR *dir;\n  struct dirent *ent;\n  int totalnum = 0;\n  int dirnum = 0;\n  int filenum = 0;\n  if ((dir = opendir(MOCK_FUSE_FILE)) != nullptr) {\n    while ((ent = readdir(dir)) != nullptr) {\n      struct stat stbuf;\n      std::string path = MOCK_FUSE_FILE;\n      path += \"/\";\n      path += ent->d_name;\n      if (stat(path.c_str(), &stbuf) == -1) {\n        continue;\n      }\n      totalnum++;\n\n      if (strncmp(ent->d_name, \".\", PATH_MAX - 1) == 0 ||\n          strncmp(ent->d_name, \"..\", PATH_MAX - 1) == 0) {\n        continue;\n      }\n\n      if (S_ISDIR(stbuf.st_mode) == 0) {\n        dirnum++;\n      }\n\n      if (S_ISREG(stbuf.st_mode) == 0) {\n        filenum++;\n      }\n    }\n    closedir(dir);\n  }\n\n  EXPECT_EQ(totalnum, expect_total);\n  EXPECT_EQ(filenum, expect_file_num);\n  EXPECT_EQ(dirnum, expect_file_num);\n}\n\nTEST_F(ModelBoxFuseTest, FileOpen) {\n  auto fuse = modelbox::ModelBoxFuseOperation::CreateFuse(MOCK_FUSE_FILE);\n  auto ret = fuse->Run();\n  if (ret == STATUS_NOENT || ret == STATUS_PERMIT) {\n    GTEST_SKIP();\n  }\n  ASSERT_EQ(ret, modelbox::STATUS_OK);\n  auto inode = std::make_shared<MockFuseInode>(\"/file\");\n  fuse->AddFuseFile(inode);\n\n  std::string item_data;\n  std::ifstream infile;\n  infile.open(MOCK_FUSE_FILE + inode->GetPath());\n  EXPECT_FALSE(infile.fail());\n  Defer { infile.close(); };\n  std::getline(infile, item_data);\n\n  EXPECT_EQ(\n      item_data,\n      std::dynamic_pointer_cast<MockFuseFile>(inode->CreateFile())->GetMsg());\n}\n\n}  // 
namespace modelbox"
  },
  {
    "path": "src/drivers/common/libs/include/modelbox/drivers/common/file_requester.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FILE_REQUESTER_H_\n#define MODELBOX_FILE_REQUESTER_H_\n\n#include <modelbox/base/thread_pool.h>\n\n#include <mutex>\n#include <string>\n#include <unordered_map>\n\n#include \"cpprest/http_listener.h\"\n#include \"modelbox/base/status.h\"\n\nconst std::string DEFAULT_FILE_REQUEST_URI = \"http://127.0.0.1:8024\";\n\nnamespace modelbox {\n\nclass FileGetHandler {\n public:\n  /**\n   * @brief get data.\n   * @param buff read buffer.\n   * @param size buffer size.\n   * @param off current read offset.\n   * @param path file path.\n   * @return read result.\n   */\n  virtual Status Get(unsigned char *buff, size_t size, off_t off) = 0;\n\n  /**\n   * @brief get file size.\n   * @param path file path.\n   * @return file size.\n   */\n  virtual uint64_t GetFileSize() = 0;\n\n  virtual ~FileGetHandler() = default;\n};\n\nclass FileRequester {\n public:\n  /*\n   * @brief get FileRequester instance\n   * @return FileRequester instance\n   */\n  static std::shared_ptr<FileRequester> GetInstance();\n\n  Status RegisterUrlHandler(const std::string &relative_url,\n                            const std::shared_ptr<FileGetHandler> &handler);\n\n  Status DeregisterUrl(const std::string &relative_url);\n\n  void SetMaxFileReadSize(int read_size);\n\n  virtual ~FileRequester();\n\n private:\n  FileRequester() = 
default;\n  Status Init();\n  void HandleFileGet(const web::http::http_request &request);\n  bool IsValidRequest(const web::http::http_request &request);\n  bool ReadRequestRange(const web::http::http_request &request,\n                        uint64_t file_size, uint64_t &range_start,\n                        uint64_t &range_end);\n  void ProcessRequest(const web::http::http_request &request,\n                      const std::shared_ptr<FileGetHandler> &handler,\n                      uint64_t range_start, uint64_t range_end);\n\n  static std::once_flag file_requester_init_flag_;\n  std::shared_ptr<web::http::experimental::listener::http_listener> listener_;\n  std::unordered_map<std::string, std::shared_ptr<FileGetHandler>>\n      file_handlers_;\n  std::mutex handler_lock_;\n  std::shared_ptr<ThreadPool> pool_;\n  int max_read_size_ = 0;\n};\n};  // namespace modelbox\n\n#endif  // MODELBOX_FILE_REQUESTER_H_"
  },
  {
    "path": "src/drivers/common/libs/include/modelbox/drivers/common/modelbox_fuse.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FUSE_H_\n#define MODELBOX_FUSE_H_\n\n#define _FILE_OFFSET_BITS 64\n#define FUSE_USE_VERSION 31\n\n#include <assert.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <fuse.h>\n#include <modelbox/base/status.h>\n#include <stddef.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <list>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <thread>\n#include <vector>\n\nnamespace modelbox {\n\n/**\n * @brief modelbox fuse file operations\n */\nclass ModelBoxFuseFile {\n public:\n  /**\n   * @brief Open file.\n   * @param path file path.\n   * @return open result.\n   */\n  virtual int Open(const std::string &path) = 0;\n\n  /**\n   * @brief Release file.\n   * @return release result.\n   */\n  virtual int Release() = 0;\n\n  /**\n   * @brief read data from file.\n   * @param buff read buffer.\n   * @param size buffer size.\n   * @param off current read offset.\n   * @return read result.\n   */\n  virtual int Read(char *buff, size_t size, off_t off) = 0;\n\n  /**\n   * @brief write data to file.\n   * @param buff data.\n   * @param size data size.\n   * @param off current write offset.\n   * @return write result.\n   */\n  virtual int Write(const char *buff, size_t size, off_t off) = 0;\n\n  /**\n   * @brief sync file\n   * @param isdatasync sync data?\n   * 
@return sync result\n   */\n  virtual int FSync(int isdatasync) = 0;\n\n  /**\n   * @brief flush file\n   * @return flush result\n   */\n  virtual int Flush() = 0;\n};\n\n/**\n * @brief modelbox fuse file type\n */\nenum MODELBOX_FUSE_INODE_TYPE : unsigned int {\n\n  /** @brief type none */\n  MODELBOX_FUSE_INODE_TYPE_NONE = 0,\n  /** @brief inode type file */\n  MODELBOX_FUSE_INODE_TYPE_FILE = 1,\n  /** @brief inode type directory */\n  MODELBOX_FUSE_INODE_TYPE_DIR = 2,\n};\n\nclass ModelBoxDEntry;\n\n/**\n * @brief modelbox fuse file inode\n */\nclass ModelBoxInode {\n public:\n  /**\n   * @brief constructor\n   */\n  ModelBoxInode();\n\n  /**\n   * @brief destructor\n   */\n  virtual ~ModelBoxInode();\n\n  /**\n   * @brief fillup file stat\n   * @param stat output parameter, file stat\n   * @return fillup result, whether sucess or not.\n   */\n  virtual int FillStat(struct stat *stat);\n\n  /**\n   * @brief Get inode type\n   * @return inode type\n   */\n  enum MODELBOX_FUSE_INODE_TYPE GetInodeType() { return inode_type_; }\n\n  /**\n   * @brief get DEntry\n   * @return dentry\n   */\n  std::shared_ptr<ModelBoxDEntry> GetDEntry();\n\n protected:\n  /**\n   * @brief set inode type\n   * @param type inode type\n   */\n  void SetInodeType(enum MODELBOX_FUSE_INODE_TYPE type) { inode_type_ = type; }\n\n private:\n  friend ModelBoxDEntry;\n  void SetDEntry(const std::shared_ptr<ModelBoxDEntry> &dentry);\n\n  std::weak_ptr<ModelBoxDEntry> dentry_;\n  enum MODELBOX_FUSE_INODE_TYPE inode_type_;\n};\n\n/**\n * @brief directory inode\n */\nclass ModelBoxDirInode : public ModelBoxInode {\n public:\n  /**\n   * @brief constructor\n   */\n  ModelBoxDirInode();\n\n  /**\n   * @brief destructor\n   */\n  ~ModelBoxDirInode() override;\n\n  /**\n   * @brief Store stat structure data\n   * @brief stat stat data\n   */\n  void SetStat(struct stat *stat);\n\n  /**\n   * @brief fillup file stat\n   * @param stat output parameter, file stat\n   * @return fillup result, whether sucess 
or not.\n   */\n  int FillStat(struct stat *stat) override;\n\n private:\n  struct stat stat_;\n};\n\n/**\n * @brief file inode\n */\nclass ModelBoxFileInode : public ModelBoxInode {\n public:\n  /**\n   * @brief constructor\n   */\n  ModelBoxFileInode();\n\n  /**\n   * @brief destructor\n   */\n  ~ModelBoxFileInode() override;\n\n  /**\n   * @brief  Create modelbox fuse file object\n   * @return fuse file object\n   */\n  virtual std::shared_ptr<ModelBoxFuseFile> CreateFile() = 0;\n\n  /**\n   * @brief fillup file stat\n   * @param stat output parameter, file stat\n   * @return fillup result, whether sucess or not.\n   */\n  int FillStat(struct stat *stat) override = 0;\n\n  /**\n   * @brief Get current inode file path\n   * @return inode path.\n   */\n  virtual std::string GetPath() = 0;\n};\n\nclass ModelBoxDEntry : public std::enable_shared_from_this<ModelBoxDEntry> {\n public:\n  /**\n   * @brief constructor\n   */\n  ModelBoxDEntry();\n\n  /**\n   * @brief destructor\n   */\n  virtual ~ModelBoxDEntry();\n\n  /**\n   * @brief Find the DEntry structure according to the path\n   * @param path fuse file path\n   * @return DEntry object\n   */\n  std::shared_ptr<ModelBoxDEntry> LookUp(const std::string &path);\n\n  /**\n   * @brief Set parent DEntry\n   * @param dentry dentry object\n   * @return result\n   */\n  void SetParent(const std::shared_ptr<ModelBoxDEntry> &dentry);\n\n  /**\n   * @brief Add child DEntry\n   * @param dentry dentry object\n   * @return result\n   */\n  int AddChild(const std::shared_ptr<ModelBoxDEntry> &dentry);\n\n  /**\n   * @brief remove child DEntry by name\n   * @param dentry dentry object\n   * @return result\n   */\n  int RmvChild(const std::string &name);\n\n  /**\n   * @brief Get child entry number\n   * @return number of child inodes\n   */\n  int ChildNum();\n\n  /**\n   * @brief Get child directory number\n   * @return number of child inodes\n   */\n  int ChildDirNum();\n\n  /**\n   * @brief Get parent entry object\n   * 
@return parent object, returns null if it does not exist\n   */\n  std::shared_ptr<ModelBoxDEntry> Parent();\n\n  /**\n   * @brief Get all children entry objects\n   * @return vector of children objects\n   */\n  std::vector<std::shared_ptr<ModelBoxDEntry>> Children();\n\n  /**\n   * @brief Set current dentry name\n   * @param name entry name\n   */\n  void SetName(const std::string &name);\n\n  /**\n   * @brief Get current dentry name\n   * @return Entry name\n   */\n  const std::string &GetName();\n\n  /**\n   * @brief set inode to dentry\n   * @param inode inode object\n   */\n  void SetInode(const std::shared_ptr<ModelBoxInode> &inode);\n\n  /**\n   * @brief Get inode from dentry\n   * @return inode object\n   */\n  std::shared_ptr<ModelBoxInode> GetInode();\n\n private:\n  std::shared_ptr<ModelBoxDEntry> LookUp(std::list<std::string> &names);\n  std::string name_;\n  std::shared_ptr<ModelBoxInode> inode_;\n  std::weak_ptr<ModelBoxDEntry> parent_;\n  std::map<std::string, std::shared_ptr<ModelBoxDEntry>> children_;\n  int dir_num_{0};\n  std::mutex children_lock_;\n};\n\nclass ModelBoxFuseOperation;\n\n/**\n * @brief modelbox fuse\n */\nclass ModelBoxFuse {\n public:\n  /**\n   * @brief destructor\n   */\n  virtual ~ModelBoxFuse();\n\n  /**\n   * @brief Add fuse file to modelbox fuse filesystem\n   * @param fuse_file inode of fuse file\n   * @return whether add success.\n   */\n  Status AddFuseFile(const std::shared_ptr<ModelBoxFileInode> &fuse_file);\n\n  /**\n   * @brief Remove fuse file form modelbox fuse filesystem\n   * @param path file path\n   * @return whether remove success.\n   */\n  Status RmvFuseFile(const std::string &path);\n\n  /**\n   * @brief Get mount pointer path\n   * @return path\n   */\n  std::string GetMountPoint();\n\n  /**\n   * @brief Delete directory\n   * @param path path\n   */\n  int RmDir(const char *path);\n\n  /**\n   * @brief make directory\n   * @param path path\n   */\n  int MkDir(const char *path, mode_t mode);\n\n  /**\n   
* @brief unlink file\n   * @param path path\n   */\n  int Unlink(const char *path);\n\n  /**\n   * @brief Run fuse file\n   */\n  Status Run();\n\n  /**\n   * @brief stop fuse\n   */\n  void Stop();\n\n private:\n  friend ModelBoxFuseOperation;\n  ModelBoxFuse();\n\n  /* Low level fuse API */\n  Status InitLowLevelFuse();\n  void DestroyLowLevelFuse();\n  void SetMountPoint(const std::string &path);\n\n  /* Fuse fops callback function */\n  void *FuseInit(struct fuse_conn_info *conn);\n  void FuseDestroy(void *eh);\n  int GetAttr(const char *path, struct stat *stbuf);\n  int Access(const char *path, int mask);\n  int StatFS(const char *path, struct statvfs *stbuf);\n\n  int OpenDir(const char *path, struct fuse_file_info *fi);\n  int ReleaseDir(const char *path, struct fuse_file_info *fi);\n  int ReadDir(const char *path, void *buff, fuse_fill_dir_t filler,\n              off_t offset, struct fuse_file_info *fi);\n  int Create(const char *path, mode_t mode, struct fuse_file_info *fi);\n  int Open(const char *path, struct fuse_file_info *fi);\n  int Release(const char *path, struct fuse_file_info *fi);\n  int Read(const char *path, char *buff, size_t size, off_t off,\n           struct fuse_file_info *fi);\n  int Write(const char *path, const char *buff, size_t size, off_t off,\n            struct fuse_file_info *fi);\n  int FSync(const char *path, int isdatasync, struct fuse_file_info *fi);\n  int Flush(const char *path, struct fuse_file_info *fi);\n\n  /* fuse loop */\n  void FuseLoop();\n  void StopFuseLoop();\n  void FillDefaultStat(struct stat *stbuf);\n  struct fuse_chan *fuse_chan_{nullptr};\n  struct fuse *fuse_{nullptr};\n  bool is_running_{false};\n  std::string mount_point_;\n  std::thread loop_thread_;\n  std::shared_ptr<ModelBoxDEntry> root_entry_;\n};\n\nclass ModelBoxFuseOperation {\n public:\n  /**\n   * @brief constructor\n   */\n  ModelBoxFuseOperation();\n  /**\n   * @brief destructor\n   */\n  virtual ~ModelBoxFuseOperation();\n\n  /**\n   * 
@brief Create modelbox fuse file system\n   * @param mount_path modelbox fuse mount point\n   * @return modelbox fuse object\n   */\n  static std::shared_ptr<ModelBoxFuse> CreateFuse(\n      const std::string &mount_path);\n\n  static void DestroyFuse(ModelBoxFuse *modelbox_fuse);\n\n  static void *FuseInit(struct fuse_conn_info *conn);\n  static void FuseDestroy(void *eh);\n  static int GetAttr(const char *path, struct stat *stbuf);\n  static int Access(const char *path, int mask);\n  static int StatFS(const char *path, struct statvfs *stbuf);\n\n  static int RmDir(const char *path);\n  static int MkDir(const char *path, mode_t mode);\n  static int OpenDir(const char *path, struct fuse_file_info *fi);\n  static int ReleaseDir(const char *path, struct fuse_file_info *fi);\n  static int ReadDir(const char *path, void *buff, fuse_fill_dir_t filler,\n                     off_t offset, struct fuse_file_info *fi);\n  static int Unlink(const char *path);\n\n  static int Create(const char *path, mode_t mode, struct fuse_file_info *fi);\n  static int Open(const char *path, struct fuse_file_info *fi);\n  static int Release(const char *path, struct fuse_file_info *fi);\n  static int Read(const char *path, char *buff, size_t size, off_t off,\n                  struct fuse_file_info *fi);\n  static int Write(const char *path, const char *buff, size_t size, off_t off,\n                   struct fuse_file_info *fi);\n  static int FSync(const char *path, int isdatasync, struct fuse_file_info *fi);\n  static int Flush(const char *path, struct fuse_file_info *fi);\n\n  /**\n   * @brief Get the current Fuse context\n   * @return fuse object\n   */\n  static ModelBoxFuse *CurrentModleBoxFuse();\n\n private:\n  static void FuseLoop();\n  static std::map<std::string, ModelBoxFuse *> modelbox_fuses_;\n  static std::mutex modelbox_fuses_lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FUSE_H_\n"
  },
  {
    "path": "src/drivers/common/python/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-common-python)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nif(NOT ${PYTHONLIBS_FOUND})\n    message(STATUS \"Not found python, disable python api\")\n    return()\nendif()\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${PYTHON_INCLUDE_DIRS})\ninclude_directories(${TOML_INCLUDE_DIR})\ninclude_directories(${PYBIND11_INCLUDE_DIRS})\n\nset(LIBRARY modelbox-common-modelbox-api-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\ntarget_compile_options(${LIBRARY} PUBLIC ${PYBIND11_CPP_STANDARD})\ntarget_compile_options(${LIBRARY} PUBLIC -fvisibility=hidden)\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_MODELBOX_API_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_MODELBOX_API_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/modelbox_api.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox_api.h\"\n\n#include <modelbox/base/config.h>\n#include <pybind11/chrono.h>\n#include <pybind11/complex.h>\n#include <pybind11/functional.h>\n#include <pybind11/numpy.h>\n#include <pybind11/operators.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <securec.h>\n\n#include <string>\n#include <utility>\n\n#include \"modelbox/data_context.h\"\n#include \"modelbox/error.h\"\n#include \"modelbox/external_data_simple.h\"\n#include \"modelbox/flow.h\"\n#include \"modelbox/modelbox_engine.h\"\n#include \"modelbox/type.h\"\n#include \"python_common.h\"\n#include \"python_flow.h\"\n#include \"python_log.h\"\n#include \"python_model.h\"\n\nconstexpr int NPY_FLOAT16 = 23;\ntemplate <>\nstruct pybind11::detail::npy_format_descriptor<modelbox::Float16> {\n  static pybind11::dtype dtype() {\n    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16);\n    return reinterpret_borrow<pybind11::dtype>(ptr);\n  }\n\n  static std::string format() {\n    // following:\n    // https://docs.python.org/3/library/struct.html#format-characters\n    return \"e\";\n  }\n\n  static constexpr auto name() -> pybind11::detail::descr<7> {\n    return _(\"float16\");\n  }\n};\n\nnamespace modelbox {\n\nclass NumpyInfo {\n public:\n  NumpyInfo(ssize_t itemsize, const std::string &format, void 
*ptr,\n            std::vector<ssize_t> shape, std::vector<ssize_t> strides)\n      : shape_(std::move(shape)),\n        strides_(std::move(strides)),\n        itemsize_(itemsize) {\n    ssize_t bytes = std::accumulate(shape_.begin(), shape_.end(), (ssize_t)1,\n                                    std::multiplies<ssize_t>()) *\n                    itemsize_;\n    m_data_ = std::unique_ptr<char[]>(new char[bytes]);\n    memcpy_s(m_data_.get(), bytes, ptr, bytes);\n    dtype_ = TypeFromFormatStr(format);\n  }\n\n  NumpyInfo(const NumpyInfo &obj) {\n    shape_ = obj.Shape();\n    strides_ = obj.Strides();\n    itemsize_ = obj.ItemSize();\n    ssize_t bytes = std::accumulate(shape_.begin(), shape_.end(), (ssize_t)1,\n                                    std::multiplies<ssize_t>()) *\n                    itemsize_;\n    m_data_ = std::unique_ptr<char[]>(new char[bytes]);\n    memcpy_s(m_data_.get(), bytes, obj.Data(), bytes);\n    dtype_ = obj.Type();\n  }\n  modelbox::ModelBoxDataType Type() const { return dtype_; }\n  std::vector<ssize_t> Shape() const { return shape_; }\n  std::vector<ssize_t> Strides() const { return strides_; }\n  void *Data() const { return (void *)m_data_.get(); }\n  std::size_t ItemSize() const { return itemsize_; }\n\n private:\n  std::vector<ssize_t> shape_;\n  std::vector<ssize_t> strides_;\n  modelbox::ModelBoxDataType dtype_;\n  ssize_t itemsize_;\n  std::unique_ptr<char[]> m_data_;\n};\n\ntemplate <typename DataType, typename PyType, typename CType>\nbool DataSet(DataType &data, const std::string &key, py::object set_obj,\n             py::object cast_obj) {\n  if (!py::isinstance<PyType>(set_obj)) {\n    return false;\n  }\n  if (auto *buffer = dynamic_cast<Buffer *>(&data)) {\n    buffer->Set(key, cast_obj.cast<CType>());\n  } else if (auto *session_context = dynamic_cast<SessionContext *>(&data)) {\n    auto c_context = std::make_shared<CType>(cast_obj.cast<CType>());\n    auto c_type = typeid(CType).hash_code();\n    
session_context->SetPrivate(key, c_context, c_type);\n  } else {\n    return false;\n  }\n  return true;\n}\n\ntypedef bool (*pDataGetFunc)(std::size_t hash_code, void *, py::object &ret);\n\ntemplate <typename CType, typename PyType>\nbool DataGet(std::size_t hash_code, void *value, py::object &ret) {\n  if (typeid(CType).hash_code() == hash_code) {\n    ret = py::cast(*((CType *)(value)));\n    return true;\n  }\n  return false;\n}\n\ntemplate <typename DataType, typename FuncType>\nbool SetAttributes(DataType &context, const std::string &key,\n                   const py::object &obj, std::vector<FuncType> &BaseObjectFunc,\n                   std::vector<FuncType> &List1DObjectFunc,\n                   std::vector<FuncType> &List2DObjectFunc) {\n  auto setup_data = [&](std::vector<FuncType> &func_list,\n                        const py::object &set_obj, const py::object &cast_obj) {\n    for (auto &func : func_list) {\n      if (func(context, key, set_obj, cast_obj)) {\n        return true;\n      }\n    }\n    return false;\n  };\n\n  if (setup_data(BaseObjectFunc, obj, obj)) {\n    return true;\n  }\n\n  if (py::isinstance<py::list>(obj)) {\n    py::list obj_list_all = obj.cast<py::list>();\n    if (obj_list_all.empty()) {\n      return setup_data(List1DObjectFunc, obj_list_all, obj_list_all);\n    }\n    if (py::isinstance<py::list>(obj_list_all[0])) {\n      py::list obj_list_1d = obj_list_all[0].cast<py::list>();\n      if (obj_list_1d.empty()) {\n        return setup_data(List2DObjectFunc, obj_list_1d, obj_list_all);\n      }\n      if (setup_data(List2DObjectFunc, obj_list_1d[0], obj_list_all)) {\n        return true;\n      }\n    } else {\n      if (setup_data(List1DObjectFunc, obj_list_all[0], obj_list_all)) {\n        return true;\n      }\n    }\n  }\n  return false;\n}\n\ntemplate <typename FuncType>\nbool GetAttributes(void *value, std::size_t value_type,\n                   std::vector<FuncType> func_list, py::object &ret_data) {\n  for (auto 
&pfunc : func_list) {\n    if (pfunc(value_type, value, ret_data)) {\n      return true;\n    }\n  }\n\n  if (typeid(NumpyInfo).hash_code() == value_type) {\n    auto *data = (NumpyInfo *)(value);\n    if (data == nullptr) {\n      MBLOG_ERROR << \"data is nullptr.\";\n    }\n\n    auto buffer_info =\n        py::buffer_info((void *)(data->Data()), data->ItemSize(),\n                        FormatStrFromType(data->Type()), data->Shape().size(),\n                        data->Shape(), data->Strides());\n    ret_data = py::array(buffer_info);\n    return true;\n  }\n  if (typeid(py::object).hash_code() == value_type) {\n    ret_data = *((py::object *)(value));\n    return true;\n  }\n\n  if (typeid(std::shared_ptr<py::object>).hash_code() == value_type) {\n    ret_data = *(*((std::shared_ptr<py::object> *)(value)));\n    return true;\n  }\n\n  return false;\n}\n\n/*******************************BufferSet Begin**************************/\ntypedef bool (*pBufferPyTypeToCTypeFunc)(Buffer &, const std::string &,\n                                         py::object, py::object);\nstatic std::vector<pBufferPyTypeToCTypeFunc> kBufferBaseObjectFunc = {\n    DataSet<Buffer, py::float_, double>,\n    DataSet<Buffer, ModelBoxDataType, ModelBoxDataType>,\n    DataSet<Buffer, py::str, std::string>, DataSet<Buffer, py::bool_, bool>,\n    DataSet<Buffer, py::int_, long>};\n\nstatic std::vector<pBufferPyTypeToCTypeFunc> kBufferLIst1DObjectFunc = {\n    DataSet<Buffer, py::float_, std::vector<double>>,\n    DataSet<Buffer, py::str, std::vector<std::string>>,\n    DataSet<Buffer, py::bool_, std::vector<bool>>,\n    DataSet<Buffer, py::int_, std::vector<long>>};\n\nstatic std::vector<pBufferPyTypeToCTypeFunc> kBufferLIst2DObjectFunc = {\n    DataSet<Buffer, py::float_, std::vector<std::vector<double>>>,\n    DataSet<Buffer, py::str, std::vector<std::vector<std::string>>>,\n    DataSet<Buffer, py::bool_, std::vector<std::vector<bool>>>,\n    DataSet<Buffer, py::int_, 
std::vector<std::vector<long>>>};\n\nvoid BufferSetAttributes(Buffer &buffer, const std::string &key,\n                         py::object &obj) {\n  if (SetAttributes<Buffer, pBufferPyTypeToCTypeFunc>(\n          buffer, key, obj, kBufferBaseObjectFunc, kBufferLIst1DObjectFunc,\n          kBufferLIst2DObjectFunc)) {\n    return;\n  }\n  if (py::isinstance<py::buffer>(obj)) {\n    py::buffer obj_buffer = obj.cast<py::buffer>();\n    py::buffer_info buffer_info = obj_buffer.request();\n    NumpyInfo numpy_info(buffer_info.itemsize, buffer_info.format,\n                         buffer_info.ptr, buffer_info.shape,\n                         buffer_info.strides);\n    buffer.Set(key, numpy_info);\n    return;\n  }\n\n  if (py::isinstance<py::object>(obj)) {\n    auto *obj_ptr = new py::object();\n    *obj_ptr = obj;\n    auto obj_shared = std::shared_ptr<py::object>(obj_ptr, [](void *ptr) {\n      py::gil_scoped_acquire interpreter_guard{};\n      delete static_cast<py::object *>(ptr);\n    });\n    buffer.Set(key, obj_shared);\n    return;\n  }\n\n  throw std::invalid_argument(\"invalid data type \" +\n                              py::str(obj).cast<std::string>() + \" for key \" +\n                              key);\n}\n/*******************************BufferSet End*******************************/\n\n/*******************************BufferGet Begin*****************************/\n\nstatic std::vector<pDataGetFunc> kBufferObjectConvertFunc = {\n    DataGet<ModelBoxDataType, ModelBoxDataType>,\n    DataGet<int, py::int_>,\n    DataGet<unsigned int, py::int_>,\n    DataGet<long, py::int_>,\n    DataGet<unsigned long, py::int_>,\n    DataGet<char, py::int_>,\n    DataGet<unsigned char, py::int_>,\n    DataGet<float, py::float_>,\n    DataGet<double, py::float_>,\n    DataGet<std::string, py::str>,\n    DataGet<bool, py::bool_>,\n\n    DataGet<std::vector<int>, py::list>,\n    DataGet<std::vector<unsigned int>, py::list>,\n    DataGet<std::vector<long>, py::list>,\n    
DataGet<std::vector<unsigned long>, py::list>,\n    DataGet<std::vector<char>, py::list>,\n    DataGet<std::vector<unsigned char>, py::list>,\n    DataGet<std::vector<float>, py::list>,\n    DataGet<std::vector<double>, py::list>,\n    DataGet<std::vector<std::string>, py::list>,\n    DataGet<std::vector<bool>, py::list>,\n\n    DataGet<std::vector<std::vector<float>>, py::list>,\n    DataGet<std::vector<std::vector<double>>, py::list>,\n    DataGet<std::vector<std::vector<int>>, py::list>,\n    DataGet<std::vector<std::vector<unsigned int>>, py::list>,\n    DataGet<std::vector<std::vector<long>>, py::list>,\n    DataGet<std::vector<std::vector<unsigned long>>, py::list>,\n    DataGet<std::vector<std::vector<std::string>>, py::list>,\n    DataGet<std::vector<std::vector<bool>>, py::list>,\n};\n\npy::object BufferGetAttributes(Buffer &buffer, const std::string &key) {\n  auto ret = buffer.Get(key);\n  if (!std::get<1>(ret)) {\n    throw std::invalid_argument(\"can not find buffer meta: \" + key);\n  }\n  auto *data = std::get<0>(ret);\n  auto value_type = (std::size_t)(data->type().hash_code());\n  auto *value = any_cast<void *>(data);\n  py::object ret_data;\n\n  if (GetAttributes(value, value_type, kBufferObjectConvertFunc, ret_data)) {\n    return ret_data;\n  }\n  throw std::invalid_argument(\"invalid data type \" +\n                              std::string(data->type().name()) +\n                              \" for buffer meta \" + key);\n}\n/*******************************BufferGet End*****************************/\n\n/***************************ConfigurationSet Begin************************/\ntypedef bool (*pConfigurationPyTypeToCTypeFunc)(Configuration &config,\n                                                const std::string &key,\n                                                py::object set_obj,\n                                                py::object cast_obj);\n\ntemplate <typename PyType, typename CType>\nbool ConfigSet(Configuration &config, 
               const std::string &key,
               py::object set_obj, py::object cast_obj) {
  if (py::isinstance<PyType>(set_obj)) {
    config.SetProperty(key, cast_obj.cast<CType>());
    return true;
  }

  return false;
}

// Scalar converters tried first by ConfigurationSetAttributes.
static std::vector<pConfigurationPyTypeToCTypeFunc>
    kConfigurationBaseObjectFunc = {
        ConfigSet<py::float_, double>, ConfigSet<py::str, std::string>,
        ConfigSet<py::bool_, bool>, ConfigSet<py::int_, long>};

// 1-D list converters (element type is probed on the list contents).
static std::vector<pConfigurationPyTypeToCTypeFunc>
    kConfigurationListObjectFunc = {
        ConfigSet<py::float_, std::vector<double>>,
        ConfigSet<py::str, std::vector<std::string>>,
        ConfigSet<py::bool_, std::vector<bool>>,
        ConfigSet<py::int_, std::vector<long>>};
// 2-D lists are not supported for Configuration; table intentionally empty.
static std::vector<pConfigurationPyTypeToCTypeFunc>
    kConfigurationList2DObjectFunc = {};
// Store a python scalar or 1-D list into a Configuration under |key|.
// Throws std::invalid_argument for unsupported types.
void ConfigurationSetAttributes(Configuration &config, const std::string &key,
                                const py::object &obj) {
  if (SetAttributes<Configuration, pConfigurationPyTypeToCTypeFunc>(
          config, key, obj, kConfigurationBaseObjectFunc,
          kConfigurationListObjectFunc, kConfigurationList2DObjectFunc)) {
    return;
  }
  throw std::invalid_argument("invalid data type " +
                              py::str(obj).cast<std::string>() + " for key " +
                              key);
}
/***************************ConfigurationSet End************************/

/***************************DataContextSet Begin************************/
// Attach an arbitrary python object to the DataContext under |key|.
// NOTE(review): the explicit inc_ref() plus the default shared_ptr deleter
// means the py::object's refcount is bumped once and the wrapper is later
// destroyed by make_shared's default deleter — presumably the extra ref is
// deliberate so the interpreter-side object survives a deleter run without
// the GIL; confirm against BufferSetAttributes' GIL-acquiring deleter.
void DataContextSetAttributes(DataContext &data_context, const std::string &key,
                              const py::object &obj) {
  obj.inc_ref();
  auto py_context = std::make_shared<py::object>(obj);
  data_context.SetPrivate(key, py_context);
}
/***************************DataContextSet End**************************/

/***************************DataContextGet Begin************************/
py::object
DataContextGetAttributes(DataContext &data_context,\n                                    const std::string &key) {\n  auto *value = data_context.GetPrivate(key).get();\n  return *((py::object *)(value));\n}\n/***************************DataContextGet End*****************************/\n\n/***************************SessionContextSet Start************************/\n\ntypedef bool (*pSessionContextPyTypeToCTypeFunc)(SessionContext &,\n                                                 const std::string &,\n                                                 py::object, py::object);\n\nstatic std::vector<pSessionContextPyTypeToCTypeFunc>\n    kSessionContextBaseObjectFunc = {\n        DataSet<SessionContext, py::float_, double>,\n        DataSet<SessionContext, py::str, std::string>,\n        DataSet<SessionContext, py::bool_, bool>,\n        DataSet<SessionContext, py::int_, long>};\n\nstatic std::vector<pSessionContextPyTypeToCTypeFunc>\n    kSessionContextList1DObjectFunc = {\n        DataSet<SessionContext, py::float_, std::vector<double>>,\n        DataSet<SessionContext, py::str, std::vector<std::string>>,\n        DataSet<SessionContext, py::bool_, std::vector<bool>>,\n        DataSet<SessionContext, py::int_, std::vector<long>>};\n\nstatic std::vector<pSessionContextPyTypeToCTypeFunc>\n    kSessionContextList2DObjectFunc = {\n        DataSet<SessionContext, py::float_, std::vector<std::vector<double>>>,\n        DataSet<SessionContext, py::str, std::vector<std::vector<std::string>>>,\n        DataSet<SessionContext, py::bool_, std::vector<std::vector<bool>>>,\n        DataSet<SessionContext, py::int_, std::vector<std::vector<long>>>};\n\nvoid SessionContextSetAttributes(SessionContext &session_context,\n                                 const std::string &key,\n                                 const py::object &obj) {\n  if (SetAttributes<SessionContext, pSessionContextPyTypeToCTypeFunc>(\n          session_context, key, obj, kSessionContextBaseObjectFunc,\n          
          kSessionContextList1DObjectFunc, kSessionContextList2DObjectFunc)) {
    return;
  }
  if (py::isinstance<py::buffer>(obj)) {
    // Buffer-protocol objects are stored as NumpyInfo, tagged with its
    // type hash so SessionContextGetAttributes can dispatch on it later.
    py::buffer obj_buffer = obj.cast<py::buffer>();
    auto buffer_info = obj_buffer.request();
    auto buffer_context = std::make_shared<NumpyInfo>(
        buffer_info.itemsize, buffer_info.format, buffer_info.ptr,
        buffer_info.shape, buffer_info.strides);
    auto buffer_type = typeid(NumpyInfo).hash_code();
    session_context.SetPrivate(key, buffer_context, buffer_type);
    return;
  }
  // Fallback: store the raw python object, tagged as py::object.
  // NOTE(review): inc_ref() + default deleter — same pattern as
  // DataContextSetAttributes; presumably the extra ref is intentional.
  obj.inc_ref();
  auto py_context = std::make_shared<py::object>(obj);
  auto py_type = typeid(py::object).hash_code();
  session_context.SetPrivate(key, py_context, py_type);
}
/***************************SessionContextSet End**************************/

/***************************SessionContextGet Start************************/

// Session-context getters reuse the Buffer converter table.
static std::vector<pDataGetFunc> kSessionContextObjectConvertFunc =
    kBufferObjectConvertFunc;
// Fetch session-private data stored under |key| and convert it back to a
// python object via the type hash recorded at Set time.  Throws
// std::invalid_argument when no converter matches.
py::object SessionContextGetAttributes(SessionContext &session_context,
                                       const std::string &key) {
  py::object ret_data;
  auto *value = session_context.GetPrivate(key).get();
  auto value_type = session_context.GetPrivateType(key);
  if (GetAttributes(value, value_type, kSessionContextObjectConvertFunc,
                    ret_data)) {
    return ret_data;
  }
  throw std::invalid_argument("invalid data type " + key);
}

/***************************SessionContextGet End************************/

// Expose the module-level logging helpers (modelbox.info / warn / ...).
void ModelboxPyApiSetUpLog(pybind11::module &m) {
  m.def("set_log_level", FlowUnitPythonLog::SetLogLevel);
  m.def("debug", FlowUnitPythonLog::Debug);
  m.def("info", FlowUnitPythonLog::Info);
  m.def("notice", FlowUnitPythonLog::Notice);
  m.def("warn", FlowUnitPythonLog::Warn);
  m.def("error", FlowUnitPythonLog::Error);
  m.def("fatal", FlowUnitPythonLog::Fatal);
}

// Register the log Level enum on |h|.  (Body continues on the next line.)
void ModelboxPyApiSetUpLogLevel(pybind11::handle &h) {
  py::enum_<LogLevel>(h, "Level", py::arithmetic(), py::module_local())
      .value("DEBUG", LOG_DEBUG)
      .value("INFO", LOG_INFO)
      .value("NOTICE", LOG_NOTICE)
      .value("WARN", LOG_WARN)
      .value("ERROR", LOG_ERROR)
      .value("FATAL", LOG_FATAL)
      .value("OFF", LOG_OFF);
}

// Register modelbox.Status and its nested StatusCode enum.
void ModelboxPyApiSetUpStatus(pybind11::module &m) {
  auto c =
      py::class_<modelbox::Status, std::shared_ptr<modelbox::Status>>(
          m, "Status", py::module_local())
          .def(py::init<>())
          .def(py::init<const StatusCode &>())
          .def(py::init<const bool &>())
          .def(py::init<const StatusCode &, const std::string &>())
          .def(py::init<const Status &, const std::string &>())
          .def("__str__", &modelbox::Status::ToString)
          // Truthiness mirrors the C++ operator bool (success check).
          .def("__bool__", [](const modelbox::Status &s) { return bool(s); })
          .def(py::self == true)
          .def(py::self == py::self)  // NOLINT
          .def(py::self == StatusCode())
          .def(py::self != py::self)  // NOLINT
          .def(py::self != StatusCode())
          .def("code", &modelbox::Status::Code)
          .def("set_errormsg", &modelbox::Status::SetErrormsg)
          .def("str_code", &modelbox::Status::StrCode)
          .def("errormsg", &modelbox::Status::Errormsg)
          .def("wrap_errormsgs", &modelbox::Status::WrapErrormsgs)
          .def("unwrap", &modelbox::Status::Unwrap);

  // StatusCode is nested under Status (modelbox.Status.StatusCode).
  py::enum_<StatusCode>(c, "StatusCode", py::arithmetic(), py::module_local())
      .value("STATUS_SUCCESS", STATUS_SUCCESS)
      .value("STATUS_FAULT", STATUS_FAULT)
      .value("STATUS_NOTFOUND", STATUS_NOTFOUND)
      .value("STATUS_INVALID", STATUS_INVALID)
      .value("STATUS_AGAIN", STATUS_AGAIN)
      .value("STATUS_BADCONF", STATUS_BADCONF)
      .value("STATUS_NOMEM", STATUS_NOMEM)
      .value("STATUS_RANGE", STATUS_RANGE)
      .value("STATUS_EXIST", STATUS_EXIST)
      .value("STATUS_INTERNAL", STATUS_INTERNAL)
      .value("STATUS_BUSY", STATUS_BUSY)
      .value("STATUS_PERMIT", STATUS_PERMIT)
      .value("STATUS_NOTSUPPORT", STATUS_NOTSUPPORT)
      .value("STATUS_NODATA", STATUS_NODATA)
      .value("STATUS_NOSPACE", STATUS_NOSPACE)
      .value("STATUS_NOBUFS", STATUS_NOBUFS)
      .value("STATUS_OVERFLOW", STATUS_OVERFLOW)
      .value("STATUS_INPROGRESS", STATUS_INPROGRESS)
      .value("STATUS_ALREADY", STATUS_ALREADY)
      .value("STATUS_TIMEDOUT", STATUS_TIMEDOUT)
      .value("STATUS_NOSTREAM", STATUS_NOSTREAM)
      .value("STATUS_RESET", STATUS_RESET)
      .value("STATUS_CONTINUE", STATUS_CONTINUE)
      .value("STATUS_EDQUOT", STATUS_EDQUOT)
      .value("STATUS_STOP", STATUS_STOP)
      .value("STATUS_SHUTDOWN", STATUS_SHUTDOWN)
      .value("STATUS_EOF", STATUS_EOF);
}

// Register modelbox.Configuration with typed getters (each takes a key and
// an optional default).  (Chain continues on the next line.)
void ModelboxPyApiSetUpConfiguration(pybind11::module &m) {
  py::class_<modelbox::Configuration, std::shared_ptr<modelbox::Configuration>>(
      m, "Configuration", py::module_local())
      .def(py::init<>())
      .def("get_string", &modelbox::Configuration::GetString, py::arg("key"),
           py::arg("default") = py::str(""))
      .def("get_bool", &modelbox::Configuration::GetBool, py::arg("key"),
           py::arg("default") = py::bool_(false))
      .def("get_int", &modelbox::Configuration::GetInt64, py::arg("key"),
           py::arg("default") = py::int_(0))
      .def("get_float", &modelbox::Configuration::GetDouble, py::arg("key"),
           py::arg("default") = py::float_(0.0))
      .def("get_string_list", &modelbox::Configuration::GetStrings,
           py::arg("key"), py::arg("default") = py::make_tuple())
      .def("get_bool_list", &modelbox::Configuration::GetBools, py::arg("key"),
           py::arg("default") = py::make_tuple())
      .def("get_int_list", &modelbox::Configuration::GetInt64s,
      py::arg("key"),
           py::arg("default") = py::make_tuple())
      .def("get_float_list", &modelbox::Configuration::GetDoubles,
           py::arg("key"), py::arg("default") = py::make_tuple())
      .def("set", [](modelbox::Configuration &config, const std::string &key,
                     const py::object &obj) {
        ConfigurationSetAttributes(config, key, obj);
      });
}

// View a Buffer's raw bytes as a 1-D py::array whose dtype comes from the
// buffer's "type" meta (defaults to uint8).  The array aliases the buffer
// memory; it does not copy.  Throws std::invalid_argument for a type whose
// element size is unknown.
py::array BufferToPyRawBuffer(modelbox::Buffer &buffer) {
  modelbox::ModelBoxDataType type = MODELBOX_TYPE_INVALID;
  buffer.Get("type", type, MODELBOX_UINT8);

  auto typesize = GetDataTypeSize(type);
  if (typesize == 0) {
    throw std::invalid_argument("buffer type is invalid");
  }

  // Element count, not byte count.
  size_t len = buffer.GetBytes() / typesize;
  const auto *const_data_ptr = buffer.ConstData();
  auto *data_ptr = const_cast<void *>(const_data_ptr);
  switch (type) {
    case MODELBOX_UINT8:
      return mkarray_via_buffer<uint8_t>(data_ptr, len);
    case MODELBOX_INT8:
      return mkarray_via_buffer<int8_t>(data_ptr, len);
    case MODELBOX_BOOL:
      return mkarray_via_buffer<bool>(data_ptr, len);
    case MODELBOX_INT16:
      return mkarray_via_buffer<int16_t>(data_ptr, len);
    case MODELBOX_UINT16:
      return mkarray_via_buffer<uint16_t>(data_ptr, len);
    case MODELBOX_INT32:
      return mkarray_via_buffer<int32_t>(data_ptr, len);
    case MODELBOX_UINT32:
      return mkarray_via_buffer<uint32_t>(data_ptr, len);
    case MODELBOX_INT64:
      return mkarray_via_buffer<int64_t>(data_ptr, len);
    case MODELBOX_UINT64:
      return mkarray_via_buffer<uint64_t>(data_ptr, len);
    case MODELBOX_FLOAT:
      return mkarray_via_buffer<float>(data_ptr, len);
    case MODELBOX_DOUBLE:
      return mkarray_via_buffer<double>(data_ptr, len);
    case MODELBOX_HALF:
      return mkarray_via_buffer<modelbox::Float16>(data_ptr, len);
    default:
      break;
  }

  // Unknown-but-sized types fall back to a uint8 view.
  return mkarray_via_buffer<uint8_t>(data_ptr,
len);\n}\n\npy::array BufferToPyArrayObject(modelbox::Buffer &buffer) {\n  return BufferToPyRawBuffer(buffer);\n}\n\npy::object BufferToPyString(modelbox::Buffer &buffer) {\n  auto *const_data_ptr = (char *)buffer.ConstData();\n  if (const_data_ptr == nullptr) {\n    throw std::runtime_error(\"can not get buffer data.\");\n  }\n\n  auto *data_ptr = const_cast<char *>(const_data_ptr);\n  if (data_ptr == nullptr) {\n    throw std::runtime_error(\"convert data to string failed.\");\n  }\n\n  int len = buffer.GetBytes() / GetDataTypeSize(MODELBOX_UINT8);\n  modelbox::ModelBoxDataType type = MODELBOX_TYPE_INVALID;\n  buffer.Get(\"type\", type, MODELBOX_UINT8);\n\n  return py::cast(std::string(data_ptr, len));\n}\n\npy::object BufferToPyObject(modelbox::Buffer &buffer) {\n  auto type = buffer.GetBufferType();\n  if (type == modelbox::BufferEnumType::RAW) {\n    return BufferToPyRawBuffer(buffer);\n  }\n\n  if (type == modelbox::BufferEnumType::IMG) {\n    return BufferToPyArrayObject(buffer);\n  }\n\n  if (type == modelbox::BufferEnumType::STR) {\n    return BufferToPyString(buffer);\n  }\n\n  throw std::runtime_error(\"invalid type\");\n}\n\nvoid StrToBuffer(const std::shared_ptr<Buffer> &buffer,\n                 const std::string &data) {\n  const char *s = data.c_str();\n  Py_ssize_t len = data.length();\n  buffer->BuildFromHost(const_cast<char *>(s), len);\n  buffer->SetGetBufferType(modelbox::BufferEnumType::STR);\n}\n\nvoid ListToBuffer(const std::shared_ptr<Buffer> &buffer, const py::list &data) {\n  std::vector<std::vector<size_t>> vec_shapes;\n  std::vector<size_t> sizes;\n  std::vector<void *> source_vec;\n  size_t total_bytes = 0;\n  std::string info_type;\n  for (const auto &item : data) {\n    auto b = py::cast<py::buffer>(item);\n    py::buffer_info info = b.request();\n    if (info.ptr != nullptr) {\n      source_vec.push_back(info.ptr);\n    }\n\n    std::vector<size_t> i_shape;\n    for (auto &dim : info.shape) {\n      i_shape.push_back(dim);\n    }\n  
  vec_shapes.push_back(i_shape);\n    size_t bytes = Volume(i_shape) * info.itemsize;\n    total_bytes += bytes;\n    sizes.push_back(bytes);\n    info_type = info.format;\n  }\n\n  buffer->Build(total_bytes);\n  void *start = buffer->MutableData();\n  int offset = 0;\n  for (size_t i = 0; i < sizes.size(); ++i) {\n    memcpy_s((u_char *)start + offset, total_bytes, source_vec[i], sizes[i]);\n    offset += sizes[i];\n  }\n  buffer->Set(\"shape\", vec_shapes);\n  buffer->Set(\"type\", TypeFromFormatStr(info_type));\n  buffer->SetGetBufferType(modelbox::BufferEnumType::RAW);\n}\n\nvoid ModelboxPyApiSetUpDevice(pybind11::module &m) {\n  py::class_<modelbox::Device, std::shared_ptr<modelbox::Device>>(\n      m, \"Device\", py::module_local())\n      .def(\"get_device_id\", &modelbox::Device::GetDeviceID)\n      .def(\"get_type\", &modelbox::Device::GetType)\n      .def(\"get_device_desc\", &modelbox::Device::GetDeviceDesc);\n}\n\nvoid ModelboxPyApiSetUpDataType(pybind11::handle &h) {\n  py::enum_<modelbox::ModelBoxDataType>(h, \"ModelBoxDataType\", py::arithmetic(),\n                                        py::module_local())\n      .value(\"UINT8\", MODELBOX_UINT8)\n      .value(\"INT8\", MODELBOX_INT8)\n      .value(\"BOOL\", MODELBOX_BOOL)\n      .value(\"INT16\", MODELBOX_INT16)\n      .value(\"UINT16\", MODELBOX_UINT16)\n      .value(\"INT32\", MODELBOX_INT32)\n      .value(\"UINT32\", MODELBOX_UINT32)\n      .value(\"INT64\", MODELBOX_INT64)\n      .value(\"UINT64\", MODELBOX_UINT64)\n      .value(\"FLOAT\", MODELBOX_FLOAT)\n      .value(\"DOUBLE\", MODELBOX_DOUBLE)\n      .export_values();\n}\n\npy::buffer_info ModelboxPyApiSetUpBufferDefBuffer(Buffer &buffer) {\n  std::vector<size_t> buffer_shape;\n  auto ret = buffer.Get(\"shape\", buffer_shape);\n  if (!ret) {\n    buffer_shape.push_back(buffer.GetBytes());\n  }\n\n  modelbox::ModelBoxDataType type = MODELBOX_TYPE_INVALID;\n  ret = buffer.Get(\"type\", type);\n  if (!ret) {\n    type = 
modelbox::ModelBoxDataType::MODELBOX_UINT8;
  }

  std::vector<ssize_t> shape(buffer_shape.size());
  std::vector<ssize_t> stride(buffer_shape.size());
  size_t dim_prod = 1;
  for (size_t i = 0; i < buffer_shape.size(); ++i) {
    shape[i] = buffer_shape[i];

    // We iterate over stride backwards
    stride[(buffer_shape.size() - 1) - i] =
        modelbox::GetDataTypeSize(type) * dim_prod;
    dim_prod *= buffer_shape[(buffer_shape.size() - 1) - i];
  }

  const auto *const_data_ptr = buffer.ConstData();
  auto *data_ptr = const_cast<void *>(const_data_ptr);

  return py::buffer_info(data_ptr, modelbox::GetDataTypeSize(type),
                         FormatStrFromType(type), shape.size(), shape, stride);
}

// Register modelbox.Buffer: constructors from buffer-protocol objects,
// strings and lists, plus build/meta/error accessors.  Overload order
// matters — pybind11 tries overloads in registration order.
void ModelboxPyApiSetUpBuffer(pybind11::module &m) {
  using namespace pybind11::literals;  // NOLINT

  ModelboxPyApiSetUpDevice(m);

  auto h =
      py::class_<modelbox::Buffer, std::shared_ptr<modelbox::Buffer>>(
          m, "Buffer", py::module_local(), py::buffer_protocol())
          .def_buffer(ModelboxPyApiSetUpBufferDefBuffer)
          // keep_alive<1, 2> ties the device's lifetime to the buffer.
          .def(py::init([](const std::shared_ptr<modelbox::Device> &device,
                           const py::buffer &b) {
                 auto buffer = std::make_shared<Buffer>(device);
                 PyBufferToBuffer(buffer, b);
                 return buffer;
               }),
               py::keep_alive<1, 2>())
          .def(py::init([](const std::shared_ptr<modelbox::Device> &device,
                           const std::string &str) {
                 auto buffer = std::make_shared<Buffer>(device);
                 StrToBuffer(buffer, str);
                 return buffer;
               }),
               py::keep_alive<1, 2>())
          .def(py::init([](const std::shared_ptr<modelbox::Device> &device,
                           const py::list &li) {
                 auto buffer = std::make_shared<Buffer>(device);
                 ListToBuffer(buffer, li);
                 return buffer;
               }),
               py::keep_alive<1, 2>())
          .def(py::init([](const std::shared_ptr<modelbox::Device> &device) {
                 auto buffer = std::make_shared<Buffer>(device);
                 return buffer;
               }),
               py::keep_alive<1, 2>())
          .def(py::init<const Buffer &>())
          .def("build", [](std::shared_ptr<Buffer> &buffer, const std::string &str) {
                 StrToBuffer(buffer, str);
               })
          .def("build", [](std::shared_ptr<Buffer> &buffer, const py::list &li) {
                 ListToBuffer(buffer, li);
               })
          .def("build", [](std::shared_ptr<Buffer> &buffer, const py::buffer &buf) {
                 PyBufferToBuffer(buffer, buf);
               })
          .def("as_bytes", [](Buffer &buffer) {
                return py::bytes{(const char *)buffer.ConstData(),
                                    buffer.GetBytes()};
               })
          .def("as_object",
               [](Buffer &buffer) -> py::object {
                 return BufferToPyObject(buffer);
               })
          .def("__str__",
               [](Buffer &buffer) {
                 return std::string((const char *)buffer.ConstData(),
                                    buffer.GetBytes());
               })
          .def("has_error", &modelbox::Buffer::HasError)
          .def("set_error", &modelbox::Buffer::SetError)
          .def("get_error_code", &modelbox::Buffer::GetErrorCode)
          .def("get_error_msg", &modelbox::Buffer::GetErrorMsg)
          .def("get_bytes", &modelbox::Buffer::GetBytes)
          .def("copy_meta",
               [](Buffer &buffer, Buffer &other) {
                 // Non-owning alias: |other| is owned by python, so the
                 // shared_ptr gets a no-op deleter.
                 auto other_ptr = std::shared_ptr<modelbox::Buffer>(
                     &other, [](void *data) {});
                 buffer.CopyMeta(other_ptr);
               })
          .def("set",
              [](Buffer &buffer, const std::string &key,
                  py::object &obj) { BufferSetAttributes(buffer, key, obj); })
          .def("get", BufferGetAttributes);

  // Nest the data-type enum under Buffer.
  ModelboxPyApiSetUpDataType(h);
}

// Register modelbox.BufferList: construction, push_back overloads,
// bulk meta "set", and the python sequence protocol.
void ModelboxPyApiSetUpBufferList(pybind11::module &m) {
  using namespace pybind11::literals;  // NOLINT

  py::class_<modelbox::BufferList, std::shared_ptr<modelbox::BufferList>>(
      m, "BufferList", py::module_local())
      .def(py::init<>())
      .def(py::init<const std::shared_ptr<modelbox::Device> &>())
      .def(py::init<const std::shared_ptr<modelbox::Buffer> &>())
      .def(py::init<const std::vector<std::shared_ptr<modelbox::Buffer>> &>())
      .def("build",
           [](BufferList &bl, const std::vector<int> &shape) {
             std::vector<size_t> new_shape(shape.begin(), shape.end());
             return bl.Build(new_shape);
           })
      .def("size", &modelbox::BufferList::Size)
      .def("get_bytes", &modelbox::BufferList::GetBytes)
      .def("get_device", &modelbox::BufferList::GetDevice)
      .def(
          "push_back",
          [](BufferList &bl, Buffer &buffer) {
            // Copies the Buffer; the list owns its own instance.
            auto new_buffer = std::make_shared<Buffer>(buffer);
            bl.PushBack(new_buffer);
          },
          py::keep_alive<1, 2>())
      .def(
          "push_back",
          [](BufferList &bl, const py::buffer &b) {
            auto buffer = std::make_shared<Buffer>(bl.GetDevice());
            if (PyBufferToBuffer(buffer, b) != STATUS_OK) {
              throw std::runtime_error(
                  "Failed to push back py::buffer to Buffer");
            }
            bl.PushBack(buffer);
          },
          py::keep_alive<1, 2>())
      .def("push_back",
           [](BufferList &bl, const std::string &data) {
             auto buffer = std::make_shared<Buffer>(bl.GetDevice());
             StrToBuffer(buffer, data);
             bl.PushBack(buffer);
           })
      .def("set",
           [](BufferList &bl, const std::string &key, py::object &obj) {
             // Applies the meta to every buffer in the list.
             for (auto &buffer : bl) {
               BufferSetAttributes(*buffer, key, obj);
             }
           })
      .def("front", &BufferList::Front)
      .def("back", &BufferList::Back)
      .def("set_error", &modelbox::BufferList::SetError)
      .def("copy_meta", &modelbox::BufferList::CopyMeta)
      .def("__len__", [](const modelbox::BufferList &bl) { return bl.Size(); })
      .def(
          "__iter__",
          [](const modelbox::BufferList &bl) {
            return py::make_iterator<
                py::return_value_policy::reference_internal>(bl.begin(),
                                                             bl.end());
          },
          py::keep_alive<0, 1>())
      .def(
          "__getitem__",
          [](modelbox::BufferList &bl, size_t i) -> std::shared_ptr<Buffer> {
            return bl.At(i);
          },
          py::keep_alive<0, 1>());
}

// Register FlowUnitError and FlowUnitEvent with typed private-data
// accessors.  (Chain continues on the next line.)
void ModelBoxPyApiSetUpFlowUnitEvent(pybind11::module &m) {
  py::class_<modelbox::FlowUnitError, std::shared_ptr<modelbox::FlowUnitError>>(
      m, "FlowUnitError", py::module_local())
      .def(py::init<std::string>())
      .def(py::init<std::string, std::string, modelbox::Status>())
      .def("get_description", &modelbox::FlowUnitError::GetDesc);

  py::class_<modelbox::FlowUnitEvent, std::shared_ptr<modelbox::FlowUnitEvent>>(
      m, "FlowUnitEvent", py::module_local())
      .def(py::init<>())
      .def("set_private_int",
           [](FlowUnitEvent &e, const std::string &key, long data) {
             auto private_content = std::make_shared<long>(data);
             e.SetPrivate(key, private_content);
           })
      .def("get_private_int",
           [](FlowUnitEvent &e, const std::string &key) -> long {
             auto data = e.GetPrivate(key);
             if (!data) {
               throw
std::runtime_error("invalid key.");
             }

             return *((long *)(data.get()));
           })
      .def("set_private_string",
           [](FlowUnitEvent &e, const std::string &key,
              const std::string &data) {
             auto private_content = std::make_shared<std::string>(data);
             e.SetPrivate(key, private_content);
           })
      .def("get_private_string",
           [](FlowUnitEvent &e, const std::string &key) -> std::string {
             auto data = e.GetPrivate(key);
             if (!data) {
               throw std::runtime_error("invalid key.");
             }

             return *((std::string *)(data.get()));
           });
}

// Register modelbox.DataMeta with typed meta accessors (throws
// RuntimeError for a missing key).
void ModelboxPyApiSetUpDataMeta(pybind11::module &m) {
  py::class_<modelbox::DataMeta, std::shared_ptr<modelbox::DataMeta>>(
      m, "DataMeta", py::module_local())
      .def(py::init<>())
      .def("set_private_int",
           [](DataMeta &e, const std::string &key, long data) {
             auto private_content = std::make_shared<long>(data);
             e.SetMeta(key, private_content);
           })
      .def("get_private_int",
           [](DataMeta &e, const std::string &key) -> long {
             auto data = e.GetMeta(key);
             if (!data) {
               throw std::runtime_error("invalid key.");
             }

             return *((long *)(data.get()));
           })
      .def("set_private_string",
           [](DataMeta &e, const std::string &key, const std::string &data) {
             auto private_content = std::make_shared<std::string>(data);
             e.SetMeta(key, private_content);
           })
      .def("get_private_string",
           [](DataMeta &e, const std::string &key) -> std::string {
             auto data = e.GetMeta(key);
             if (!data) {
               throw std::runtime_error("invalid key.");
             }

             return *((std::string *)(data.get()));
           });
}

// Register modelbox.SessionContext: typed accessors plus the generic
// set_private/get_private pair backed by SessionContext*Attributes.
void ModelboxPyApiSetUpSessionContext(pybind11::module &m) {
  py::class_<modelbox::SessionContext,
             std::shared_ptr<modelbox::SessionContext>>(m, "SessionContext",
                                                        py::module_local())
      .def(py::init<>())
      .def("set_private_int",
           [](SessionContext &ctx, const std::string &key, long data) {
             auto private_content = std::make_shared<long>(data);
             ctx.SetPrivate(key, private_content);
           })
      .def("get_private_int",
           [](SessionContext &ctx, const std::string &key) -> long {
             auto data = ctx.GetPrivate(key);
             if (!data) {
               throw std::runtime_error("invalid key.");
             }

             return *((long *)(data.get()));
           })
      .def("set_private_string",
           [](SessionContext &ctx, const std::string &key,
              const std::string &data) {
             auto private_content = std::make_shared<std::string>(data);
             ctx.SetPrivate(key, private_content);
           })
      .def("get_private_string",
           [](SessionContext &ctx, const std::string &key) -> std::string {
             auto data = ctx.GetPrivate(key);
             if (!data) {
               throw std::runtime_error("invalid key.");
             }

             return *((std::string *)(data.get()));
           })
      .def("set_private",
           [](SessionContext &ctx, const std::string &key, py::object &obj) {
             SessionContextSetAttributes(ctx, key, obj);
           })
      .def("get_private",
           [](SessionContext &ctx, const std::string &key) {
             return SessionContextGetAttributes(ctx, key);
           })
      .def("get_session_config", &modelbox::SessionContext::GetConfig)
      .def("get_session_id", &modelbox::SessionContext::GetSessionId);
}

void
ModelboxPyApiSetUpDataContext(pybind11::module &m) {\n  py::class_<modelbox::ExternalData, std::shared_ptr<modelbox::ExternalData>>(\n      m, \"ExternalData\", py::module_local())\n      .def(\"create_buffer_list\", &modelbox::ExternalData::CreateBufferList)\n      .def(\"send\", &modelbox::ExternalData::Send)\n      .def(\"get_session_context\", &modelbox::ExternalData::GetSessionContext)\n      .def(\"get_session_config\", &modelbox::ExternalData::GetSessionConfig)\n      .def(\"close\", &modelbox::ExternalData::Close);\n\n  py::class_<modelbox::DataContext, std::shared_ptr<modelbox::DataContext>>(\n      m, \"DataContext\", py::module_local())\n      .def(\"input\", static_cast<std::shared_ptr<modelbox::BufferList> (\n                        modelbox::DataContext::*)(const std::string &) const>(\n                        &modelbox::DataContext::Input))\n      .def(\"output\", static_cast<std::shared_ptr<modelbox::BufferList> (\n                         modelbox::DataContext::*)(const std::string &)>(\n                         &modelbox::DataContext::Output))\n      .def(\"external\", &modelbox::DataContext::External)\n      .def(\"event\", &modelbox::DataContext::Event)\n      .def(\"has_error\", &modelbox::DataContext::HasError)\n      .def(\"send_event\", &modelbox::DataContext::SendEvent)\n      .def(\"set_private_string\",\n           [](DataContext &ctx, const std::string &key,\n              const std::string &data) {\n             auto private_content = std::make_shared<std::string>(data);\n             ctx.SetPrivate(key, private_content);\n           })\n      .def(\"set_private_int\",\n           [](DataContext &ctx, const std::string &key, long data) {\n             auto private_content = std::make_shared<long>(data);\n             ctx.SetPrivate(key, private_content);\n           })\n      .def(\"get_private_string\",\n           [](DataContext &ctx, const std::string &key) -> std::string {\n             auto data = ctx.GetPrivate(key);\n             
if (!data) {\n               throw std::runtime_error(\"invalid key.\");\n             }\n\n             return *((std::string *)(data.get()));\n           })\n      .def(\"get_private_int\",\n           [](DataContext &ctx, const std::string &key) -> long {\n             auto data = ctx.GetPrivate(key);\n             if (!data) {\n               throw std::runtime_error(\"invalid key.\");\n             }\n\n             return *((long *)(data.get()));\n           })\n      .def(\"set_private\",\n           [](DataContext &ctx, const std::string &key, py::object &obj) {\n             DataContextSetAttributes(ctx, key, obj);\n           })\n      .def(\"get_private\",\n           [](DataContext &ctx, const std::string &key) {\n             return DataContextGetAttributes(ctx, key);\n           })\n      .def(\"get_input_meta\", &modelbox::DataContext::GetInputMeta)\n      .def(\"get_input_group_meta\", &modelbox::DataContext::GetInputGroupMeta)\n      .def(\"set_output_meta\", &modelbox::DataContext::SetOutputMeta)\n      .def(\"get_session_config\", &modelbox::DataContext::GetSessionConfig)\n      .def(\"get_session_context\", &modelbox::DataContext::GetSessionContext);\n}\n\nvoid ModelboxPyApiSetUpGeneric(pybind11::module &m) {\n  ModelBoxPyApiSetUpFlowUnitEvent(m);\n  ModelboxPyApiSetUpDataMeta(m);\n  ModelboxPyApiSetUpSessionContext(m);\n  ModelboxPyApiSetUpDataContext(m);\n}\n\nclass PyFlowUnit : public modelbox::FlowUnit {\n public:\n  using FlowUnit::FlowUnit;  // Inherit constructors\n  Status Open(const std::shared_ptr<Configuration> &copnfigure) override {\n    PYBIND11_OVERLOAD_PURE(Status, FlowUnit, Open, copnfigure);\n  }\n\n  Status Close() override { PYBIND11_OVERLOAD_PURE(Status, FlowUnit, Close, ); }\n\n  Status Process(std::shared_ptr<DataContext> data_ctx) override {\n    PYBIND11_OVERLOAD_PURE(Status, IFlowUnit, Process, data_ctx);\n  }\n\n  Status DataPre(std::shared_ptr<DataContext> data_ctx) override {\n    PYBIND11_OVERLOAD_PURE(Status, 
IFlowUnit, DataPre, data_ctx);\n  }\n\n  Status DataPost(std::shared_ptr<DataContext> data_ctx) override {\n    PYBIND11_OVERLOAD_PURE(Status, IFlowUnit, DataPost, data_ctx);\n  }\n\n  Status DataGroupPre(std::shared_ptr<DataContext> data_ctx) override {\n    PYBIND11_OVERLOAD_PURE(Status, IFlowUnit, DataGroupPre, data_ctx);\n  }\n\n  Status DataGroupPost(std::shared_ptr<DataContext> data_ctx) override {\n    PYBIND11_OVERLOAD_PURE(Status, IFlowUnit, DataGroupPost, data_ctx);\n  }\n\n  std::shared_ptr<Device> GetBindDevice() {\n    PYBIND11_OVERLOAD(std::shared_ptr<Device>, FlowUnit, GetBindDevice, );\n  }\n\n  std::shared_ptr<ExternalData> CreateExternalData() const {\n    PYBIND11_OVERLOAD(std::shared_ptr<ExternalData>, FlowUnit,\n                      CreateExternalData);\n  }\n};\n\nvoid ModelboxPyApiSetUpFlowUnit(pybind11::module &m) {\n  py::class_<modelbox::FlowUnit, PyFlowUnit>(m, \"FlowUnit\", py::module_local())\n      .def(py::init<>())\n      .def(\"open\", &modelbox::FlowUnit::Open)\n      .def(\"close\", &modelbox::FlowUnit::Close)\n      .def(\"process\", &modelbox::FlowUnit::Process)\n      .def(\"data_pre\", &modelbox::FlowUnit::DataPre)\n      .def(\"data_post\", &modelbox::FlowUnit::DataPost)\n      .def(\"data_group_pre\", &modelbox::FlowUnit::DataGroupPre)\n      .def(\"data_group_post\", &modelbox::FlowUnit::DataGroupPost)\n      .def(\"get_bind_device\", &modelbox::FlowUnit::GetBindDevice)\n      .def(\"create_external_data\", &modelbox::FlowUnit::CreateExternalData)\n      .def(\n          \"create_buffer\",\n          [](modelbox::FlowUnit &flow,\n             py::buffer &b) -> std::shared_ptr<Buffer> {\n            auto buffer = std::make_shared<Buffer>(flow.GetBindDevice());\n            if (PyBufferToBuffer(buffer, b) != STATUS_OK) {\n              throw std::runtime_error(\"create buffer failed.\");\n            }\n            return buffer;\n          },\n          py::keep_alive<0, 1>());\n}\n\nvoid 
ModelboxPyApiSetUpEngine(pybind11::module &m) {\n  py::class_<modelbox::ModelBoxEngine,\n             std::shared_ptr<modelbox::ModelBoxEngine>>(m, \"ModelBoxEngine\")\n      .def(py::init<>())\n      .def(\n          \"init\",\n          [](ModelBoxEngine &env,\n             std::shared_ptr<modelbox::Configuration> &config) {\n            return env.Init(config);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"init\",\n          [](ModelBoxEngine &env,\n             std::unordered_map<std::string, std::string> &config) {\n            auto configuration = std::make_shared<modelbox::Configuration>();\n            for (auto &iter : config) {\n              configuration->SetProperty(iter.first, iter.second);\n            }\n            return env.Init(configuration);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\"shutdown\", &modelbox::ModelBoxEngine::ShutDown,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"close\", &modelbox::ModelBoxEngine::Close,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"create_input\",\n          [](ModelBoxEngine &env, const std::set<std::string> &port_map) {\n            py::gil_scoped_release release;\n            return env.CreateInput(port_map);\n          },\n          py::keep_alive<0, 1>())\n      .def(\n          \"execute\",\n          [](ModelBoxEngine &env, const std::string &name,\n             std::map<std::string, std::string> &config,\n             std::map<std::string, std::shared_ptr<DataHandler>> &data) {\n            py::gil_scoped_release release;\n            return env.Execute(name, config, data);\n          },\n          py::keep_alive<0, 1>())\n      .def(\n          \"execute\",\n          [](ModelBoxEngine &env, const std::string &name,\n             std::map<std::string, std::string> &config,\n             std::shared_ptr<DataHandler> &data) {\n            py::gil_scoped_release 
release;\n            return env.Execute(name, config, data);\n          },\n          py::keep_alive<0, 1>());\n}\n\nvoid ModelboxPyApiSetUpDataHandler(pybind11::module &m) {\n  py::class_<modelbox::DataHandler, std::shared_ptr<modelbox::DataHandler>>(\n      m, \"DataHandler\")\n      .def(py::init<>())\n      .def(\"close\", &modelbox::DataHandler::Close,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"__iter__\", [](DataHandler &data) -> DataHandler & { return data; },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"__next__\",\n          [](DataHandler &data) {\n            py::gil_scoped_release release;\n            auto buffer = data.GetData();\n            if (buffer == nullptr) {\n              throw pybind11::stop_iteration();\n            }\n            return buffer;\n          },\n          py::keep_alive<0, 1>())\n      .def(\n          \"__getitem__\",\n          [](DataHandler &data, const std::string &key) {\n            auto sub_data = data.GetDataHandler(key);\n            if (sub_data == nullptr) {\n              throw pybind11::index_error();\n            }\n            return sub_data;\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\"setmeta\",\n           static_cast<modelbox::Status (modelbox::DataHandler::*)(\n               const std::string &, const std::string &)>(\n               &modelbox::DataHandler::SetMeta),\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"pushdata\",\n           static_cast<modelbox::Status (modelbox::DataHandler::*)(\n               std::shared_ptr<modelbox::Buffer> &, const std::string &)>(\n               &modelbox::DataHandler::PushData),\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"pushdata\",\n           static_cast<modelbox::Status (modelbox::DataHandler::*)(\n               std::shared_ptr<DataHandler> &, const std::string &)>(\n               
&modelbox::DataHandler::PushData),\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"get_datahandler\", &modelbox::DataHandler::GetDataHandler,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"set_datahandler\", &modelbox::DataHandler::SetDataHandler,\n           py::call_guard<py::gil_scoped_release>());\n}\n\nvoid ModelboxPyApiSetUpFlowGraphDesc(pybind11::module &m) {\n  py::class_<modelbox::FlowGraphDesc, std::shared_ptr<modelbox::FlowGraphDesc>>(\n      m, \"FlowGraphDesc\")\n      .def(py::init<>())\n      .def(\"set_queue_size\", &modelbox::FlowGraphDesc::SetQueueSize)\n      .def(\"set_batch_size\", &modelbox::FlowGraphDesc::SetBatchSize)\n      .def(\"set_drivers_dir\", &modelbox::FlowGraphDesc::SetDriversDir)\n      .def(\"set_skip_default_drivers\",\n           &modelbox::FlowGraphDesc::SetSkipDefaultDrivers)\n      .def(\"set_profile_dir\", &modelbox::FlowGraphDesc::SetProfileDir)\n      .def(\"set_profile_trace_enable\",\n           &modelbox::FlowGraphDesc::SetProfileTraceEnable)\n      .def(\"add_input\", &modelbox::FlowGraphDesc::AddInput,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_output\",\n          [](FlowGraphDesc &self, const std::string &output_name,\n             const std::shared_ptr<FlowPortDesc> &source_node_port) {\n            return self.AddOutput(output_name, source_node_port);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_output\",\n          [](FlowGraphDesc &self, const std::string &output_name,\n             const std::shared_ptr<FlowNodeDesc> &source_node) {\n            return self.AddOutput(output_name, source_node);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_node\",\n          [](FlowGraphDesc &self, const std::string &flowunit_name,\n             const std::string &device, const std::vector<std::string> &config,\n             const 
std::unordered_map<std::string,\n                                      std::shared_ptr<FlowPortDesc>>\n                 &source_node_ports) {\n            return self.AddNode(flowunit_name, device, config,\n                                source_node_ports);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_node\",\n          [](FlowGraphDesc &self, const std::string &flowunit_name,\n             const std::string &device, const std::vector<std::string> &config,\n             const std::shared_ptr<FlowNodeDesc> &source_node) {\n            return self.AddNode(flowunit_name, device, config, source_node);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_node\",\n          [](FlowGraphDesc &self, const std::string &flowunit_name,\n             const std::string &device,\n             const std::unordered_map<std::string,\n                                      std::shared_ptr<FlowPortDesc>>\n                 &source_node_ports) {\n            return self.AddNode(flowunit_name, device, source_node_ports);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_node\",\n          [](FlowGraphDesc &self, const std::string &flowunit_name,\n             const std::string &device,\n             const std::shared_ptr<FlowNodeDesc> &source_node) {\n            return self.AddNode(flowunit_name, device, source_node);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_node\",\n          [](FlowGraphDesc &self, const std::string &flowunit_name,\n             const std::string &device,\n             const std::vector<std::string> &config) {\n            return self.AddNode(flowunit_name, device, config);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_function\",\n          [](FlowGraphDesc &self,\n             const 
std::function<StatusCode(std::shared_ptr<DataContext>)>\n                 &func,\n             const std::vector<std::string> &input_name_list,\n             const std::vector<std::string> &output_name_list,\n             const std::unordered_map<std::string,\n                                      std::shared_ptr<FlowPortDesc>>\n                 &source_node_ports) {\n            return self.AddFunction(func, input_name_list, output_name_list,\n                                    source_node_ports);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"add_function\",\n          [](FlowGraphDesc &self,\n             const std::function<StatusCode(std::shared_ptr<DataContext>)>\n                 &func,\n             const std::vector<std::string> &input_name_list,\n             const std::vector<std::string> &output_name_list,\n             const std::shared_ptr<FlowNodeDesc> &source_node) {\n            return self.AddFunction(func, input_name_list, output_name_list,\n                                    source_node);\n          },\n          py::call_guard<py::gil_scoped_release>());\n}\n\nvoid ModelboxPyApiSetUpFlowNodeDesc(pybind11::module &m) {\n  py::class_<modelbox::FlowNodeDesc, std::shared_ptr<modelbox::FlowNodeDesc>>(\n      m, \"FlowNodeDesc\")\n      .def(py::init<const std::string &>())\n      .def(\"set_node_name\", &modelbox::FlowNodeDesc::SetNodeName)\n      .def(\"get_node_name\", &modelbox::FlowNodeDesc::GetNodeName)\n      .def(\"__getitem__\",\n           [](modelbox::FlowNodeDesc &node_desc, const std::string &output_name)\n               -> std::shared_ptr<modelbox::FlowPortDesc> {\n             return node_desc[output_name];\n           })\n      .def(\"__getitem__\",\n           [](modelbox::FlowNodeDesc &node_desc,\n              size_t port_idx) -> std::shared_ptr<modelbox::FlowPortDesc> {\n             return node_desc[port_idx];\n           });\n}\n\nvoid 
ModelboxPyApiSetUpFlowPortDesc(pybind11::module &m) {\n  py::class_<modelbox::FlowPortDesc, std::shared_ptr<modelbox::FlowPortDesc>>(\n      m, \"FlowPortDesc\")\n      .def(py::init<std::shared_ptr<FlowNodeDesc>, const std::string &>())\n      .def(\"get_node_name\", &modelbox::FlowPortDesc::GetNodeName)\n      .def(\"get_port_name\", &modelbox::FlowPortDesc::GetPortName);\n}\n\nvoid ModelboxPyApiSetUpFlowStreamIO(pybind11::module &m) {\n  py::class_<PythonFlowStreamIO, std::shared_ptr<PythonFlowStreamIO>>(\n      m, \"FlowStreamIO\")\n      .def(py::init<std::shared_ptr<FlowStreamIO>>())\n      .def(\"create_buffer\", &PythonFlowStreamIO::CreateBuffer,\n           py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"create_buffer\",\n          [](PythonFlowStreamIO &self, const py::buffer &data) {\n            auto buffer = self.CreateBuffer();\n            if (PyBufferToBuffer(buffer, data) != STATUS_OK) {\n              throw std::runtime_error(\"Failed to create buffer\");\n            }\n            return buffer;\n          },\n          py::keep_alive<0, 1>())\n      .def(\n          \"create_buffer\",\n          [](PythonFlowStreamIO &self, const std::string &data) {\n            auto buffer = self.CreateBuffer();\n            StrToBuffer(buffer, data);\n            return buffer;\n          },\n          py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"send\", &PythonFlowStreamIO::Send,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"send\",\n          [](PythonFlowStreamIO &self, const std::string &input_name,\n             const py::buffer &data) {\n            auto buffer = self.CreateBuffer();\n            {\n              py::gil_scoped_acquire ac;\n              if (PyBufferToBuffer(buffer, data) != STATUS_OK) {\n                throw std::runtime_error(\"Failed to send buffer\");\n              }\n            }\n            return 
self.Send(input_name, buffer);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"send\",\n          [](PythonFlowStreamIO &self, const std::string &input_name,\n             const std::string &data) {\n            auto buffer = self.CreateBuffer();\n            StrToBuffer(buffer, data);\n            return self.Send(input_name, buffer);\n          },\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"recv\",\n          [](PythonFlowStreamIO &self, const std::string &output_name,\n             std::shared_ptr<Buffer> &buffer,\n             size_t timeout) -> modelbox::Status {\n            std::shared_ptr<Buffer> out_buffer;\n            auto ret = self.Recv(output_name, out_buffer, timeout);\n            if (ret != STATUS_OK) {\n              return ret;\n            }\n\n            *buffer = *out_buffer;\n            return modelbox::STATUS_OK;\n          },\n          py::arg(\"output_name\"), py::arg(\"buffer\"), py::arg(\"timeout\") = 0,\n          py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"recv\",\n          [](PythonFlowStreamIO &self, const std::string &output_name,\n             size_t timeout) {\n            std::shared_ptr<Buffer> buffer;\n            self.Recv(output_name, buffer, timeout);\n            return buffer;\n          },\n          py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"recv\",\n          [](PythonFlowStreamIO &self, const std::string &output_name) {\n            std::shared_ptr<Buffer> buffer;\n            self.Recv(output_name, buffer, 0);\n            return buffer;\n          },\n          py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"close_input\", &PythonFlowStreamIO::CloseInput,\n           py::call_guard<py::gil_scoped_release>());\n}\n\nvoid ModelBoxPyApiSetUpExternalDataMapSimple(pybind11::module &m) {\n  py::class_<modelbox::ExternalDataSimple,\n      
       std::shared_ptr<modelbox::ExternalDataSimple>>(\n      m, \"ExternalDataSimple\")\n      .def(py::init<std::shared_ptr<ExternalDataMap> &>())\n      .def(\"pushdata\",\n           [](ExternalDataSimple &extern_data_simple,\n              const std::string &port_name,\n              std::shared_ptr<BufferList> &bufferlist) {\n             py::gil_scoped_release release;\n             return extern_data_simple.PushData(port_name, bufferlist);\n           })\n      .def(\"getresult\",\n           [](ExternalDataSimple &extern_data_simple,\n              const std::string &port_name,\n              int timeout = 0) -> std::shared_ptr<Buffer> {\n             py::gil_scoped_release release;\n             std::shared_ptr<Buffer> buffer;\n             if (STATUS_OK ==\n                 extern_data_simple.GetResult(port_name, buffer, timeout)) {\n               return buffer;\n             }\n             return nullptr;\n           });\n}\n\nvoid ModelboxPyApiSetUpModel(pybind11::module &m) {\n  py::class_<PythonModel, std::shared_ptr<PythonModel>>(m, \"Model\",\n                                                        py::module_local())\n      .def(py::init<std::string, std::string, size_t, std::string,\n                    std::string>())\n      .def(\"add_path\", &PythonModel::AddPath,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"start\", &PythonModel::Start,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"stop\", &PythonModel::Stop, py::call_guard<py::gil_scoped_release>())\n      .def(\"infer\", &PythonModel::Infer,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"infer_batch\", &PythonModel::InferBatch,\n           py::call_guard<py::gil_scoped_release>());\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/modelbox_api.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_H_\n#define MODELBOX_PYTHON_MODELBOX_API_H_\n\n#include <modelbox/base/log.h>\n#include <pybind11/numpy.h>\n#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n\ntemplate <typename T>\npy::array mkarray_via_buffer(void *data, size_t n) {\n  return py::array(py::buffer_info(data, sizeof(T),\n                                   py::format_descriptor<T>::format(), 1, {n},\n                                   {sizeof(T)}));\n}\n\nvoid ModelboxPyApiSetUpStatus(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpConfiguration(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpLogLevel(pybind11::handle &h);\n\nvoid ModelboxPyApiSetUpLog(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpBuffer(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpBufferList(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpGeneric(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpFlowUnit(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpEngine(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpDataHandler(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpFlowGraphDesc(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpFlowNodeDesc(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpFlowPortDesc(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpFlowStreamIO(pybind11::module &m);\n\nvoid 
ModelBoxPyApiSetUpExternalDataMapSimple(pybind11::module &m);\n\nvoid ModelboxPyApiSetUpModel(pybind11::module &m);\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_MODELBOX_API_H_\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_common.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <pybind11/chrono.h>\n#include <pybind11/complex.h>\n#include <pybind11/functional.h>\n#include <pybind11/numpy.h>\n#include <pybind11/operators.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n\n#include <unordered_map>\n\n#include \"modelbox/flow.h\"\n#include \"modelbox/type.h\"\n\nnamespace modelbox {\n\nnamespace py = pybind11;\n\nstatic std::unordered_map<int, std::string> kTypeToNPType;\nstatic std::unordered_map<std::string, ModelBoxDataType> kNPTypeToType;\n\nvoid BuildTypeToNumpyType() {\n  if (kTypeToNPType.empty() && kNPTypeToType.empty()) {\n    kTypeToNPType[MODELBOX_UINT8] = py::format_descriptor<uint8_t>::format();\n    kTypeToNPType[MODELBOX_INT8] = py::format_descriptor<int8_t>::format();\n    kTypeToNPType[MODELBOX_BOOL] = py::format_descriptor<bool>::format();\n    kTypeToNPType[MODELBOX_INT16] = py::format_descriptor<int16_t>::format();\n    kTypeToNPType[MODELBOX_UINT16] = py::format_descriptor<uint16_t>::format();\n    kTypeToNPType[MODELBOX_INT32] = py::format_descriptor<int32_t>::format();\n    kTypeToNPType[MODELBOX_UINT32] = py::format_descriptor<uint32_t>::format();\n    kTypeToNPType[MODELBOX_INT64] = py::format_descriptor<int64_t>::format();\n    kTypeToNPType[MODELBOX_UINT64] = py::format_descriptor<uint64_t>::format();\n    kTypeToNPType[MODELBOX_FLOAT] = 
py::format_descriptor<float>::format();\n    kTypeToNPType[MODELBOX_DOUBLE] = py::format_descriptor<double>::format();\n    kNPTypeToType[py::format_descriptor<uint8_t>::format()] = MODELBOX_UINT8;\n    kNPTypeToType[py::format_descriptor<int8_t>::format()] = MODELBOX_INT8;\n    kNPTypeToType[py::format_descriptor<bool>::format()] = MODELBOX_BOOL;\n    kNPTypeToType[py::format_descriptor<int16_t>::format()] = MODELBOX_INT16;\n    kNPTypeToType[py::format_descriptor<uint16_t>::format()] = MODELBOX_UINT16;\n    kNPTypeToType[py::format_descriptor<int32_t>::format()] = MODELBOX_INT32;\n    kNPTypeToType[py::format_descriptor<uint32_t>::format()] = MODELBOX_UINT32;\n    kNPTypeToType[py::format_descriptor<int64_t>::format()] = MODELBOX_INT64;\n    kNPTypeToType[py::format_descriptor<uint64_t>::format()] = MODELBOX_UINT64;\n    kNPTypeToType[py::format_descriptor<float>::format()] = MODELBOX_FLOAT;\n    kNPTypeToType[py::format_descriptor<double>::format()] = MODELBOX_DOUBLE;\n    kNPTypeToType[\"e\"] = MODELBOX_FLOAT;\n    kNPTypeToType[\"l\"] = MODELBOX_INT64;\n  }\n}\n\nstd::string FormatStrFromType(const modelbox::ModelBoxDataType &type) {\n  BuildTypeToNumpyType();\n\n  auto iter = kTypeToNPType.find(type);\n  if (iter == kTypeToNPType.end()) {\n    std::string errmsg = \"invalid modelbox data type: \";\n    errmsg += std::to_string(type);\n    throw std::runtime_error(errmsg);\n  }\n\n  return iter->second;\n}\n\nmodelbox::ModelBoxDataType TypeFromFormatStr(const std::string &format) {\n  BuildTypeToNumpyType();\n\n  auto iter = kNPTypeToType.find(format);\n  if (iter == kNPTypeToType.end()) {\n    throw std::runtime_error(\"invalid numpy data type: \" + format);\n  }\n\n  return iter->second;\n}\n\nStatus PyBufferToBuffer(const std::shared_ptr<Buffer> &buffer,\n                        const py::buffer &data) {\n  py::buffer_info info = data.request();\n  std::vector<size_t> i_shape;\n  Status ret;\n\n  for (auto &dim : info.shape) {\n    i_shape.push_back(dim);\n 
 }\n\n  if (info.shape.size() == 0) {\n    throw std::runtime_error(\"can not accept empty numpy.\");\n  }\n\n  size_t bytes = Volume(i_shape) * info.itemsize;\n  if (PyBuffer_IsContiguous(info.view(), 'C')) {\n    auto *buffer_obj = new (std::nothrow) py::buffer;\n    if (buffer_obj == nullptr) {\n      return {STATUS_NOBUFS, \"alloc py::buffer failed.\"};\n    }\n\n    *buffer_obj = data;\n    ret = buffer->BuildFromHost(info.ptr, bytes,\n                                [buffer_obj](void *pbuff) mutable {\n                                  py::gil_scoped_acquire interpreter_guard{};\n                                  delete buffer_obj;\n                                });\n  } else {\n    // py_buffer is not C Contiguous, need convert\n    ret = buffer->Build(bytes);\n    if (ret != STATUS_OK) {\n      return ret;\n    }\n    if (PyBuffer_ToContiguous(buffer->MutableData(), info.view(), bytes, 'C') !=\n        0) {\n      return {STATUS_NOBUFS, \"convert numpy to contiguous failed.\"};\n    }\n  }\n\n  buffer->Set(\"shape\", i_shape);\n  buffer->Set(\"type\", TypeFromFormatStr(info.format));\n  buffer->SetGetBufferType(modelbox::BufferEnumType::RAW);\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_common.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_COMMON_H_\n#define MODELBOX_PYTHON_MODELBOX_API_COMMON_H_\n\n#include <pybind11/pybind11.h>\n\n#include <memory>\n\nnamespace modelbox {\n\nstd::string FormatStrFromType(const ModelBoxDataType &type);\n\nModelBoxDataType TypeFromFormatStr(const std::string &format);\n\nStatus PyBufferToBuffer(const std::shared_ptr<Buffer> &buffer,\n                        const py::buffer &data);\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_MODELBOX_API_COMMON_H_\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_flow.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python_flow.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nPythonFlowStreamIO::PythonFlowStreamIO(\n    std::shared_ptr<modelbox::FlowStreamIO> io)\n    : io_(std::move(io)) {}\n\nPythonFlowStreamIO::~PythonFlowStreamIO() {\n  // we need release gil before clear flow resource\n  py::gil_scoped_release release;\n  io_->CloseInput();\n}\n\nstd::shared_ptr<modelbox::Buffer> PythonFlowStreamIO::CreateBuffer() {\n  return io_->CreateBuffer();\n}\n\nmodelbox::Status PythonFlowStreamIO::Send(\n    const std::string &input_name,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  return io_->Send(input_name, buffer);\n}\n\nmodelbox::Status PythonFlowStreamIO::Recv(\n    const std::string &output_name, std::shared_ptr<modelbox::Buffer> &buffer,\n    size_t timeout) {\n  return io_->Recv(output_name, buffer, timeout);\n}\n\nvoid PythonFlowStreamIO::CloseInput() { io_->CloseInput(); }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_flow.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_FLOW_H_\n#define MODELBOX_PYTHON_MODELBOX_API_FLOW_H_\n\n#include <pybind11/pybind11.h>\n\n#include \"modelbox/flow.h\"\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n\nclass PythonFlowStreamIO {\n public:\n  PythonFlowStreamIO(std::shared_ptr<modelbox::FlowStreamIO> io);\n\n  virtual ~PythonFlowStreamIO();\n\n  std::shared_ptr<modelbox::Buffer> CreateBuffer();\n\n  modelbox::Status Send(const std::string &input_name,\n                        const std::shared_ptr<modelbox::Buffer> &buffer);\n\n  modelbox::Status Recv(const std::string &output_name,\n                        std::shared_ptr<modelbox::Buffer> &buffer,\n                        size_t timeout = 0);\n\n  void CloseInput();\n\n private:\n  std::shared_ptr<modelbox::FlowStreamIO> io_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_PYTHON_MODELBOX_API_FLOW_H_\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_flowunit.cc",
    "content": ""
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n#define MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n\n#include <modelbox/base/log.h>\n#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n \n\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_log.cc",
    "content": "\n#include \"python_log.h\"\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nstatic std::shared_ptr<FlowUnitPythonLog> kInst = nullptr;\n\nvoid FlowUnitPythonLog::Init() {\n  if (!kInst) {\n    auto* data = new FlowUnitPythonLog();\n    py::gil_scoped_acquire interpreter_guard{};\n    data->inspect_module_ = py::module::import(\"inspect\");\n    kInst = std::shared_ptr<FlowUnitPythonLog>(\n        data, [](FlowUnitPythonLog* ptr) { delete ptr; });\n  }\n}\n\nvoid FlowUnitPythonLog::Finish() { kInst = nullptr; }\n\nFlowUnitPythonLog& FlowUnitPythonLog::Instance() {\n  if (!kInst) {\n    Init();\n  }\n  return *kInst;\n}\n\nFlowUnitPythonLog::FlowUnitPythonLog() = default;\n\nFlowUnitPythonLog::~FlowUnitPythonLog() {\n  // Avoid crash when log destroy.\n  if (inspect_module_.ref_count() == 1) {\n    inspect_module_.release();\n  }\n}\n\nvoid FlowUnitPythonLog::SetLogLevel(LogLevel level) {\n  ModelBoxLogger.GetLogger()->SetLogLevel(level);\n}\n\nvoid FlowUnitPythonLog::Debug(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_DEBUG, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Info(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_INFO, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Notice(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_NOTICE, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Warn(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_WARN, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Error(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_ERROR, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Fatal(const py::args& args, const py::kwargs& kwargs) {\n  Instance().Log(modelbox::LOG_FATAL, args, kwargs);\n}\n\nvoid FlowUnitPythonLog::Log(LogLevel level, const py::args& args,\n                            const py::kwargs& kwargs) {\n  if 
(ModelBoxLogger.CanLog(level) == false) {\n    return;\n  }\n\n  std::string msg{};\n  for (unsigned int i = 0; i < args.size(); i++) {\n    if (i > 0) {\n      msg += \", \";\n    }\n    msg += pybind11::str(args[i]);\n  }\n\n  auto frame = inspect_module_.attr(\"currentframe\")();\n  auto info = inspect_module_.attr(\"getframeinfo\")(frame);\n\n  auto filename = info.attr(\"filename\").cast<std::string>();\n  int last_slash_index = filename.find_last_of('/');\n  const char* s_filename = filename.c_str();\n  if (last_slash_index > 0) {\n    s_filename = filename.c_str() + last_slash_index + 1;\n  }\n\n  ModelBoxLogger.Print(level, s_filename, info.attr(\"lineno\").cast<int>(),\n                       info.attr(\"function\").cast<std::string>().c_str(), \"%s\",\n                       msg.c_str());\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n#define MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n\n#include <modelbox/base/log.h>\n#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n\nclass __attribute__((visibility(\"hidden\"))) FlowUnitPythonLog {\n public:\n  static void Init();\n  static void Finish();\n  static FlowUnitPythonLog &Instance();\n  static void SetLogLevel(LogLevel level);\n  static void Debug(const py::args& args, const py::kwargs& kwargs);\n  static void Info(const py::args& args, const py::kwargs& kwargs);\n  static void Notice(const py::args& args, const py::kwargs& kwargs);\n  static void Warn(const py::args& args, const py::kwargs& kwargs);\n  static void Error(const py::args& args, const py::kwargs& kwargs);\n  static void Fatal(const py::args& args, const py::kwargs& kwargs);\n\n private:\n  FlowUnitPythonLog();\n  FlowUnitPythonLog(FlowUnitPythonLog &) = delete;\n  void operator=(FlowUnitPythonLog &) = delete;\n  virtual ~FlowUnitPythonLog();\n\n  void Log(LogLevel level, const py::args &args, const py::kwargs &kwargs);\n  py::module inspect_module_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_MODELBOX_API_LOG_H_\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_model.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python_model.h\"\n\n#include <modelbox/base/utils.h>\n\n#include <regex>\n#include <toml.hpp>\n\n#include \"python_common.h\"\n\nnamespace modelbox {\n\nPythonModel::PythonModel(std::string path, std::string name,\n                         size_t max_batch_size, std::string device,\n                         std::string device_id)\n    : name_(std::move(name)),\n      max_batch_size_(std::to_string(max_batch_size)),\n      device_(std::move(device)),\n      device_id_(std::move(device_id)) {\n  path_.emplace_back(std::move(path));\n}\n\nPythonModel::~PythonModel() {\n  // we need release gil before clear flow resource\n  py::gil_scoped_release release;\n  Stop();\n}\n\nvoid PythonModel::AddPath(const std::string &path) { path_.emplace_back(path); }\n\nmodelbox::Status PythonModel::Start() {\n  auto ret = ReadModelIO(in_names_, out_names_);\n  if (!ret) {\n    MBLOG_ERROR << \"read model io failed\";\n    return ret;\n  }\n\n  flow_graph_desc_ = std::make_shared<modelbox::FlowGraphDesc>();\n  flow_graph_desc_->SetDriversDir(path_);\n\n  std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>> source_ports;\n  for (const auto &in_name : in_names_) {\n    auto in_port = flow_graph_desc_->AddInput(in_name);\n    source_ports[in_name] = (*in_port)[0];\n  }\n\n  auto inference = 
flow_graph_desc_->AddNode(\n      name_, device_,\n      {\"batch_size=\" + max_batch_size_, \"deviceid=\" + device_id_},\n      source_ports);\n\n  for (const auto &out_name : out_names_) {\n    flow_graph_desc_->AddOutput(out_name, (*inference)[out_name]);\n  }\n\n  auto flow = std::make_shared<modelbox::Flow>();\n  auto status = flow->Init(flow_graph_desc_);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"init flow failed, \" << status;\n    return status;\n  }\n\n  status = flow->StartRun();\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"start flow failed, \" << status;\n    return status;\n  }\n\n  flow_ = flow;\n  return STATUS_SUCCESS;\n}\n\nvoid PythonModel::Stop() {\n  if (flow_ != nullptr) {\n    flow_->Stop();\n  }\n\n  flow_ = nullptr;\n  flow_graph_desc_ = nullptr;\n}\n\nstd::vector<std::shared_ptr<Buffer>> PythonModel::Infer(\n    const std::vector<py::buffer> &data_list) {\n  std::vector<std::shared_ptr<Buffer>> result_list;\n  Status ret;\n\n  if (data_list.size() != in_names_.size()) {\n    MBLOG_ERROR << \"infer input data size != model input count\";\n    return result_list;\n  }\n\n  auto io = flow_->CreateStreamIO();\n  auto in_port_count = in_names_.size();\n  for (size_t i = 0; i < in_port_count; ++i) {\n    auto buffer = io->CreateBuffer();\n    {\n      py::gil_scoped_acquire ac;\n      ret = PyBufferToBuffer(buffer, data_list[i]);\n      if (ret != STATUS_OK) {\n        MBLOG_ERROR << \"infer input data failed, err \" << ret;\n        return result_list;\n      }\n    }\n    ret = io->Send(in_names_[i], buffer);\n    if (ret != STATUS_OK) {\n      MBLOG_ERROR << \"infer send data failed, err \" << ret;\n      return result_list;\n    }\n  }\n  io->CloseInput();\n\n  result_list.reserve(out_names_.size());\n  for (auto &out_name : out_names_) {\n    std::shared_ptr<Buffer> out_buffer;\n    io->Recv(out_name, out_buffer, 0);\n    result_list.push_back(out_buffer);\n  }\n\n  return 
result_list;\n}\n\nstd::vector<std::vector<std::shared_ptr<Buffer>>> PythonModel::InferBatch(\n    const std::vector<std::vector<py::buffer>> &data_list) {\n  // input[port1[batch1,batch2],port2[batch1,batch2]]\n  // output[port1[batch1,batch2],port2[batch1,batch2]]\n  std::vector<std::vector<std::shared_ptr<Buffer>>> result_list;\n  Status ret;\n  \n  if (data_list.size() != in_names_.size()) {\n    MBLOG_ERROR << \"infer input data size != model input count\";\n    return result_list;\n  }\n\n  const auto &first_port_batch = data_list.front();\n  auto input_batch_size = first_port_batch.size();\n  if (input_batch_size == 0) {\n    MBLOG_ERROR << \"infer input batch size is zero.\";\n    return result_list;\n  }\n\n  auto in_port_count = in_names_.size();\n  auto io = flow_->CreateStreamIO();\n  auto max_batch_size = atoi(max_batch_size_.c_str());\n  max_batch_size = max_batch_size < 2 ? 1 : max_batch_size;\n  auto infer_times = ceil(input_batch_size * 1.0 / max_batch_size);\n  for (size_t t = 0; t < infer_times; ++t) {\n    size_t batch_begin = t * max_batch_size;\n    size_t batch_end = std::min((t + 1) * max_batch_size, input_batch_size);\n    for (size_t i = 0; i < in_port_count; ++i) {\n      std::vector<std::shared_ptr<Buffer>> input_list;\n      for (size_t batch_idx = batch_begin; batch_idx < batch_end; ++batch_idx) {\n        auto buffer = io->CreateBuffer();\n        {\n          py::gil_scoped_acquire ac;\n          ret = PyBufferToBuffer(buffer, data_list[i][batch_idx]);\n          if (ret != STATUS_OK) {\n            MBLOG_ERROR << \"infer input data failed, err \" << ret;\n            return result_list;\n          }\n        }\n        input_list.push_back(buffer);\n      }\n\n      ret = io->Send(in_names_[i], input_list);\n      if (ret != STATUS_OK) {\n        MBLOG_ERROR << \"infer send \" << in_names_[i] << \" data failed, err \"\n                    << ret;\n        return result_list;\n      }\n    }\n  }\n  io->CloseInput();\n\n  auto 
out_port_count = out_names_.size();\n  result_list.resize(out_port_count);\n  for (size_t batch_idx = 0; batch_idx < input_batch_size; ++batch_idx) {\n    for (size_t i = 0; i < out_port_count; ++i) {\n      std::shared_ptr<Buffer> out_buffer;\n      io->Recv(out_names_[i], out_buffer, 0);\n      result_list[i].push_back(out_buffer);\n    }\n  }\n\n  return result_list;\n}\n\nmodelbox::Status PythonModel::ReadModelIO(std::vector<std::string> &in_names,\n                                          std::vector<std::string> &out_names) {\n  std::vector<std::string> files;\n  auto ret = modelbox::ListSubDirectoryFiles(path_.front(), \"*.toml\", &files);\n  if (!ret) {\n    MBLOG_ERROR << \"list file in path \" << path_.front() << \" failed, error \"\n                << ret;\n    return ret;\n  }\n\n  if (files.empty()) {\n    MBLOG_ERROR << \"no valid model conf in path \" << path_.front();\n    return STATUS_BADCONF;\n  }\n\n  std::stringstream err_msg_cache;\n  for (auto &file : files) {\n    try {\n      auto fu_config = toml::parse(file);\n      auto name = toml::find<std::string>(fu_config, \"base\", \"name\");\n      if (name != name_) {\n        continue;\n      }\n\n      std::ifstream ifs(file);\n      if (!ifs.good()) {\n        err_msg_cache << \"[\" << file << \"] read failed\" << std::endl;\n        continue;\n      }\n\n      Defer { ifs.close(); };\n\n      // try to keep input and output order in config file\n      std::string content((std::istreambuf_iterator<char>(ifs)),\n                          std::istreambuf_iterator<char>());\n      std::smatch search_result;\n\n      auto search_text = content;\n      std::regex input_regex(R\"(\\[input\\.(.*?)\\])\");\n      std::vector<std::string> input_key_list;\n      while (std::regex_search(search_text, search_result, input_regex)) {\n        input_key_list.push_back(search_result[1]);\n        search_text = search_result.suffix();\n      }\n\n      search_text = content;\n      std::regex 
output_regex(R\"(\\[output\\.(.*?)\\])\");\n      std::vector<std::string> output_key_list;\n      while (std::regex_search(search_text, search_result, output_regex)) {\n        output_key_list.push_back(search_result[1]);\n        search_text = search_result.suffix();\n      }\n\n      for (const auto &input_key : input_key_list) {\n        auto input_name =\n            toml::find<std::string>(fu_config, \"input\", input_key, \"name\");\n        in_names.push_back(input_name);\n      }\n\n      for (const auto &output_key : output_key_list) {\n        auto output_name =\n            toml::find<std::string>(fu_config, \"output\", output_key, \"name\");\n        out_names.push_back(output_name);\n      }\n\n      return STATUS_OK;\n    } catch (std::exception &e) {\n      err_msg_cache << \"[\" << file << \"] parse toml failed, err: \" << e.what()\n                    << std::endl;\n      continue;\n    }\n  }\n\n  auto err_msg = err_msg_cache.str();\n  if (err_msg.empty()) {\n    err_msg = \" target model not found\";\n  }\n\n  MBLOG_ERROR << \"can not load IO info for model \" << name_ << \" in path \"\n              << path_.front() << \", detail: \" << err_msg;\n  return modelbox::STATUS_BADCONF;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/common/python/modelbox_api/python_model.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PYTHON_MODELBOX_API_MODEL_H_\n#define MODELBOX_PYTHON_MODELBOX_API_MODEL_H_\n\n#include <pybind11/pybind11.h>\n\n#include \"modelbox/flow.h\"\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n\nclass PythonModel {\n public:\n  PythonModel(std::string path, std::string name, size_t max_batch_size,\n              std::string device, std::string device_id);\n\n  virtual ~PythonModel();\n\n  void AddPath(const std::string &path);\n\n  modelbox::Status Start();\n\n  void Stop();\n\n  std::vector<std::shared_ptr<Buffer>> Infer(\n      const std::vector<py::buffer> &data_list);\n\n  std::vector<std::vector<std::shared_ptr<Buffer>>> InferBatch(\n      const std::vector<std::vector<py::buffer>> &data_list);\n\n private:\n  modelbox::Status ReadModelIO(std::vector<std::string> &in_names,\n                               std::vector<std::string> &out_names);\n\n  std::vector<std::string> path_;\n  std::string name_;\n  std::string max_batch_size_;\n  std::string device_;\n  std::string device_id_;\n\n  std::vector<std::string> in_names_;\n  std::vector<std::string> out_names_;\n\n  std::shared_ptr<modelbox::FlowGraphDesc> flow_graph_desc_;\n  std::shared_ptr<modelbox::Flow> flow_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_MODELBOX_API_MODEL_H_\n"
  },
  {
    "path": "src/drivers/devices/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-device)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif(ACL_FOUND AND DSMI_FOUND)\n    add_subdirectory(ascend)\nendif()\n\nadd_subdirectory(cpu)\n\nif(CUDA_FOUND)\n    add_subdirectory(cuda)\nendif()\n\nif(ROCKCHIP_FOUND)\n    add_subdirectory(rockchip)\nendif()\n"
  },
  {
    "path": "src/drivers/devices/ascend/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-ascend)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nadd_subdirectory(core)\nadd_subdirectory(flowunit)"
  },
  {
    "path": "src/drivers/devices/ascend/core/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(DEVICE_NAME \"ascend\")\nproject(modelbox-devices-${DEVICE_NAME})\n\nfile(GLOB_RECURSE LIBMODELBOX_DEVICE_SOURCES *.cpp *.cc *.c)\nset(LIBMODELBOX_DEVICE_ASCEND_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${DSMI_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_DEVICE_STREAM_INCLUDE})\n\nset(HEADER \n    ${LIBMODELBOX_DEVICE_ASCEND_INCLUDE}/modelbox\n)\n\nset(LIBMODELBOX_DEVICE_ASCEND_STATIC libmodelbox-device-${DEVICE_NAME}-static)\nset(LIBMODELBOX_DEVICE_ASCEND_SHARED libmodelbox-device-${DEVICE_NAME}-shared)\n\nadd_library(${LIBMODELBOX_DEVICE_ASCEND_STATIC} STATIC ${LIBMODELBOX_DEVICE_SOURCES})\nadd_library(${LIBMODELBOX_DEVICE_ASCEND_SHARED} SHARED ${LIBMODELBOX_DEVICE_SOURCES})\n\nset_target_properties(${LIBMODELBOX_DEVICE_ASCEND_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} 
${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} ${ACL_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} ${DSMI_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_STATIC} ${MODELBOX_COMMON_DEVICE_STREAM_LIBRARY})\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} ${DSMI_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ASCEND_SHARED} ${MODELBOX_COMMON_DEVICE_STREAM_LIBRARY})\n\nset_target_properties(${LIBMODELBOX_DEVICE_ASCEND_STATIC} ${LIBMODELBOX_DEVICE_ASCEND_SHARED} \n    PROPERTIES OUTPUT_NAME \"modelbox-device-${DEVICE_NAME}\"\n)\nset_target_properties(${LIBMODELBOX_DEVICE_ASCEND_STATIC} ${LIBMODELBOX_DEVICE_ASCEND_SHARED}\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${TEST_WORKING_LIB_DIR}\"\n    RUNTIME_OUTPUT_DIRECTORY \"${TEST_WORKING_BIN_DIR}\"\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox-device-${DEVICE_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc @ONLY)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_ASCEND_SHARED} \n    COMPONENT ascend-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_ASCEND_STATIC} \n    COMPONENT ascend-device-flowunit-devel\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL)\n\ninstall(DIRECTORY \n    ${HEADER} \n    DESTINATION 
${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT ascend-device-flowunit-devel\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc \n    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig \n    COMPONENT ascend-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_DEVICE_ASCEND_STATIC ${LIBMODELBOX_DEVICE_ASCEND_STATIC} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ASCEND_SHARED ${LIBMODELBOX_DEVICE_ASCEND_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ASCEND_INCLUDE ${LIBMODELBOX_DEVICE_ASCEND_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ASCEND_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_SOURCES ${LIBMODELBOX_DEVICE_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${LIBMODELBOX_DEVICE_ASCEND_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/core/ascend_memory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/ascend/ascend_memory.h\"\n\n#include <dsmi_common_interface.h>\n\nnamespace modelbox {\n\nstd::map<uint32_t, std::string> g_ascend_flags_name{\n    {ASCEND_MEM_DVPP, \"ASCEND_MEM_DVPP\"},\n    {ASCEND_MEM_NORMAL, \"ASCEND_MEM_NORMAL\"}};\n\n/**\n * @brief Call be ascend stream.\n **/\nvoid AscendReleaseMemoryTask(void *mem_list_ptr) {\n  auto *list =\n      (std::vector<std::shared_ptr<const DeviceMemory>> *)(mem_list_ptr);\n  list->clear();\n  delete list;\n}\n\nvoid AscendReleaseMemoryAsync(void *mem_list_ptr) {\n  // Should not operate the mem and stream in this callback\n  auto *timer = GetTimer();\n  auto task = std::make_shared<TimerTask>();\n  task->Callback(AscendReleaseMemoryTask, mem_list_ptr);\n  task->SetName(\"AscendMemReleaseTask\");\n  timer->Schedule(task, 0, 0, true);\n}\n\nAscendStream::AscendStream(int32_t device_id, uint64_t callback_tid)\n    : device_id_(device_id), callback_thread_id_(callback_tid) {}\n\nAscendStream::~AscendStream() { Deinit(); }\n\nStatus AscendStream::Sync() const {\n  auto ret = aclrtSetDevice(device_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind ascend device \" << device_id_ << \" failed, acl ret \"\n                << ret;\n    return STATUS_FAULT;\n  }\n\n  ret = aclrtSynchronizeStream(stream_);\n  if (ret != ACL_SUCCESS) 
{\n    MBLOG_ERROR << \"Ascend stream sync failed, device id \" << device_id_\n                << \",acl ret \" << ret;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus AscendStream::Bind(\n    std::vector<std::shared_ptr<const DeviceMemory>> mem_list) const {\n  auto ret = aclrtSetDevice(device_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind ascend device \" << device_id_ << \" failed, acl ret \"\n                << ret;\n    return STATUS_FAULT;\n  }\n\n  auto *mem_list_ptr =\n      new (std::nothrow) std::vector<std::shared_ptr<const DeviceMemory>>();\n  if (mem_list_ptr == nullptr) {\n    MBLOG_ERROR << \"New std::vector<>() failed\";\n    return STATUS_FAULT;\n  }\n\n  mem_list_ptr->assign(mem_list.begin(), mem_list.end());\n  ret =\n      aclrtLaunchCallback(AscendReleaseMemoryAsync, (void *)mem_list_ptr,\n                          aclrtCallbackBlockType::ACL_CALLBACK_BLOCK, stream_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtLaunchCallback failed, acl ret \" << ret;\n    delete mem_list_ptr;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus AscendStream::Init() {\n  if (init_flag_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = aclrtSetDevice(device_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind ascend device \" << device_id_ << \" failed, acl ret \"\n                << ret;\n    return STATUS_FAULT;\n  }\n\n  ret = aclrtCreateStream(&stream_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Create ascend stream failed, acl ret \" << ret;\n    return STATUS_FAULT;\n  }\n\n  ret = aclrtSubscribeReport(callback_thread_id_, stream_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSubscribeReport failed, acl ret \" << ret;\n    aclrtDestroyStream(stream_);\n    return STATUS_FAULT;\n  }\n\n  init_flag_ = true;\n  return STATUS_SUCCESS;\n}\n\nvoid AscendStream::Deinit() {\n  if (!init_flag_) {\n    return;\n  }\n\n  auto ret = aclrtSetDevice(device_id_);\n  if 
(ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSetDevice failed, acl ret \" << ret;\n  }\n\n  ret = aclrtUnSubscribeReport(callback_thread_id_, stream_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtUnSubscribeReport failed, acl ret \" << ret;\n  }\n\n  ret = aclrtDestroyStream(stream_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtDestroyStream failed, acl ret \" << ret;\n  }\n\n  init_flag_ = false;\n}\n\nAscendStreamPool::AscendStreamPool(const std::string &device_id) {\n  device_id_ = atoi(device_id.c_str());\n  stream_callback_thread_ =\n      std::make_shared<std::thread>(&AscendStreamPool::StreamCallBack, this);\n  std::stringstream ss;\n  ss << stream_callback_thread_->get_id();\n  callback_thread_id_ = std::stoull(ss.str());\n  stream_shrink_timer_.SetName(\"AscendStreamShrinkTimer\");\n  stream_shrink_timer_.Start();\n  // TODO: Activate timer to shrink stream pool\n}\n\nvoid AscendStreamPool::StreamCallBack() {\n  while (!is_exit_) {\n    auto ret = aclrtProcessReport(100);\n    if (ret == ACL_ERROR_RT_THREAD_SUBSCRIBE) {\n      std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    } else if (ret == ACL_ERROR_RT_REPORT_TIMEOUT) {\n      // This is ok\n    } else if (ret != ACL_SUCCESS) {\n      MBLOG_ERROR << \"aclrtProcessReport return err \" << ret;\n      return;\n    }\n  }\n}\n\nvoid AscendStreamPool::Shrink() {\n  std::list<AscendStream *> stream_to_del;\n  {\n    std::lock_guard<std::mutex> lock(stream_list_lock_);\n    MBLOG_INFO << \"AscendStreamPool before shrink, total stream:\"\n               << allocate_count_ << \", idle stream:\" << stream_list_.size();\n    auto keep = allocate_count_ / 5;  // Reserve 20% idle\n    if (keep >= stream_list_.size()) {\n      // utilization >= 80%, no need to free\n      return;\n    }\n\n    auto del = stream_list_.size() - keep;\n    auto end_pos = stream_list_.begin();\n    std::advance(end_pos, del);\n    stream_to_del.splice(stream_to_del.begin(), stream_list_,\n    
                     stream_list_.begin(), end_pos);\n    allocate_count_ -= del;\n    MBLOG_INFO << \"AscendStreamPool after shrink, total stream:\"\n               << allocate_count_ << \", idle stream:\" << stream_list_.size();\n  }\n\n  for (auto *stream : stream_to_del) {\n    stream->Sync();\n    delete stream;\n  }\n}\n\nAscendStreamPool::~AscendStreamPool() {\n  is_exit_ = true;\n  if (stream_callback_thread_ != nullptr) {\n    MBLOG_INFO << \"Wait for ascend stream callback exit in ~AscendStreamPool\";\n    stream_callback_thread_->join();\n    MBLOG_INFO << \"Ascend stream callback exit ok\";\n  }\n\n  stream_shrink_timer_.Stop();\n  for (auto *stream : stream_list_) {\n    delete stream;\n  }\n}\n\nstd::shared_ptr<AscendStream> AscendStreamPool::Alloc() {\n  std::shared_ptr<AscendStream> stream;\n  std::weak_ptr<AscendStreamPool> pool_ref = shared_from_this();\n  auto free_func = [pool_ref](AscendStream *stream_ptr) {\n    auto pool = pool_ref.lock();\n    if (pool == nullptr) {\n      delete stream_ptr;\n      return;\n    }\n\n    pool->Free(stream_ptr);\n  };\n\n  {\n    std::lock_guard<std::mutex> lock(stream_list_lock_);\n    if (!stream_list_.empty()) {\n      auto *stream_ptr = stream_list_.front();\n      stream_list_.pop_front();\n      stream.reset(stream_ptr, free_func);\n      return stream;\n    }\n  }\n\n  auto *stream_ptr = new AscendStream(device_id_, callback_thread_id_);\n  auto ret = stream_ptr->Init();\n  if (ret != STATUS_SUCCESS) {\n    delete stream_ptr;\n    return nullptr;\n  }\n\n  allocate_count_++;\n  stream.reset(stream_ptr, free_func);\n  return stream;\n}\n\nStatus AscendStreamPool::Free(AscendStream *&stream) {\n  if (stream == nullptr) {\n    return STATUS_SUCCESS;\n  }\n\n  std::lock_guard<std::mutex> lock(stream_list_lock_);\n  stream_list_.push_back(stream);\n\n  return STATUS_SUCCESS;\n}\n\nAscendMemoryPool::AscendMemoryPool(AscendMemoryManager *mem_manager) {\n  mem_manager_ = mem_manager;\n}\n\nStatus 
AscendMemoryPool::Init() {\n  auto status = InitSlabCache();\n  if (!status) {\n    return {status, \"init mempool failed.\"};\n  }\n\n  auto timer = std::make_shared<TimerTask>();\n  timer->Callback(&AscendMemoryPool::OnTimer, this);\n  flush_timer_ = timer;\n\n  // flush slab every 10s\n  GetTimer()->Schedule(flush_timer_, 1000, 10000);\n  return STATUS_OK;\n}\n\nAscendMemoryPool::~AscendMemoryPool() {\n  if (flush_timer_) {\n    flush_timer_->Stop();\n    flush_timer_ = nullptr;\n  }\n}\n\nvoid AscendMemoryPool::OnTimer() {\n  // TODO support config shrink time.\n}\n\nvoid *AscendMemoryPool::MemAlloc(size_t size) {\n  return mem_manager_->Malloc(size, ASCEND_MEM_NORMAL);\n}\n\nvoid AscendMemoryPool::MemFree(void *ptr) {\n  mem_manager_->Free(ptr, ASCEND_MEM_NORMAL);\n}\n\nAscendMemory::AscendMemory(const std::shared_ptr<Device> &device,\n                           const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                           const std::shared_ptr<void> &device_mem_ptr,\n                           size_t size)\n    : DeviceMemory(device, mem_mgr, device_mem_ptr, size) {}\n\nAscendMemory::~AscendMemory() = default;\n\nStatus AscendMemory::BindStream(\n    const std::shared_ptr<AscendStream> &stream_ptr) {\n  if (ascend_stream_ptr_ != nullptr) {\n    if (ascend_stream_ptr_ == stream_ptr) {\n      return STATUS_SUCCESS;\n    }\n    // Change stream to another is not allowed\n    return {STATUS_BUSY, \"Memory has bind to a stream\"};\n  }\n\n  Status ret = STATUS_SUCCESS;\n  if (stream_ptr != nullptr) {\n    if (stream_ptr->IsInDevice(device_->GetDeviceID())) {\n      ascend_stream_ptr_ = stream_ptr;\n      return STATUS_SUCCESS;\n    }\n    // We need create a new stream when cross gpu device, so bind failed in fact\n    ret = STATUS_BUSY;\n  }\n\n  auto ascend_mem_mgr = std::static_pointer_cast<AscendMemoryManager>(mem_mgr_);\n  ascend_stream_ptr_ = ascend_mem_mgr->AllocStream();\n  return ret;\n}\n\nStatus AscendMemory::DetachStream() {\n  if 
(ascend_stream_ptr_ == nullptr) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = ascend_stream_ptr_->Sync();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ascend_stream_ptr_.reset();\n  return STATUS_SUCCESS;\n}\n\nStatus AscendMemory::CopyExtraMetaTo(\n    std::shared_ptr<DeviceMemory> &device_mem) {\n  if (device_mem->GetDevice() != device_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto target = std::static_pointer_cast<AscendMemory>(device_mem);\n  target->ascend_stream_ptr_ = ascend_stream_ptr_;\n  return STATUS_SUCCESS;\n}\n\nStatus AscendMemory::CombineExtraMeta(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) {\n  for (const auto &mem : mem_list) {\n    auto ascend_mem = std::dynamic_pointer_cast<AscendMemory>(mem);\n    if (ascend_stream_ptr_ == nullptr) {\n      // If this has no stream, use the first stream we found\n      ascend_stream_ptr_ = ascend_mem->ascend_stream_ptr_;\n    } else {\n      // If this has valid stream now, other stream should be synchronized\n      auto other_ascend_stream_ptr = ascend_mem->ascend_stream_ptr_;\n      if (other_ascend_stream_ptr == nullptr) {\n        continue;\n      }\n\n      if (ascend_stream_ptr_ == other_ascend_stream_ptr) {\n        continue;\n      }\n\n      auto ret = other_ascend_stream_ptr->Sync();\n      if (ret != STATUS_SUCCESS) {\n        MBLOG_ERROR << \"Sync ascend stream failed when combine ascend memory\";\n        return STATUS_FAULT;\n      }\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nAscendMemoryManager::AscendMemoryManager(const std::string &device_id)\n    : DeviceMemoryManager(device_id),\n      mem_pool_(this),\n      mem_copy_kind_map_{{DeviceMemoryCopyKind::FromHost,\n                          aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE},\n                         {DeviceMemoryCopyKind::SameDeviceType,\n                          aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_DEVICE},\n                         {DeviceMemoryCopyKind::ToHost,\n                          
aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_HOST}} {\n  stream_pool_ = std::make_shared<AscendStreamPool>(device_id);\n  npu_id_ = atoi(device_id.c_str());\n}\n\nAscendMemoryManager::~AscendMemoryManager() = default;\n\nStatus AscendMemoryManager::Init() { return STATUS_OK; }\n\nstd::shared_ptr<DeviceMemory> AscendMemoryManager::MakeDeviceMemory(\n    const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n    size_t size) {\n  return std::make_shared<AscendMemory>(device, shared_from_this(), mem_ptr,\n                                        size);\n}\n\nvoid *AscendMemoryManager::Malloc(size_t size, uint32_t mem_flags) {\n  auto ret = aclrtSetDevice(npu_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind device \" << npu_id_ << \" failed, acl ret \" << ret;\n    return nullptr;\n  }\n\n  void *npu_mem_ptr = nullptr;\n  switch (mem_flags) {\n    case ASCEND_MEM_DVPP:\n      ret = acldvppMalloc(&npu_mem_ptr, size);\n      break;\n\n    case ASCEND_MEM_NORMAL:\n      ret = aclrtMalloc(&npu_mem_ptr, size,\n                        aclrtMemMallocPolicy::ACL_MEM_MALLOC_NORMAL_ONLY);\n      break;\n\n    default:\n      MBLOG_ERROR << \"Not support mem alloc flags \" << mem_flags;\n      return nullptr;\n  }\n\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Malloc failed, size \" << size << \", acl ret \" << ret\n                << \", flags \" << g_ascend_flags_name[mem_flags];\n    return nullptr;\n  }\n\n  return npu_mem_ptr;\n}\n\nvoid AscendMemoryManager::Free(void *mem_ptr, uint32_t mem_flags) {\n  auto ret = aclrtSetDevice(npu_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind device \" << npu_id_ << \" failed, acl ret \" << ret;\n  }\n\n  switch (mem_flags) {\n    case ASCEND_MEM_DVPP:\n      ret = acldvppFree(mem_ptr);\n      break;\n\n    case ASCEND_MEM_NORMAL:\n      ret = aclrtFree(mem_ptr);\n      break;\n\n    default:\n      MBLOG_ERROR << \"Not support mem free flags, flags \" << mem_flags;\n      return;\n  }\n\n  if (ret 
!= ACL_SUCCESS) {\n    MBLOG_ERROR << \"Free on ascend \" << npu_id_ << \" failed, acl ret \" << ret\n                << \", flags \" << g_ascend_flags_name[mem_flags];\n    return;\n  }\n}\n\nStatus AscendMemoryManager::Copy(void *dest, size_t dest_size,\n                                 const void *src_buffer, size_t src_size,\n                                 DeviceMemoryCopyKind kind) {\n  if (dest == nullptr || src_buffer == nullptr) {\n    MBLOG_ERROR << \"Ascend copy src \" << src_buffer << \" to dest \" << dest\n                << \" failed\";\n    return STATUS_INVALID;\n  }\n\n  if (dest_size < src_size) {\n    MBLOG_ERROR << \"Ascend memcpy failed, dest size[\" << dest_size\n                << \"] < src size[\" << src_size << \"]\";\n    return STATUS_RANGE;\n  }\n\n  auto ret = aclrtSetDevice(npu_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Bind device \" << npu_id_ << \" failed, acl ret \" << ret;\n    return STATUS_FAULT;\n  }\n\n  aclrtMemcpyKind ascend_copy_kind;\n  GetAscendMemcpyKind(kind, ascend_copy_kind);\n  ret = aclrtMemcpy(dest, dest_size, src_buffer, src_size, ascend_copy_kind);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Ascend memcpy failed, ret \" << ret << \", src size \"\n                << src_size << \", ascend cpy kind \" << ascend_copy_kind;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus AscendMemoryManager::GetDeviceMemUsage(size_t *free,\n                                              size_t *total) const {\n  dsmi_memory_info_stru mem_info;\n  auto ret = dsmi_get_memory_info(npu_id_, &mem_info);\n  if (ret != 0) {\n    MBLOG_ERROR << \"Get npu \" << npu_id_ << \" mem info failed, dsmi ret \"\n                << ret;\n    return STATUS_FAULT;\n  }\n\n  const size_t mb = 1024 * 1024;\n  size_t total_in_byte = mem_info.memory_size * mb;\n  if (free != nullptr) {\n    *free = total_in_byte * (100 - mem_info.utiliza) / 100;\n  }\n\n  if (total != nullptr) {\n    *total = total_in_byte;\n  
}\n\n  return STATUS_SUCCESS;\n}\n\nStatus AscendMemoryManager::DeviceMemoryCopy(\n    const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, DeviceMemoryCopyKind copy_kind) {\n  auto src_device = src_memory->GetDevice();\n  auto dest_device = dest_memory->GetDevice();\n  if (copy_kind == DeviceMemoryCopyKind::SameDeviceType &&\n      src_device != dest_device) {\n    return STATUS_NOTSUPPORT;\n  }\n\n  aclrtMemcpyKind ascend_copy_kind;\n  GetAscendMemcpyKind(copy_kind, ascend_copy_kind);\n  std::shared_ptr<AscendStream> ascend_stream_ptr;\n  auto ret = SetupAscendStream(src_memory, dest_memory, ascend_stream_ptr);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Set up ascend stream failed, stream is null\";\n  }\n\n  aclrtStream ascend_stream =\n      ascend_stream_ptr == nullptr ? nullptr : ascend_stream_ptr->Get();\n  auto *dest_ptr = dest_memory->GetPtr<uint8_t>().get() + dest_offset;\n  const auto *src_ptr = src_memory->GetConstPtr<uint8_t>().get() + src_offset;\n  if (!CheckCopyAsync(src_ptr, dest_ptr) && ascend_stream_ptr != nullptr) {\n    ascend_stream_ptr->Sync();\n    ascend_stream = nullptr;\n  }\n\n  aclrtSetDevice(npu_id_);\n  aclError acl_ret = ACL_SUCCESS;\n  if (ascend_stream != nullptr) {\n    acl_ret = aclrtMemcpyAsync(dest_ptr, src_size, src_ptr, src_size,\n                               ascend_copy_kind, ascend_stream);\n  } else {\n    acl_ret =\n        aclrtMemcpy(dest_ptr, src_size, src_ptr, src_size, ascend_copy_kind);\n  }\n\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtMemcpyAsync failed, acl ret \" << acl_ret\n                << \", src_size:\" << src_size << \",kind:\" << ascend_copy_kind\n                << \",stream:\" << ascend_stream;\n    return STATUS_FAULT;\n  }\n\n  if (ascend_stream != nullptr) {\n    if (dest_memory->IsHost()) {\n      ascend_stream_ptr->Sync();\n    } else {\n      // When 
async operation complete, the reference of memory will be\n      // released\n      ascend_stream_ptr->Bind({src_memory, dest_memory});\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid AscendMemoryManager::GetAscendMemcpyKind(\n    DeviceMemoryCopyKind copy_kind, aclrtMemcpyKind &ascend_copy_kind) {\n  ascend_copy_kind = mem_copy_kind_map_[copy_kind];\n}\n\nbool AscendMemoryManager::CheckCopyAsync(const void *src_addr,\n                                         const void *dest_addr) {\n  if (IsMemAligned((uintptr_t)src_addr, ASCEND_ASYNC_ALIGN) &&\n      IsMemAligned((uintptr_t)dest_addr, ASCEND_ASYNC_ALIGN)) {\n    return true;\n  }\n\n  return false;\n}\n\nStatus AscendMemoryManager::SetupAscendStream(\n    const std::shared_ptr<const DeviceMemory> &src_memory,\n    const std::shared_ptr<DeviceMemory> &dest_memory,\n    std::shared_ptr<AscendStream> &ascend_stream_ptr) {\n  if (src_memory->IsHost()) {\n    ascend_stream_ptr = nullptr;\n  } else {\n    ascend_stream_ptr = std::static_pointer_cast<const AscendMemory>(src_memory)\n                            ->GetBindStream();\n  }\n\n  if (!dest_memory->IsHost()) {\n    auto dest_ascend_memory =\n        std::dynamic_pointer_cast<AscendMemory>(dest_memory);\n    auto ret = dest_ascend_memory->BindStream(ascend_stream_ptr);\n    if (ret == STATUS_BUSY && ascend_stream_ptr != nullptr) {\n      // Case: Two memory has different stream, we choose to sync source\n      ascend_stream_ptr->Sync();\n    }\n\n    ascend_stream_ptr = dest_ascend_memory->GetBindStream();\n  }\n\n  return STATUS_SUCCESS;\n}\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/ascend/core/device_ascend.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/ascend/device_ascend.h\"\n\n#include <acl/acl.h>\n#include <dsmi_common_interface.h>\n#include <stdio.h>\n\n#include \"device_stream.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"modelbox/device/ascend/ascend_memory.h\"\n\nconst size_t SIZE_MB = 1024 * 1024;\n\nnamespace modelbox {\nAscend::Ascend(const std::shared_ptr<DeviceMemoryManager> &mem_mgr)\n    : Device(mem_mgr) {\n  aclInit(nullptr);\n}\n\nAscend::~Ascend() { aclFinalize(); }\n\nstd::string Ascend::GetType() const { return DEVICE_TYPE; }\n\nStatus Ascend::DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                             size_t count) {\n  if (0 == count) {\n    return STATUS_OK;\n  }\n\n  for (size_t i = 0; i < count; ++i) {\n    auto status = fun(i);\n    if (!status) {\n      MBLOG_WARN << \"executor func failed: \" << status\n                 << \" stack trace:\" << GetStackTrace();\n      return status;\n    }\n  }\n\n  return STATUS_OK;\n};\n\nbool Ascend::NeedResourceNice() { return true; }\n\nAscendFactory::AscendFactory() = default;\nAscendFactory::~AscendFactory() = default;\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>>\nAscendFactory::DeviceProbe() {\n  std::map<std::string, std::shared_ptr<DeviceDesc>> device_desc_map;\n  int32_t count = 0;\n 
 auto ret = dsmi_get_device_count(&count);\n  if (ret != 0) {\n    MBLOG_ERROR << \"dsmi_get_device_count failed, ret \" << ret;\n    return device_desc_map;\n  }\n\n  for (int32_t id = 0; id < count; ++id) {\n    dsmi_memory_info_stru mem_info;\n    ret = dsmi_get_memory_info(id, &mem_info);\n    if (ret != 0) {\n      MBLOG_ERROR << \"dsmi_get_memory_info id:\" << id << \" failed, ret \" << ret;\n      continue;\n    }\n\n    auto device_desc = std::make_shared<AscendDesc>();\n    device_desc->SetDeviceDesc(\"This is an ascend device description.\");\n    auto id_str = std::to_string(id);\n    device_desc->SetDeviceId(id_str);\n    device_desc->SetDeviceMemory(\n        GetBytesReadable(mem_info.memory_size * SIZE_MB));\n    device_desc->SetDeviceType(\"ascend\");\n    device_desc_map.insert(std::make_pair(id_str, device_desc));\n  }\n\n  return device_desc_map;\n}\n\nstd::string AscendFactory::GetDeviceFactoryType() { return DEVICE_TYPE; }\n\nstd::shared_ptr<Device> AscendFactory::CreateDevice(\n    const std::string &device_id) {\n  auto mem_mgr = std::make_shared<AscendMemoryManager>(device_id);\n  auto status = mem_mgr->Init();\n  if (!status) {\n    StatusError = status;\n    return nullptr;\n  }\n\n  return std::make_shared<Ascend>(mem_mgr);\n}\n\nAscendDesc::AscendDesc() = default;\n\nAscendDesc::~AscendDesc() = default;\n\nAscendFlowUnit::AscendFlowUnit() = default;\n\nAscendFlowUnit::~AscendFlowUnit() = default;\n\nStatus AscendFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto ret = aclrtSetDevice(dev_id_);\n  if (ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"Set ascend device \" << dev_id_ << \" failed, acl ret \"\n                << ret;\n    return STATUS_FAULT;\n  }\n\n  auto stream = GetDevSyncStream<AscendStream, AscendMemory>(data_ctx);\n  modelbox::Status status;\n  if (stream == nullptr) {\n    return {modelbox::STATUS_NOTFOUND, \"get sync stream failed.\"};\n  }\n\n  auto process_status = AscendProcess(data_ctx, 
stream->Get());\n  if (process_status != modelbox::STATUS_OK &&\n      process_status != modelbox::STATUS_CONTINUE) {\n    return process_status;\n  }\n\n  status = SetDevStream<AscendStream, AscendMemory>(data_ctx, stream);\n  if (!status) {\n    return status;\n  }\n\n  status = HoldMemory<AscendStream>(data_ctx, stream);\n  if (!status) {\n    return status;\n  }\n\n  return process_status;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/ascend/core/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/device/ascend/device_ascend.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n\n#include <stdio.h>\n#include <memory>\n\nstd::shared_ptr<modelbox::Timer> kDeviceTimer;\n\nmodelbox::Timer *GetTimer() { return kDeviceTimer.get(); }\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::AscendFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_DEVICE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetName(modelbox::DEVICE_DRIVER_NAME);\n  desc->SetDescription(modelbox::DEVICE_DRIVER_DESCRIPTION);\n}\n\nmodelbox::Status DriverInit() {\n  if (kDeviceTimer != nullptr) {\n    return modelbox::STATUS_OK;\n  } \n\n  kDeviceTimer = std::make_shared<modelbox::Timer>();\n  kDeviceTimer->SetName(\"Ascend-Timer\");\n  kDeviceTimer->Start();\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  if (kDeviceTimer == nullptr) {\n    return;\n  }\n\n  // Driver Fini.\n  kDeviceTimer->Stop();\n  kDeviceTimer = nullptr;\n}\n\n"
  },
  {
    "path": "src/drivers/devices/ascend/core/include/modelbox/device/ascend/ascend_memory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ASCEND_MEMORY_H_\n#define MODELBOX_ASCEND_MEMORY_H_\n\n#include <acl/acl.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/memory_pool.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <list>\n#include <thread>\n\n#ifndef ENABLE_DVPP_INTERFACE\n#define ENABLE_DVPP_INTERFACE\n#endif\n\n#include <acl/ops/acl_dvpp.h>\n\nextern modelbox::Timer *GetTimer();\n\nnamespace modelbox {\nconstexpr uint32_t ASCEND_MEM_NORMAL = 0;\nconstexpr uint32_t ASCEND_MEM_DVPP = 1;\n\nconstexpr uintptr_t ASCEND_ASYNC_ALIGN =\n    64;  // Precondition for ascend async copy\n\nvoid AscendReleaseMemoryAsync(void *mem_list_ptr);\n\nclass AscendMemory;\nclass AscendStreamPool;\nclass AscendMemoryManager;\n\nclass AscendStream {\n  friend class AscendMemory;\n  friend class AscendStreamPool;\n\n public:\n  AscendStream(const AscendStream &stream) = delete;\n  AscendStream(const AscendStream &&stream) = delete;\n  AscendStream &operator=(const AscendStream &stream) = delete;\n  AscendStream &operator=(const AscendStream &&stream) = delete;\n  virtual ~AscendStream();\n  AscendStream(int32_t device_id, uint64_t callback_tid);\n\n  inline bool IsInDevice(const std::string &device_id) const {\n    auto device_id_num = atoi(device_id.c_str());\n    return 
IsInDevice(device_id_num);\n  }\n\n  inline bool IsInDevice(int32_t device_id) const {\n    return device_id == device_id_;\n  }\n\n  inline aclrtStream Get() const { return stream_; }\n\n  Status Sync() const;\n\n  Status Bind(std::vector<std::shared_ptr<const DeviceMemory>> mem_list) const;\n\n protected:\n  Status Init();\n\n  void Deinit();\n\n private:\n  aclrtStream stream_;\n  int32_t device_id_{0};\n  uint64_t callback_thread_id_{0};\n  std::atomic_bool init_flag_{false};\n};\n\nclass AscendStreamPool : public std::enable_shared_from_this<AscendStreamPool> {\n public:\n  AscendStreamPool(const std::string &device_id);\n\n  virtual ~AscendStreamPool();\n\n  /**\n   * @brief Allocate ascend stream associated with device\n   * @return Ascend stream or nullptr\n   */\n  std::shared_ptr<AscendStream> Alloc();\n\n  /**\n   * @brief Release ascend stream\n   * @param stream Ascend stream to free\n   */\n  Status Free(AscendStream *&stream);\n\n  /**\n   * @brief Get allocated stream count\n   * @return Allocated stream count\n   */\n  inline size_t GetAllocatedStreamCount() const {\n    return allocate_count_.load();\n  }\n\n  void StreamCallBack();\n\n  void Shrink();\n\n private:\n  std::atomic<size_t> allocate_count_{0};\n  int32_t device_id_{0};\n  std::shared_ptr<std::thread> stream_callback_thread_;\n  uint64_t callback_thread_id_{0};\n  bool is_exit_{false};\n\n  std::mutex stream_list_lock_;\n  std::list<AscendStream *> stream_list_;\n  Timer stream_shrink_timer_;\n  const uint64_t shrink_interval_ms_{10 * 60 * 1000};  // 10 min\n};\n\nclass AscendMemory : public DeviceMemory {\n public:\n  AscendMemory(const std::shared_ptr<Device> &device,\n               const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n               const std::shared_ptr<void> &device_mem_ptr, size_t size);\n\n  ~AscendMemory() override;\n\n  /**\n   * @brief Get bind ascend stream\n   * @return Ascend stream\n   */\n  inline std::shared_ptr<AscendStream> GetBindStream() const {\n    
return ascend_stream_ptr_;\n  }\n\n  /**\n   * @brief Bind ascend stream\n   * @param stream_ptr Ascend stream\n   *        if null\n   *          new stream will be created\n   *        else\n   *          set stream when return success\n   *          has one different stream when return busy\n   */\n  Status BindStream(const std::shared_ptr<AscendStream> &stream_ptr = nullptr);\n\n  Status DetachStream();\n\n protected:\n  Status CopyExtraMetaTo(std::shared_ptr<DeviceMemory> &device_mem) override;\n\n  Status CombineExtraMeta(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) override;\n\n private:\n  std::shared_ptr<AscendStream> ascend_stream_ptr_;\n};\n\nclass AscendMemoryPool : public MemoryPoolBase {\n public:\n  AscendMemoryPool(AscendMemoryManager *mem_manager);\n\n  ~AscendMemoryPool() override;\n\n  Status Init();\n\n  void *MemAlloc(size_t size) override;\n\n  void MemFree(void *ptr) override;\n\n  virtual void OnTimer();\n\n private:\n  AscendMemoryManager *mem_manager_;\n  std::shared_ptr<TimerTask> flush_timer_;\n};\n\nclass AscendMemoryManager : public DeviceMemoryManager {\n public:\n  AscendMemoryManager(const std::string &device_id);\n  ~AscendMemoryManager() override;\n\n  Status Init();\n\n  /**\n   * @brief Create a specified memory container\n   * @param device pointer to device\n   * @param mem_ptr shared pointer to memory\n   * @param size memory size\n   * @return Empty memory container\n   */\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override;\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate.\n   * @param mem_flags Flags to create device memory\n   * @return Device memory.\n   */\n  void *Malloc(size_t size, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, free memory\n   * @param mem_ptr Memory to free\n   * @param 
mem_flags Flags of device memory\n   */\n  void Free(void *mem_ptr, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, copy data from src to dest\n   * @param dest dest buffer to write\n   * @param dest_size dest buffer size\n   * @param src_buffer src buffer to read\n   * @param src_size read data size\n   * @param kind data copy kind\n   * @return Status\n   */\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override;\n\n  /**\n   * @brief Copy memory between current device and host\n   * @param dest_memory Destination memory\n   * @param dest_offset Destination memory offset\n   * @param src_memory Source memory\n   * @param src_offset Source offset\n   * @param src_size Source memory size\n   * @param copy_kind Memory copy mode\n   * @return Status\n   */\n  Status DeviceMemoryCopy(\n      const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n      const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n      size_t src_size,\n      DeviceMemoryCopyKind copy_kind = DeviceMemoryCopyKind::FromHost) override;\n\n  /**\n   * @brief Get device memory info\n   * @return Status\n   */\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override;\n\n  inline std::shared_ptr<AscendStream> AllocStream() {\n    return stream_pool_->Alloc();\n  };\n\n private:\n  void GetAscendMemcpyKind(DeviceMemoryCopyKind copy_kind,\n                           aclrtMemcpyKind &ascend_copy_kind);\n\n  Status SetupAscendStream(\n      const std::shared_ptr<const DeviceMemory> &src_memory,\n      const std::shared_ptr<DeviceMemory> &dest_memory,\n      std::shared_ptr<AscendStream> &ascend_stream_ptr);\n\n  bool CheckCopyAsync(const void *src_addr, const void *dest_addr);\n\n  std::shared_ptr<AscendStreamPool> stream_pool_;\n  AscendMemoryPool mem_pool_;\n  std::map<DeviceMemoryCopyKind, aclrtMemcpyKind> mem_copy_kind_map_;\n  
int32_t npu_id_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_ASCEND_MEMORY_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/core/include/modelbox/device/ascend/device_ascend.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_ASCEND_H_\n#define MODELBOX_DEVICE_ASCEND_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/data_context.h>\n#include <modelbox/device/ascend/ascend_memory.h>\n#include <modelbox/flow.h>\n\nnamespace modelbox {\n\nconstexpr const char *DEVICE_TYPE = \"ascend\";\nconstexpr const char *DEVICE_DRIVER_NAME = \"device-ascend\";\nconstexpr const char *DEVICE_DRIVER_DESCRIPTION = \"A ascend device driver\";\n\nclass Ascend : public Device {\n public:\n  Ascend(const std::shared_ptr<DeviceMemoryManager> &mem_mgr);\n  ~Ascend() override;\n  std::string GetType() const override;\n\n  Status DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                       size_t count) override;\n\n  bool NeedResourceNice() override;\n};\n\nclass AscendFactory : public DeviceFactory {\n public:\n  AscendFactory();\n  ~AscendFactory() override;\n\n  std::map<std::string, std::shared_ptr<DeviceDesc>> DeviceProbe() override;\n  std::string GetDeviceFactoryType() override;\n  std::shared_ptr<Device> CreateDevice(const std::string &device_id) override;\n};\n\nclass AscendDesc : public DeviceDesc {\n public:\n  AscendDesc();\n  ~AscendDesc() override;\n};\n\nclass AscendFlowUnit : public FlowUnit {\n public:\n  AscendFlowUnit();\n  ~AscendFlowUnit() override;\n\n  virtual Status 
AscendProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               aclrtStream stream) = 0;\n\n  Status Process(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_ASCEND_H_"
  },
  {
    "path": "src/drivers/devices/ascend/core/libmodelbox-device-ascend.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox/device/ascend\n\nName: libmodelbox-device-ascend\nDescription: modelbox ascend device SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox-device-ascend\nCflags: -I${includedir}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-cuda-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${ACL_INCLUDE_DIR})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${DSMI_INCLUDE_DIR})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${ACL_LIBRARIES})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${DSMI_LIBRARIES})\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/crop/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"crop\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_CROP_ASCEND_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_CROP_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CROP_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CROP_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CROP_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/crop/crop_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"crop_flowunit.h\"\n\n#include \"image_process.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nconst int MIN_WIDTH_STRIDE = 32;\nconst std::string OUTPUT_IMG_PIX_FMT = \"nv12\";\n\nmodelbox::Status CropFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::AscendProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, aclrtStream stream) {\n  auto input_img_buffer_list = data_ctx->Input(IN_IMG);\n  auto input_box_buffer_list = data_ctx->Input(IN_BOX);\n  auto box_count = input_img_buffer_list->Size();\n  auto img_count = input_box_buffer_list->Size();\n  if (box_count != img_count) {\n    auto err_msg = \"box buffer size \" + std::to_string(box_count) +\n                   \" and img buffer size \" + std::to_string(img_count) +\n                   \" not equal\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  auto output_img_buffer_list = data_ctx->Output(OUT_IMG);\n  auto ret = PrepareOutput(input_box_buffer_list, output_img_buffer_list);\n  if (!ret) {\n    MBLOG_ERROR << \"Preapre output failed \" << ret;\n    return ret;\n  }\n\n  output_img_buffer_list->CopyMeta(input_img_buffer_list);\n  for (size_t i = 0; i < img_count; ++i) {\n    auto in_img_buffer = 
input_img_buffer_list->At(i);\n    auto in_box_buffer = input_box_buffer_list->At(i);\n    auto out_img_buffer = output_img_buffer_list->At(i);\n    auto ret =\n        ProcessOneImg(in_img_buffer, in_box_buffer, out_img_buffer, stream);\n    if (!ret) {\n      MBLOG_ERROR << \"Crop image failed, err \" << ret;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::PrepareOutput(\n    std::shared_ptr<modelbox::BufferList> &input_box_buffer_list,\n    std::shared_ptr<modelbox::BufferList> &output_img_buffer_list) {\n  auto img_count = input_box_buffer_list->Size();\n  std::vector<size_t> output_shape;\n  for (size_t i = 0; i < img_count; ++i) {\n    auto box_buffer = input_box_buffer_list->At(i);\n    auto *box_ptr = (imageprocess::RoiBox *)box_buffer->ConstData();\n    size_t bytes = 0;\n    int32_t align_w =\n        imageprocess::align_up(box_ptr->w, imageprocess::ASCEND_WIDTH_ALIGN);\n    align_w = std::max(align_w, MIN_WIDTH_STRIDE);\n    int32_t align_h =\n        imageprocess::align_up(box_ptr->h, imageprocess::ASCEND_HEIGHT_ALIGN);\n    auto ret = imageprocess::GetImageBytes(OUTPUT_IMG_PIX_FMT, align_w, align_h,\n                                           bytes);\n    if (!ret) {\n      return ret;\n    }\n\n    output_shape.emplace_back(bytes);\n  }\n\n  return output_img_buffer_list->Build(output_shape, false);\n}\n\nmodelbox::Status CropFlowUnit::CheckInputParamVaild(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &in_box) {\n  std::string in_pix_fmt;\n  int32_t in_img_width = 0;\n  int32_t in_img_height = 0;\n  int32_t in_img_width_stride = 0;\n  int32_t in_img_height_stride = 0;\n  auto ret = imageprocess::GetImgParam(in_image, in_pix_fmt, in_img_width,\n                                       in_img_height, in_img_width_stride,\n                                       in_img_height_stride);\n  if (!ret) {\n    return ret;\n  }\n  const auto *box_ptr = (const 
imageprocess::RoiBox *)in_box->ConstData();\n  if (!imageprocess::CheckRoiBoxVaild(box_ptr, in_img_width, in_img_height)) {\n    return {modelbox::STATUS_FAULT, \"roi box param is invaild !\"};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::ProcessOneImg(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &in_box,\n    std::shared_ptr<modelbox::Buffer> &out_image, aclrtStream stream) {\n  auto ret = CheckInputParamVaild(in_image, in_box);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT, \"check input param failed\"};\n  }\n\n  auto chan_desc = imageprocess::GetDvppChannel(dev_id_);\n  if (chan_desc == nullptr) {\n    return {modelbox::STATUS_FAULT, \"Get dvpp channel failed\"};\n  }\n\n  std::shared_ptr<acldvppPicDesc> in_img_desc;\n  ret = GetInputDesc(in_image, in_img_desc);\n  if (!ret) {\n    return ret;\n  }\n\n  std::shared_ptr<acldvppPicDesc> out_img_desc;\n  ret = GetOutputDesc(in_box, out_image, out_img_desc);\n  if (!ret) {\n    return ret;\n  }\n\n  std::shared_ptr<acldvppRoiConfig> roi_cfg;\n  ret = GetRoiCfg(in_box, roi_cfg);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = Crop(chan_desc, in_img_desc, out_img_desc, roi_cfg, out_image, stream);\n  if (!ret) {\n    return ret;\n  }\n\n  return imageprocess::SetOutImgMeta(out_image, OUTPUT_IMG_PIX_FMT,\n                                     out_img_desc);\n}\n\nmodelbox::Status CropFlowUnit::GetInputDesc(\n    const std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<acldvppPicDesc> &in_img_desc) {\n  std::string in_pix_fmt;\n  int32_t in_img_width = 0;\n  int32_t in_img_height = 0;\n  int32_t in_img_width_stride = 0;\n  int32_t in_img_height_stride = 0;\n  auto ret = imageprocess::GetImgParam(in_image, in_pix_fmt, in_img_width,\n                                       in_img_height, in_img_width_stride,\n                                       in_img_height_stride);\n  if (!ret) {\n    return ret;\n  }\n\n  in_img_desc = 
CreateImgDesc(\n      in_image->GetBytes(), (void *)in_image->ConstData(), in_pix_fmt,\n      imageprocess::ImageShape{in_img_width, in_img_height, in_img_width_stride,\n                               in_img_height_stride},\n      imageprocess::ImgDescDestroyFlag::DESC_ONLY);\n  if (in_img_desc == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::GetOutputDesc(\n    const std::shared_ptr<modelbox::Buffer> &in_box,\n    const std::shared_ptr<modelbox::Buffer> &out_image,\n    std::shared_ptr<acldvppPicDesc> &out_img_desc) {\n  const auto *box_ptr = (const imageprocess::RoiBox *)in_box->ConstData();\n  if (box_ptr->x % 2 != 0 || box_ptr->y % 2 != 0 || box_ptr->w % 2 != 0 ||\n      box_ptr->h % 2 != 0) {\n    return {modelbox::STATUS_INVALID,\n            \"Input box[x:\" + std::to_string(box_ptr->x) +\n                \", y:\" + std::to_string(box_ptr->y) +\n                \", w:\" + std::to_string(box_ptr->w) +\n                \", h:\" + std::to_string(box_ptr->h) +\n                \"] is invalid, value must be even\"};\n  }\n\n  auto align_w =\n      imageprocess::align_up(box_ptr->w, imageprocess::ASCEND_WIDTH_ALIGN);\n  align_w = std::max(align_w, MIN_WIDTH_STRIDE);\n  auto align_h =\n      imageprocess::align_up(box_ptr->h, imageprocess::ASCEND_HEIGHT_ALIGN);\n  out_img_desc = CreateImgDesc(\n      out_image->GetBytes(), (void *)out_image->MutableData(),\n      OUTPUT_IMG_PIX_FMT,\n      imageprocess::ImageShape{box_ptr->w, box_ptr->h, align_w, align_h},\n      imageprocess::ImgDescDestroyFlag::DESC_ONLY);\n  if (out_img_desc == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::GetRoiCfg(\n    const std::shared_ptr<modelbox::Buffer> &in_box,\n    std::shared_ptr<acldvppRoiConfig> &roi_cfg) {\n  const auto *box_ptr = (const imageprocess::RoiBox *)in_box->ConstData();\n  uint32_t left = box_ptr->x;\n  uint32_t right = 
box_ptr->x + box_ptr->w - 1;\n  uint32_t top = box_ptr->y;\n  uint32_t bottom = box_ptr->y + box_ptr->h - 1;\n  auto *roi_cfg_ptr = acldvppCreateRoiConfig(left, right, top, bottom);\n  if (roi_cfg_ptr == nullptr) {\n    return {modelbox::STATUS_FAULT, \"acldvppCreateRoiConfig return null\"};\n  }\n\n  roi_cfg.reset(roi_cfg_ptr,\n                [](acldvppRoiConfig *ptr) { acldvppDestroyRoiConfig(ptr); });\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::Crop(\n    std::shared_ptr<acldvppChannelDesc> &chan_desc,\n    std::shared_ptr<acldvppPicDesc> &in_img_desc,\n    std::shared_ptr<acldvppPicDesc> &out_img_desc,\n    std::shared_ptr<acldvppRoiConfig> &roi_cfg,\n    std::shared_ptr<modelbox::Buffer> &out_image, aclrtStream stream) {\n  auto acl_ret = acldvppVpcCropAsync(chan_desc.get(), in_img_desc.get(),\n                                     out_img_desc.get(), roi_cfg.get(), stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"acldvppVpcCropAsync failed, err \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  acl_ret = aclrtSynchronizeStream(stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSynchronizeStream failed, err \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status CropFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nMODELBOX_FLOWUNIT(CropFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({IN_IMG, modelbox::ASCEND_MEM_DVPP});\n  desc.AddFlowUnitInput({IN_BOX, \"cpu\"});\n  desc.AddFlowUnitOutput({OUT_IMG, modelbox::ASCEND_MEM_DVPP});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  
desc.Desc.SetVersion(\"1.0.0\");\n}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/crop/crop_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ASCEND_CROP_H_\n#define MODELBOX_FLOWUNIT_ASCEND_CROP_H_\n\n#define ENABLE_DVPP_INTERFACE\n#define ACL_ENABLE\n\n#include <acl/ops/acl_dvpp.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/ascend/device_ascend.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\nconstexpr const char *FLOWUNIT_NAME = \"crop\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A crop flowunit on ascend device. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t  The other input port 'in_region' buffer type is rectangle, the memory \"\n    \"arrangement is [x,y,w,h].\\n\"\n    \"\\t  it contain the following meta fields: \\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': \"\n    \"[nv12], 'layout': [hwc]. 
One image can only be cropped with one \"\n    \"rectangle and output one crop image.\";\n\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *IN_BOX = \"in_region\";\nconstexpr const char *OUT_IMG = \"out_image\";\n\nclass CropFlowUnit : public modelbox::AscendFlowUnit {\n public:\n  CropFlowUnit() = default;\n  ~CropFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status AscendProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      aclrtStream stream) override;\n\n private:\n  modelbox::Status PrepareOutput(\n      std::shared_ptr<modelbox::BufferList> &input_box_buffer_list,\n      std::shared_ptr<modelbox::BufferList> &output_img_buffer_list);\n\n  modelbox::Status ProcessOneImg(std::shared_ptr<modelbox::Buffer> &in_image,\n                                 std::shared_ptr<modelbox::Buffer> &in_box,\n                                 std::shared_ptr<modelbox::Buffer> &out_image,\n                                 aclrtStream stream);\n\n  modelbox::Status GetInputDesc(\n      const std::shared_ptr<modelbox::Buffer> &in_image,\n      std::shared_ptr<acldvppPicDesc> &in_img_desc);\n\n  modelbox::Status GetOutputDesc(\n      const std::shared_ptr<modelbox::Buffer> &in_box,\n      const std::shared_ptr<modelbox::Buffer> &out_image,\n      std::shared_ptr<acldvppPicDesc> &out_img_desc);\n\n  modelbox::Status GetRoiCfg(const std::shared_ptr<modelbox::Buffer> &in_box,\n                             std::shared_ptr<acldvppRoiConfig> &roi_cfg);\n\n  modelbox::Status Crop(std::shared_ptr<acldvppChannelDesc> &chan_desc,\n                        std::shared_ptr<acldvppPicDesc> &in_img_desc,\n                        std::shared_ptr<acldvppPicDesc> &out_img_desc,\n                        std::shared_ptr<acldvppRoiConfig> &roi_cfg,\n                        std::shared_ptr<modelbox::Buffer> &out_image,\n            
            aclrtStream stream);\n  modelbox::Status CheckInputParamVaild(\n      std::shared_ptr<modelbox::Buffer> &in_image,\n      std::shared_ptr<modelbox::Buffer> &in_box);\n};\n\n#endif  // MODELBOX_FLOWUNIT_ASCEND_CROP_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/crop/crop_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <acl/acl_rt.h>\n#include <dsmi_common_interface.h>\n#include <securec.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass CropFlowUnitTest : public testing::Test {\n public:\n  CropFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    // Test ascend runtime\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    drivers->Clear();\n\n    driver_flow_ = nullptr;\n  };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir 
= TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> CropFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(CropFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input1[type=input]\n          input2[type=input]\n          output[type=output]\n          crop[type=flowunit, flowunit=crop, device=ascend, deviceid=0]\n\n          input1 -> crop:in_image\n          input2 -> crop:in_region\n          crop:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"RunUnit\", toml_content, 10);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  // in img\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n  in_img_buffer_list->Build({img.total() * img.elemSize()});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n  in_img_buffer->Set(\"width\", img.cols);\n  in_img_buffer->Set(\"height\", img.rows);\n  in_img_buffer->Set(\"width_stride\", img.cols * 3);\n  in_img_buffer->Set(\"height_stride\", img.rows);\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  auto e_ret = memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                        img.data, img.total() * img.elemSize());\n  EXPECT_EQ(e_ret, 0);\n  auto status = extern_data->Send(\"input1\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  // in box\n  auto in_box_buffer_list = extern_data->CreateBufferList();\n  in_box_buffer_list->Build({sizeof(int32_t) * 4});\n  auto in_box_buffer = in_box_buffer_list->At(0);\n  auto *data_ptr = (int32_t 
*)in_box_buffer->MutableData();\n  data_ptr[0] = 30;\n  data_ptr[1] = 0;\n  data_ptr[2] = 128;\n  data_ptr[3] = 128;\n  status = extern_data->Send(\"input2\", in_box_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  cv::Mat yuv_out_img(128 * 3 / 2, 128, CV_8UC1);\n  auto acl_ret = aclrtSetDevice(0);\n  EXPECT_EQ(acl_ret, ACL_SUCCESS);\n  acl_ret = aclrtMemcpy(yuv_out_img.data, output_buffer->GetBytes(),\n                        output_buffer->ConstData(), output_buffer->GetBytes(),\n                        aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_HOST);\n  EXPECT_EQ(acl_ret, ACL_SUCCESS);\n\n  auto image_size = yuv_out_img.rows * yuv_out_img.cols * yuv_out_img.elemSize();\n  char expected_img[image_size];\n  std::ifstream infile;\n  infile.open(std::string(TEST_ASSETS) + \"/ascend_crop_yuv\");\n  infile.read((char*)expected_img, image_size);\n\n  EXPECT_EQ(memcmp((char*)yuv_out_img.data, expected_img, image_size), 0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"acl_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.ascend.inference.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_atc_infer_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.ascend.inference.encrypt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_atc_infer_test_en.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SHARED 
${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/atc_inference.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"atc_inference.h\"\n\n#include <acl/acl.h>\n#include <model_decrypt.h>\n#include <modelbox/base/log.h>\n\n#include <cstdint>\n#include <utility>\n\nmodelbox::Status AtcInference::Init(\n    const std::string &model_file,\n    const std::shared_ptr<modelbox::Configuration> &config,\n    const std::vector<std::string> &unit_input_list,\n    const std::vector<std::string> &unit_output_list,\n    const std::shared_ptr<modelbox::Drivers> &drivers_ptr) {\n  model_file_ = model_file;\n  auto ret = ParseConfig(config);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  auto acl_ret = aclrtSetDevice(device_id_);\n  if (acl_ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclrtSetDevice failed, ret \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  ret = LoadModel(drivers_ptr, config);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = GetModelDesc();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ReadModelInfo();\n  ret = CheckModelIO(unit_input_list, unit_output_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status AtcInference::ParseConfig(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  device_id_ = config->GetInt32(\"deviceid\");\n  return 
modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status AtcInference::LoadModel(\n    const std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  aclError ret = ACL_ERROR_INVALID_FILE;\n  ModelDecryption model_decrypt;\n  if (modelbox::STATUS_SUCCESS !=\n      model_decrypt.Init(model_file_, drivers_ptr, config)) {\n    MBLOG_ERROR << \"init model fail\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (model_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT) {\n    int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        model_decrypt.GetModelSharedBuffer(model_len);\n    if (!modelBuf) {\n      MBLOG_ERROR << \"GetDecryptModelBuffer fail\";\n      return modelbox::STATUS_FAULT;\n    }\n    ret = aclmdlLoadFromMem((char *)(modelBuf.get()), model_len, &model_id_);\n  } else if (model_decrypt.GetModelState() ==\n             ModelDecryption::MODEL_STATE_PLAIN) {\n    ret = aclmdlLoadFromFile(model_file_.c_str(), &model_id_);\n  }\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclmdlLoadFromFile failed, ret \" << ret\n                << \", model:\" << model_file_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  is_model_load_ = true;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status AtcInference::GetModelDesc() {\n  auto *desc = aclmdlCreateDesc();\n  if (desc == nullptr) {\n    MBLOG_ERROR << \"aclmdlCreateDesc failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  model_desc_.reset(desc, [](aclmdlDesc *desc) { aclmdlDestroyDesc(desc); });\n  auto ret = aclmdlGetDesc(model_desc_.get(), model_id_);\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclmdlGetDesc failed, model:\" << model_file_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status AtcInference::CheckModelIO(\n    const std::vector<std::string> &unit_input_list,\n    const std::vector<std::string> &unit_output_list) {\n  std::set<std::string> 
unit_input_set(unit_input_list.begin(),\n                                       unit_input_list.end());\n  std::set<std::string> unit_output_set(unit_output_list.begin(),\n                                        unit_output_list.end());\n\n  auto model_input_size = dynamic_batch_tensor_index_ >= 0\n                              ? model_input_list_.size() - 1\n                              : model_input_list_.size();\n\n  if (model_input_list_.empty() || model_output_list_.empty() ||\n      model_input_size != unit_input_list.size() ||\n      model_output_list_.size() != unit_output_list.size()) {\n    MBLOG_ERROR << \"Model input[\" << model_input_size << \"], output[\"\n                << model_output_list_.size() << \"], FlowUnit input[\"\n                << unit_input_list.size() << \"], output[\"\n                << unit_output_list.size() << \"], these io count is bad\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  for (auto &model_input_name : model_input_list_) {\n    if (model_input_name == ACL_DYNAMIC_TENSOR_NAME) {\n      continue;\n    }\n    if (unit_input_set.find(model_input_name) == unit_input_set.end()) {\n      MBLOG_ERROR << \"Model miss input [\" << model_input_name\n                  << \"] in graph config\";\n      return modelbox::STATUS_BADCONF;\n    }\n  }\n\n  for (auto &model_output_name : model_output_list_) {\n    if (unit_output_set.find(model_output_name) == unit_output_set.end()) {\n      MBLOG_ERROR << \"Model miss output [\" << model_output_name\n                  << \"] in graph config\";\n      return modelbox::STATUS_BADCONF;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid AtcInference::ReadModelInfo() {\n  auto input_num = aclmdlGetNumInputs(model_desc_.get());\n  auto output_num = aclmdlGetNumOutputs(model_desc_.get());\n  std::stringstream model_info;\n  model_info << \"Model:\" << model_file_ << std::endl;\n  auto *desc_ptr = model_desc_.get();\n  size_t max_batch_size = 1;\n  SaveBatchInfo(desc_ptr, 
model_info, max_batch_size);\n  model_info << \"Input:\" << std::endl;\n  aclmdlIODims dims;\n  for (size_t i = 0; i < input_num; ++i) {\n    auto ret = aclmdlGetInputDims(desc_ptr, i, &dims);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"Get model info for input [\" << i << \"] failed, ret \"\n                  << ret;\n      continue;\n    }\n\n    std::string name = aclmdlGetInputNameByIndex(desc_ptr, i);\n    model_input_list_.push_back(name);\n    auto size = aclmdlGetInputSizeByIndex(desc_ptr, i);\n    model_input_size_.push_back(size);\n    auto format = aclmdlGetInputFormat(desc_ptr, i);\n    auto data_type = aclmdlGetInputDataType(desc_ptr, i);\n\n    if (name == ACL_DYNAMIC_TENSOR_NAME) {\n      dynamic_batch_tensor_index_ = i;\n      ret = aclrtMalloc(&dynamic_batch_mem_ptr_, model_input_size_[i],\n                        aclrtMemMallocPolicy::ACL_MEM_MALLOC_NORMAL_ONLY);\n      if (ret != ACL_SUCCESS || dynamic_batch_mem_ptr_ == nullptr) {\n        MBLOG_ERROR << \"malloc acl memory failed, size: \" << size;\n        return;\n      }\n    }\n\n    LogTensorInfo(desc_ptr, i, dims, size, format, data_type, model_info);\n  }\n\n  model_info << \"Output:\" << std::endl;\n  for (size_t i = 0; i < output_num; ++i) {\n    auto ret = aclmdlGetOutputDims(desc_ptr, i, &dims);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"Get model info for output [\" << i << \"] failed, ret \"\n                  << ret;\n      continue;\n    }\n\n    SaveOutputShape(dims);\n    std::string name = aclmdlGetOutputNameByIndex(desc_ptr, i);\n    model_output_list_.push_back(name);\n    auto size = aclmdlGetOutputSizeByIndex(desc_ptr, i);\n    if (dynamic_batch_tensor_index_ >= 0 && dims.dimCount > 0 &&\n        max_batch_size != (size_t)dims.dims[0]) {\n      MBLOG_ERROR << \"model output tensor [\" << name\n                  << \"] dims error, first dims: \" << dims.dims[0]\n                  << \" is not same with max_batch_size: \" << max_batch_size;\n    
}\n\n    model_output_size_.push_back(size / max_batch_size);\n    auto format = aclmdlGetOutputFormat(desc_ptr, i);\n    auto data_type = aclmdlGetOutputDataType(desc_ptr, i);\n    output_data_type_.push_back(GetModelBoxDataType(data_type));\n    LogTensorInfo(desc_ptr, i, dims, size, format, data_type, model_info);\n  }\n\n  MBLOG_INFO << model_info.str();\n}\n\nvoid AtcInference::SaveOutputShape(const aclmdlIODims &dims) {\n  std::vector<size_t> shape;\n  for (size_t i = 0; i < dims.dimCount; ++i) {\n    shape.push_back(dims.dims[i]);\n  }\n\n  output_shape_.push_back(shape);\n}\n\nvoid AtcInference::SaveBatchInfo(aclmdlDesc *desc_ptr,\n                                 std::stringstream &model_info,\n                                 size_t &max_batch_size) {\n  aclmdlBatch batch;\n  auto ret = aclmdlGetDynamicBatch(desc_ptr, &batch);\n  if (ret != ACL_ERROR_NONE) {\n    model_info << \"Get dynamic batch failed, ret \" << ret;\n  } else {\n    model_info << \"Dynamic batch:[\";\n    size_t size = 0;\n    for (size_t i = 0; i < batch.batchCount; ++i) {\n      dynamic_batch_set_.emplace(batch.batch[i]);\n      if (size < batch.batch[i]) {\n        size = batch.batch[i];\n      }\n      model_info << batch.batch[i];\n      if (i + 1 == batch.batchCount) {\n        model_info << \"]\";\n      } else {\n        model_info << \",\";\n      }\n    }\n\n    if (batch.batchCount == 0) {\n      model_info << \"]\";\n    } else {\n      max_batch_size = size;\n    }\n  }\n  model_info << std::endl;\n}\n\nvoid AtcInference::LogTensorInfo(aclmdlDesc *desc_ptr, size_t index,\n                                 aclmdlIODims &dims, size_t size,\n                                 aclFormat format, aclDataType data_type,\n                                 std::stringstream &model_info) {\n  model_info << \"index:\" << index;\n  model_info << \",name:\" << dims.name;\n  model_info << \",dim:[\";\n  for (size_t j = 0; j < dims.dimCount; ++j) {\n    model_info << dims.dims[j];\n    if (j 
+ 1 == dims.dimCount) {\n      model_info << \"]\";\n    } else {\n      model_info << \",\";\n    }\n  }\n  if (dims.dimCount == 0) {\n    model_info << \"]\";\n  }\n\n  model_info << \",size:\" << size;\n  model_info << \",format:\" << GetFormatStr(format);\n  model_info << \",data type:\" << GetDataTypeStr(data_type);\n  model_info << std::endl;\n}\n\nstd::string AtcInference::GetFormatStr(aclFormat format) {\n  auto item = format_str_.find(format);\n  if (item != format_str_.end()) {\n    return item->second;\n  }\n\n  return std::to_string(format);\n}\n\nstd::string AtcInference::GetDataTypeStr(aclDataType data_type) {\n  auto item = data_type_str_.find(data_type);\n  if (item != data_type_str_.end()) {\n    return item->second;\n  }\n\n  return std::to_string(data_type);\n}\n\nmodelbox::ModelBoxDataType AtcInference::GetModelBoxDataType(\n    aclDataType data_type) {\n  auto item = data_type_flow_.find(data_type);\n  if (item != data_type_flow_.end()) {\n    return item->second;\n  }\n\n  return modelbox::ModelBoxDataType::MODELBOX_TYPE_INVALID;\n}\n\nmodelbox::Status AtcInference::GetCurrentBatchSize(\n    std::shared_ptr<modelbox::DataContext> &data_ctx, size_t &batch_size) {\n  if (model_input_list_.size() == 0) {\n    MBLOG_ERROR << \"model_input_list_ is empty \";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto buffer_list = data_ctx->Input()->at(model_input_list_[0]);\n  if (buffer_list == nullptr) {\n    MBLOG_ERROR << \"get current batch size  failed  \";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if ((dynamic_batch_tensor_index_ >= 0) &&\n      (dynamic_batch_set_.find(buffer_list->Size()) ==\n       dynamic_batch_set_.end())) {\n    MBLOG_ERROR << \"current model is not support input batch_size: \"\n                << buffer_list->Size();\n    return modelbox::STATUS_FAULT;\n  }\n\n  batch_size = buffer_list->Size();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status AtcInference::Infer(\n    std::shared_ptr<modelbox::DataContext> 
&data_ctx, aclrtStream stream) {\n  auto acl_ret = aclrtSetDevice(device_id_);\n  if (acl_ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclrtSetDevice failed, device_id \" << device_id_ << \",ret \"\n                << acl_ret;\n    return {modelbox::STATUS_FAULT, \"Set device failed\"};\n  }\n\n  size_t current_batch_size;\n  auto ret = GetCurrentBatchSize(data_ctx, current_batch_size);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"get current batch size failed\";\n    return {modelbox::STATUS_FAULT, \"Get current batch size failed\"};\n  }\n\n  auto input =\n      CreateDataSet(data_ctx->Input(), model_input_list_, current_batch_size);\n  if (input == nullptr) {\n    MBLOG_ERROR << \"Create input for infer failed\";\n    return {modelbox::STATUS_FAULT, \"Create input failed\"};\n  }\n\n  ret = PrepareOutput(data_ctx, current_batch_size);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Prepare output failed\";\n    return {modelbox::STATUS_FAULT, \"Prepare output failed\"};\n  }\n\n  auto output =\n      CreateDataSet(data_ctx->Output(), model_output_list_, current_batch_size);\n  if (output == nullptr) {\n    MBLOG_ERROR << \"Create output for infer failed\";\n    return {modelbox::STATUS_FAULT, \"Create output failed\"};\n  }\n\n  if (dynamic_batch_tensor_index_ >= 0) {\n    acl_ret = aclmdlSetDynamicBatchSize(model_id_, input.get(),\n                                        dynamic_batch_tensor_index_,\n                                        current_batch_size);\n    if (acl_ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"aclmdlSetDynamicBatchSize failed, ret \" << acl_ret;\n      return {modelbox::STATUS_FAULT, \"Execute acl set batch_size failed\"};\n    }\n  }\n\n  acl_ret = ACL_ERROR_NONE;\n  if (stream == nullptr) {\n    acl_ret = aclmdlExecute(model_id_, input.get(), output.get());\n  } else {\n    acl_ret = aclmdlExecuteAsync(model_id_, input.get(), output.get(), stream);\n    aclrtSynchronizeStream(stream);\n  }\n\n  
if (acl_ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclmdlExecute failed, ret \" << acl_ret;\n    return {modelbox::STATUS_FAULT, \"Execute acl infer failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status AtcInference::PrepareOutput(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const size_t &current_batch_size) {\n  auto output_count = model_output_list_.size();\n  for (size_t i = 0; i < output_count; ++i) {\n    auto &name = model_output_list_[i];\n    auto buffer_list = data_ctx->Output(name);\n    auto &size = model_output_size_[i];\n    std::vector<size_t> shape(current_batch_size, size);\n    buffer_list->Build(shape);\n    buffer_list->Set(\"shape\", output_shape_[i]);\n    buffer_list->Set(\"type\", output_data_type_[i]);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::shared_ptr<aclmdlDataset> AtcInference::CreateDataSet(\n    const std::shared_ptr<modelbox::BufferListMap> &buffer_list_map,\n    std::vector<std::string> &name_list, const size_t &current_batch_size) {\n  auto *data_set_ptr = aclmdlCreateDataset();\n  if (data_set_ptr == nullptr) {\n    MBLOG_ERROR << \"aclmdlCreateDataset return null\";\n    return nullptr;\n  }\n\n  std::shared_ptr<aclmdlDataset> data_set(data_set_ptr, [](aclmdlDataset *ptr) {\n    for (size_t i = 0; i < aclmdlGetDatasetNumBuffers(ptr); ++i) {\n      aclDataBuffer *dataBuffer = aclmdlGetDatasetBuffer(ptr, i);\n      (void)aclDestroyDataBuffer(dataBuffer);\n    }\n\n    (void)aclmdlDestroyDataset(ptr);\n  });\n\n  for (auto &tensor_name : name_list) {\n    void *mem_ptr = nullptr;\n    size_t size;\n    if (tensor_name != ACL_DYNAMIC_TENSOR_NAME) {\n      auto buffer_list = buffer_list_map->at(tensor_name);\n      if (buffer_list == nullptr) {\n        MBLOG_ERROR << \"Create data set for tensor \" << tensor_name\n                    << \" failed, buffer list is null\";\n        return nullptr;\n      }\n\n      if (current_batch_size != buffer_list->Size()) {\n        
MBLOG_ERROR << \"buffer bacth_size is not same, first bacth_size: \"\n                    << current_batch_size\n                    << \"  , current tensor: \" << tensor_name\n                    << \" bacth_size:\" << buffer_list->Size();\n        return nullptr;\n      }\n\n      mem_ptr = const_cast<void *>(buffer_list->ConstData());\n      size = buffer_list->GetBytes();\n    } else {\n      size = model_input_size_[dynamic_batch_tensor_index_];\n      mem_ptr = dynamic_batch_mem_ptr_;\n    }\n\n    auto *data_buffer = aclCreateDataBuffer(mem_ptr, size);\n    if (data_buffer == nullptr) {\n      MBLOG_ERROR << \"Create data set buffer for tensor \" << tensor_name\n                  << \"failed\";\n      return nullptr;\n    }\n\n    auto ret = aclmdlAddDatasetBuffer(data_set_ptr, data_buffer);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"Add data buffer to set failed for tensor \" << tensor_name;\n      aclDestroyDataBuffer(data_buffer);\n      return nullptr;\n    }\n  }\n\n  return data_set;\n}\n\nmodelbox::Status AtcInference::Deinit() {\n  if (!is_model_load_) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto acl_ret = aclrtSetDevice(device_id_);\n  if (acl_ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"aclrtSetDevice failed, ret \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = aclmdlUnload(model_id_);\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"Unload model failed, model id is \" << model_id_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (dynamic_batch_mem_ptr_ != nullptr) {\n    aclrtFree(dynamic_batch_mem_ptr_);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/atc_inference.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ATC_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_ATC_INFERENCE_H_\n\n#include <acl/acl.h>\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_context.h>\n\n#include <string>\n#include <vector>\n\nclass AtcInference {\n public:\n  modelbox::Status Init(const std::string &model_file,\n                        const std::shared_ptr<modelbox::Configuration> &config,\n                        const std::vector<std::string> &unit_input_list,\n                        const std::vector<std::string> &unit_output_list,\n                        const std::shared_ptr<modelbox::Drivers> &drivers_ptr);\n\n  modelbox::Status Infer(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                         aclrtStream stream);\n\n  modelbox::Status Deinit();\n\n private:\n  modelbox::Status ParseConfig(\n      const std::shared_ptr<modelbox::Configuration> &config);\n\n  modelbox::Status LoadModel(\n      const std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n      const std::shared_ptr<modelbox::Configuration> &config);\n\n  modelbox::Status GetModelDesc();\n\n  modelbox::Status CheckModelIO(\n      const std::vector<std::string> &unit_input_list,\n      const std::vector<std::string> &unit_output_list);\n\n  void ReadModelInfo();\n\n  void 
SaveOutputShape(const aclmdlIODims &dims);\n\n  void SaveBatchInfo(aclmdlDesc *desc_ptr, std::stringstream &model_info,\n                     size_t &max_batch_size);\n\n  void LogTensorInfo(aclmdlDesc *desc_ptr, size_t index, aclmdlIODims &dims,\n                     size_t size, aclFormat format, aclDataType data_type,\n                     std::stringstream &model_info);\n\n  std::string GetFormatStr(aclFormat format);\n\n  std::string GetDataTypeStr(aclDataType data_type);\n\n  modelbox::ModelBoxDataType GetModelBoxDataType(aclDataType data_type);\n\n  modelbox::Status PrepareOutput(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const size_t &current_batch_size);\n\n  std::shared_ptr<aclmdlDataset> CreateDataSet(\n      const std::shared_ptr<modelbox::BufferListMap> &buffer_list_map,\n      std::vector<std::string> &name_list, const size_t &current_batch_size);\n\n  modelbox::Status GetCurrentBatchSize(\n      std::shared_ptr<modelbox::DataContext> &data_ctx, size_t &batch_size);\n\n  int32_t device_id_{0};\n  std::string model_file_;\n  int32_t dynamic_batch_tensor_index_{-1};\n  void *dynamic_batch_mem_ptr_{nullptr};\n  std::set<size_t> dynamic_batch_set_;\n\n  uint32_t model_id_{0};\n  bool is_model_load_{false};\n  std::shared_ptr<aclmdlDesc> model_desc_{nullptr};\n  std::vector<std::string> model_input_list_;\n  std::vector<std::string> model_output_list_;\n  std::vector<size_t> model_input_size_;\n  std::vector<size_t> model_output_size_;\n  std::vector<std::vector<size_t>> output_shape_;\n  std::vector<modelbox::ModelBoxDataType> output_data_type_;\n\n  std::map<aclFormat, std::string> format_str_{\n      {ACL_FORMAT_UNDEFINED, \"UNDEFINED\"}, {ACL_FORMAT_NCHW, \"NCHW\"},\n      {ACL_FORMAT_NHWC, \"NHWC\"},           {ACL_FORMAT_ND, \"ND\"},\n      {ACL_FORMAT_NC1HWC0, \"NC1HWC0\"},     {ACL_FORMAT_FRACTAL_Z, \"FRACTAL_Z\"}};\n  std::map<aclDataType, std::string> data_type_str_{\n      {ACL_DT_UNDEFINED, \"UNDEFINED\"},\n      
{ACL_FLOAT, \"FLOAT\"},\n      {ACL_FLOAT16, \"FLOAT16\"},\n      {ACL_INT8, \"INT8\"},\n      {ACL_INT32, \"INT32\"},\n      {ACL_UINT8, \"UINT8\"},\n      {ACL_INT16, \"INT16\"},\n      {ACL_UINT16, \"UINT16\"},\n      {ACL_UINT32, \"UINT32\"},\n      {ACL_INT64, \"INT64\"},\n      {ACL_UINT64, \"UINT64\"},\n      {ACL_DOUBLE, \"DOUBLE\"},\n      {ACL_BOOL, \"BOOL\"}};\n  std::map<aclDataType, modelbox::ModelBoxDataType> data_type_flow_{\n      {ACL_FLOAT, modelbox::MODELBOX_FLOAT},\n      {ACL_FLOAT16, modelbox::MODELBOX_HALF},\n      {ACL_INT8, modelbox::MODELBOX_INT8},\n      {ACL_INT32, modelbox::MODELBOX_INT32},\n      {ACL_UINT8, modelbox::MODELBOX_UINT8},\n      {ACL_INT16, modelbox::MODELBOX_INT16},\n      {ACL_UINT16, modelbox::MODELBOX_UINT16},\n      {ACL_UINT32, modelbox::MODELBOX_UINT32},\n      {ACL_INT64, modelbox::MODELBOX_INT64},\n      {ACL_UINT64, modelbox::MODELBOX_UINT64},\n      {ACL_DOUBLE, modelbox::MODELBOX_DOUBLE},\n      {ACL_BOOL, modelbox::MODELBOX_BOOL}};\n};\n\n#endif"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/atc_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"atc_inference_flowunit.h\"\n\n#include <fstream>\n\n#include \"modelbox/device/ascend/device_ascend.h\"\n#include \"virtualdriver_inference.h\"\n\nAtcInferenceFlowUnit::AtcInferenceFlowUnit() = default;\nAtcInferenceFlowUnit::~AtcInferenceFlowUnit() = default;\n\nmodelbox::Status AtcInferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto unit_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  unit_desc->GetModelEntry();\n  auto config = unit_desc->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n\n  std::vector<std::string> input_name_list;\n  std::vector<std::string> output_name_list;\n  auto ret = GetFlowUnitIO(input_name_list, output_name_list);\n  if (ret != modelbox::STATUS_OK) {\n    return ret;\n  }\n\n  merge_config->SetProperty(\"deviceid\", dev_id_);\n  infer_ = std::make_shared<AtcInference>();\n  ret = infer_->Init(unit_desc->GetModelEntry(), merge_config, input_name_list,\n                     output_name_list, GetBindDevice()->GetDeviceManager()->GetDrivers());\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init inference failed\";\n    return 
ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status AtcInferenceFlowUnit::GetFlowUnitIO(\n    std::vector<std::string> &input_name_list,\n    std::vector<std::string> &output_name_list) {\n  auto unit_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  auto input_desc = unit_desc->GetFlowUnitInput();\n  auto output_desc = unit_desc->GetFlowUnitOutput();\n  for (auto &input : input_desc) {\n    input_name_list.push_back(input.GetPortName());\n  }\n\n  for (auto &output : output_desc) {\n    output_name_list.push_back(output.GetPortName());\n  }\n\n  if (input_name_list.empty() || output_name_list.empty()) {\n    MBLOG_ERROR << \"Wrong input[\" << input_name_list.size() << \"] or output[\"\n                << output_name_list.size() << \"] number\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status AtcInferenceFlowUnit::AscendProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, aclrtStream stream) {\n  auto ret = infer_->Infer(data_ctx, stream);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Execute infer failed, detail:\" << ret.Errormsg();\n    return {modelbox::STATUS_FAULT, ret.Errormsg()};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status AtcInferenceFlowUnit::Close() {\n  auto ret = infer_->Deinit();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Deinit inference failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid AtcInferenceFlowUnitDesc::SetModelEntry(const std::string &model_entry) {\n  model_entry_ = model_entry;\n}\n\nstd::string AtcInferenceFlowUnitDesc::GetModelEntry() { return model_entry_; }\n\nstd::shared_ptr<modelbox::FlowUnit>\nAtcInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  return std::make_shared<AtcInferenceFlowUnit>();\n};"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/atc_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_ASCEND_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_ASCEND_INFERENCE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/ascend/device_ascend.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include \"atc_inference.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\nconstexpr const char *INFERENCE_TYPE = \"acl\";\n\nclass AtcInferenceFlowUnit : public modelbox::AscendFlowUnit {\n public:\n  AtcInferenceFlowUnit();\n  ~AtcInferenceFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status AscendProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      aclrtStream stream) override;\n\n private:\n  modelbox::Status GetFlowUnitIO(std::vector<std::string> &input_name_list,\n                               std::vector<std::string> &output_name_list);\n\n  std::shared_ptr<AtcInference> infer_;\n};\n\nclass AtcInferenceFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  AtcInferenceFlowUnitDesc() = default;\n  ~AtcInferenceFlowUnitDesc() override = default;\n\n  void SetModelEntry(const std::string &model_entry);\n  std::string GetModelEntry();\n\n 
private:\n  std::string model_entry_;\n};\n\nclass AtcInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  AtcInferenceFlowUnitFactory() = default;\n  ~AtcInferenceFlowUnitFactory() override = default;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    return {};\n  }\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_ASCEND_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/atc_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <dsmi_common_interface.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass InferenceAscendFlowUnitTest : public testing::Test {\n public:\n  InferenceAscendFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    // Test ascend runtime\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n\n    const std::string src_file =\n        test_assets + \"/atc_inference/\" + test_model_file;\n    const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n    atc_inference_path = test_data_dir + \"/atc_inference\";\n    mkdir(atc_inference_path.c_str(), 0700);\n    dest_model_file = atc_inference_path + \"/\" + test_model_file;\n    dest_toml_file = atc_inference_path + \"/\" + 
test_toml_file;\n    CopyFile(src_file, dest_model_file, true);\n    CopyFile(src_toml, dest_toml_file, true);\n    const std::string src_file_en =\n        test_assets + \"/atc_inference/\" + test_model_file_en;\n    const std::string src_toml_en = test_data_dir + \"/\" + test_toml_file_en;\n    dest_model_file_en = atc_inference_path + \"/\" + test_model_file_en;\n    dest_toml_file_en = atc_inference_path + \"/\" + test_toml_file_en;\n    CopyFile(src_file_en, dest_model_file_en, true);\n    CopyFile(src_toml_en, dest_toml_file_en, true);\n  }\n\n  void TearDown() override {\n    remove(dest_model_file.c_str());\n    remove(dest_toml_file.c_str());\n    remove(dest_model_file_en.c_str());\n    remove(dest_toml_file_en.c_str());\n    remove(atc_inference_path.c_str());\n\n    driver_flow_ = nullptr;\n  };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_model_file = \"2d_2048_w_stage1_pad0.om\",\n                    test_toml_file = \"virtual_atc_infer_test.toml\",\n                    test_model_file_en = \"2d_2048_w_stage1_pad0_en.om\",\n                    test_toml_file_en = \"virtual_atc_infer_test_en.toml\";\n\n  std::string atc_inference_path, dest_model_file, dest_toml_file,\n      dest_model_file_en, dest_toml_file_en;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nStatus InferenceAscendFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"prepare_infer_data\", {}, {\"out\"});\n    mock_desc->SetFlowType(STREAM);\n    auto open_func =\n        [=](const std::shared_ptr<modelbox::Configuration>& flow_option,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          auto ext_data = mock_flowunit->CreateExternalData();\n          if (!ext_data) {\n            MBLOG_ERROR << \"can not get external data.\";\n  
        }\n\n          auto buffer_list = ext_data->CreateBufferList();\n          buffer_list->Build({10});\n\n          auto status = ext_data->Send(buffer_list);\n          if (!status) {\n            MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n          }\n\n          status = ext_data->Close();\n          if (!status) {\n            MBLOG_ERROR << \"external data close failed:\" << status;\n          }\n\n          return modelbox::STATUS_OK;\n        };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          MBLOG_INFO << \"prepare_infer_data Process\";\n          auto output_buf_1 = op_ctx->Output(\"out\");\n          const size_t len = 2048;\n          std::vector<size_t> shape_vector(1, len * sizeof(float));\n          modelbox::ModelBoxDataType type = MODELBOX_FLOAT;\n          output_buf_1->Build(shape_vector);\n          output_buf_1->Set(\"type\", type);\n          std::vector<size_t> shape{len};\n          output_buf_1->Set(\"shape\", shape);\n          auto* dev_data = (float*)(output_buf_1->MutableData());\n          for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n            for (size_t j = 0; j < len; ++j) {\n              dev_data[i * len + j] = 0.0;\n            }\n          }\n\n          return modelbox::STATUS_OK;\n        };\n\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterOpenFunc(open_func);\n    mock_functions->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_functions->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"check_infer_result\", {\"in\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          
std::shared_ptr<BufferList> input_bufs = op_ctx->Input(\"in\");\n          EXPECT_EQ(input_bufs->Size(), 1);\n          std::vector<size_t> input_shape;\n          auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n          EXPECT_TRUE(result);\n          EXPECT_EQ(input_shape.size(), 4);\n          EXPECT_EQ(input_shape[0], 1);\n          EXPECT_EQ(input_shape[1], 256);\n          EXPECT_EQ(input_shape[2], 1);\n          EXPECT_EQ(input_shape[3], 2048);\n\n          const auto* ptr = (const float*)input_bufs->ConstData();\n          for (size_t i = 0; i < 200; ++i) {\n            EXPECT_TRUE(std::abs(ptr[i]) < 1e-7);\n          }\n\n          return modelbox::STATUS_OK;\n        };\n\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_functions->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<MockFlow> InferenceAscendFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(InferenceAscendFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_infer_data[type=flowunit, flowunit=prepare_infer_data, device=cpu, deviceid=0, label=\"<out>\"]             \n          atc_inference[type=flowunit, flowunit=acl_inference, device=ascend, deviceid=0, label=\"<input> | <output:0>\", batch_size=1]\n          check_infer_result[type=flowunit, flowunit=check_infer_result, device=cpu, deviceid=0, label=\"<in>\", batch_size=1]  \n                                  \n          prepare_infer_data:out -> atc_inference:input\n          atc_inference:output:0 -> 
check_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"RunUnit\", toml_content, 3 * 1000);\n}\n\nTEST_F(InferenceAscendFlowUnitTest, RunUnitEncrypt) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_infer_data[type=flowunit, flowunit=prepare_infer_data, device=cpu, deviceid=0, label=\"<out>\"]             \n          atc_inference[type=flowunit, flowunit=acl_inference_encrypt, device=ascend, deviceid=0, label=\"<input> | <output:0>\", batch_size=1]\n          check_infer_result[type=flowunit, flowunit=check_infer_result, device=cpu, deviceid=0, label=\"<in>\", batch_size=1]  \n                                  \n          prepare_infer_data:out -> atc_inference:input\n          atc_inference:output:0 -> check_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"RunUnit\", toml_content, 3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"atc_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/ascend/device_ascend.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"acl_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A ascend inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<AtcInferenceFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/test_toml/modelbox.test.ascend.inference.encrypt.in",
    "content": "[base]\r\nname = \"acl_inference_encrypt\"\r\ndevice = \"ascend\"\r\nversion = \"1.0.0\"\r\ndescription = \"an ascend inference flowunit\"\r\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/atc_inference/2d_2048_w_stage1_pad0_en.om\"\r\ntype = \"inference\"\r\nvirtual_type = \"acl\"\r\n\r\n[encryption]\r\nplugin_name = \"modeldecrypt-plugin\"\r\nplugin_version = \"1.0.0\"\r\nrootkey = \"+bo3telmmykFCNYg9Hn8dSJlpOH1CTBWeYNYhYtxNrbNQhNkrXW5dC5CdURgqJ2HpnbaviiKCY9Di+x83S+kmAQGx0zM7Z9pJqSKCBV2upjx\"\r\npasswd = \"Y4smkXW0eNaaANVPgfxMN+3ddfeaxEkf2g/+pRNMHFYFkDFspXocPAmAB06fSDFqcs0bg1joo6gSgWj0bacDsg==\"\r\n\r\n[input]\r\n[input.input1]\r\nname = \"input\"\r\ntype = \"float\"\r\ndevice = \"ascend\"\r\n\r\n[output]\r\n[output.output1]\r\nname = \"output:0\"\r\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/inference/test_toml/modelbox.test.ascend.inference.in",
    "content": "[base]\nname = \"acl_inference\"\ndevice = \"ascend\"\nversion = \"1.0.0\"\ndescription = \"an ascend inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/atc_inference/2d_2048_w_stage1_pad0.om\"\ntype = \"inference\"\nvirtual_type = \"acl\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output:0\""
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10.2)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"mindspore-lite-inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif (NOT MINDSPORE_LITE_FOUND) \n    message(STATUS \"Not found mindspore-lite, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\nset(CMAKE_CXX_STANDARD 17)\n\nset(MINDSPORE_FLOWUNIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})\n\nfile(GLOB_RECURSE UNIT_SOURCE \n    ${MINDSPORE_FLOWUNIT_SOURCE_DIR}/*.cpp \n    ${MINDSPORE_FLOWUNIT_SOURCE_DIR}/*.cc \n    ${MINDSPORE_FLOWUNIT_SOURCE_DIR}/*.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" 
${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${MINDSPORE_ENGINE_SRC_DIR})\ninclude_directories(${MINDSPORE_FLOWUNIT_SOURCE_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MINDSPORE_LITE_INCLUDE_DIR})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${MINDSPORE_FLOWUNIT_SOURCE_DIR})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore_lite.ascend.inference.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_lite_ascend_infer_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore_lite.ascend.inference.encrypt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_lite_ascend_infer_test_en.toml @ONLY)\n\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_compile_options(${MODELBOX_UNIT_SHARED} PUBLIC -fvisibility=hidden)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"mindspore_ascend_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/ascend/device_ascend.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"mindspore_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A mindspore ascend inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<MindSporeInferenceAsendFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetGlobal(true);\n  return;\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/mindspore_ascend_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mindspore_ascend_inference_flowunit.h\"\n\nMindSporeInferenceAsendFlowUnit::MindSporeInferenceAsendFlowUnit() = default;\n\nMindSporeInferenceAsendFlowUnit::~MindSporeInferenceAsendFlowUnit() = default;\n\nmodelbox::Status MindSporeInferenceAsendFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto context = std::make_shared<mindspore::Context>();\n  auto &device_list = context->MutableDeviceInfo();\n  auto ascend_device_info = std::make_shared<mindspore::AscendDeviceInfo>();\n  ascend_device_info->SetDeviceID(dev_id_);\n  device_list.push_back(ascend_device_info);\n\n  auto cpu_device_info = std::make_shared<mindspore::CPUDeviceInfo>();\n  device_list.push_back(cpu_device_info);\n\n  infer_ = std::make_shared<MindSporeInference>(GetBindDevice(), context);\n  return infer_->Open(opts, this->GetFlowUnitDesc());\n}\n\nmodelbox::Status MindSporeInferenceAsendFlowUnit::AscendProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, aclrtStream stream) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInferenceAsendFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return infer_->Infer(data_ctx);\n}\n\nmodelbox::Status MindSporeInferenceAsendFlowUnit::Close() {\n  infer_ = nullptr;\n  return 
modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nMindSporeInferenceAsendFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  auto inference_flowunit = std::make_shared<MindSporeInferenceAsendFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/mindspore_ascend_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_ASEND_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_ASEND_H_\n\n#include <modelbox/flowunit.h>\n\n#include \"mindspore_inference.h\"\n#include \"modelbox/device/ascend/device_ascend.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\n\nclass MindSporeInferenceAsendFlowUnit : public modelbox::AscendFlowUnit {\n public:\n  MindSporeInferenceAsendFlowUnit();\n  virtual ~MindSporeInferenceAsendFlowUnit();\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status AscendProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      aclrtStream stream) override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status Close() override;\n\n private:\n  std::shared_ptr<MindSporeInference> infer_;\n};\n\nclass MindSporeInferenceAsendFlowUnitFactory\n    : public modelbox::FlowUnitFactory {\n public:\n  MindSporeInferenceAsendFlowUnitFactory() = default;\n  virtual ~MindSporeInferenceAsendFlowUnitFactory() = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type);\n\n  std::string GetFlowUnitFactoryType() { return 
FLOWUNIT_TYPE; };\n  std::string GetVirtualType() { return INFERENCE_TYPE; };\n  std::string GetFlowUnitInputDeviceType() override { return \"cpu\"; };\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\n  FlowUnitProbe() {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_ASEND_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/mindspore_lite_ascend_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <dsmi_common_interface.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mindspore_inference_flowunit_test.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass InferenceMindSporeLiteAscendFlowUnitTest : public testing::Test {\n public:\n  InferenceMindSporeLiteAscendFlowUnitTest()\n      : mindspore_flow_(std::make_shared<InferenceMindSporeFlowUnitTest>()) {}\n\n protected:\n  virtual void SetUp() {\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = mindspore_flow_->Init();\n    EXPECT_EQ(ret, STATUS_OK);\n\n    const std::string src_file =\n        test_assets + \"/mindspore_inference/\" + test_model_file;\n    const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n    mindspore_inference_path = test_data_dir + \"/mindspore_inference\";\n    mkdir(mindspore_inference_path.c_str(), 0700);\n    dest_model_file = 
mindspore_inference_path + \"/\" + test_model_file;\n    dest_toml_file = mindspore_inference_path + \"/\" + test_toml_file;\n    CopyFile(src_file, dest_model_file, true);\n    CopyFile(src_toml, dest_toml_file, true);\n    const std::string src_file_en =\n        test_assets + \"/mindspore_inference/\" + test_model_file_en;\n    const std::string src_toml_en = test_data_dir + \"/\" + test_toml_file_en;\n    dest_model_file_en = mindspore_inference_path + \"/\" + test_model_file_en;\n    dest_toml_file_en = mindspore_inference_path + \"/\" + test_toml_file_en;\n    CopyFile(src_file_en, dest_model_file_en, true);\n    CopyFile(src_toml_en, dest_toml_file_en, true);\n  }\n\n  virtual void TearDown() {\n    remove(dest_model_file.c_str());\n    remove(dest_toml_file.c_str());\n    remove(dest_model_file_en.c_str());\n    remove(dest_toml_file_en.c_str());\n    remove(mindspore_inference_path.c_str());\n\n    mindspore_flow_ = nullptr;\n  };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_model_file = \"tensor_add.mindir\",\n                    test_toml_file = \"virtual_mindspore_infer_test.toml\",\n                    test_model_file_en = \"tensor_add_en.mindir\",\n                    test_toml_file_en = \"virtual_mindspore_infer_test_en.toml\";\n\n  std::string mindspore_inference_path, dest_model_file, dest_toml_file,\n      dest_model_file_en, dest_toml_file_en;\n\n  std::shared_ptr<InferenceMindSporeFlowUnitTest> mindspore_flow_;\n};\n\nTEST_F(InferenceMindSporeLiteAscendFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          
prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0]             \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference, device=ascend, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:x_\n          prepare_ms_infer_data:out2 -> mindspore_inference:y_\n          mindspore_inference:\"Default/Add-op3\"-> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceMindSporeLiteAscendFlowUnitTest, RunUnitEncrypt) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0]             \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference_encrypt, device=ascend, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:x_\n          prepare_ms_infer_data:out2 -> mindspore_inference:y_\n          mindspore_inference:\"Default/Add-op3\" -> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore_lite.ascend.inference.encrypt.in",
    "content": "[base]\nname = \"mindspore_inference_encrypt\"\ndevice = \"ascend\"\nversion = \"1.0.0\"\ndescription = \"an mindspore ascend inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add_en.ms\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.0\"\nrootkey = \"5yQMTJz5vFZFD7ABFyr6dCwjAVrPv5QTv3tfKSZ/cAAJz4Qnoj6VEiSCg2xVAr/z2MXsfSI1NZXYQ9zGqcMPB9+8H1NnSokrs3jKe7bSNDdo\"\npasswd = \"zTYD4Jued4ZotTeD8yBxVApnj74pMPlZQoG56FkGN2bcsBpcU/4IFQ4DGD55nNtZ2MUjoMYvWrVxSVuCMW5cqw==\"\n\n[input]\n[input.input1]\nname = \"x_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[input.input2]\nname = \"y_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"Default/Add-op3\"\ntype = \"float\"\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore_lite.ascend.inference.in",
    "content": "[base]\nname = \"mindspore_inference\"\ndevice = \"ascend\"\nversion = \"1.0.0\"\ndescription = \"an mindspore ascend inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add.ms\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[config]\ninput_format = \"NCHW\"\n\n[input]\n[input.input1]\nname = \"x_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[input.input2]\nname = \"y_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"Default/Add-op3\"\ntype = \"float\"\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/padding/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"padding\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_PADDING_ASCEND_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_PADDING_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/padding/padding_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"padding_flowunit.h\"\n\n#include \"image_process.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\n#define YUV420SP_SIZE(width, height) ((width) * (height)*3 / 2)\n#define ALIGNMENT_DOWN(size) ((size)&0xfffffffe)\n#define MINI_WIDTH_STRIDE 32\n#define MINI_WIDTHE_OFFSET 15\n\nconst std::string output_img_pix_fmt = \"nv12\";\n\nconst std::map<std::string, AlignType> kVerticalAlignType = {\n    {\"top\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"bottom\", AlignType::END}};\n\nconst std::map<std::string, AlignType> kHorizontalAlignType = {\n    {\"left\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"right\", AlignType::END}};\n\nconst std::map<std::string, int32_t> AsendResizeInterpolation = {\n    {\"default\", 0},\n    {\"bilinear_opencv\", 1},\n    {\"nearest_neighbor_opencv\", 2},\n    {\"bilinear_tensorflow\", 3},\n    {\"nearest_neighbor_tensorflow\", 4}};\n\nmodelbox::Status PaddingFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  out_image_.width_ = opts->GetUint32(\"width\", 0);\n  if (out_image_.width_ == 0) {\n    out_image_.width_ = opts->GetUint32(\"image_width\", 0);\n  }\n\n  out_image_.height_ = opts->GetUint32(\"height\", 0);\n  if (out_image_.height_ == 0) {\n    out_image_.height_ = 
opts->GetUint32(\"image_height\", 0);\n  }\n\n  if (out_image_.width_ == 0 || out_image_.height_ == 0) {\n    MBLOG_ERROR << \"Dest width or dest height not valid\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto vertical_align_str = opts->GetString(\"vertical_align\", \"top\");\n  auto item = kVerticalAlignType.find(vertical_align_str);\n  if (item == kVerticalAlignType.end()) {\n    MBLOG_ERROR << \"vertical align must be one of [top|center|bottom]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  vertical_align_ = item->second;\n\n  auto horizontal_align_str = opts->GetString(\"horizontal_align\", \"left\");\n  item = kHorizontalAlignType.find(horizontal_align_str);\n  if (item == kHorizontalAlignType.end()) {\n    MBLOG_ERROR << \"horizontal align must be one of [left|center|right]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  horizontal_align_ = item->second;\n\n  padding_data_ = opts->GetUint8s(\"padding_data\", {0, 0, 0});\n  if (padding_data_.size() != 3) {\n    MBLOG_ERROR << \"padding data size must be 3\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  need_scale_ = opts->GetBool(\"need_scale\", true);\n  auto interpolation_str = opts->GetString(\"interpolation\", \"default\");\n  auto interpolation_item = AsendResizeInterpolation.find(interpolation_str);\n  if (interpolation_item == AsendResizeInterpolation.end()) {\n    MBLOG_ERROR << \"not support interpolation \" << interpolation_str;\n    return modelbox::STATUS_BADCONF;\n  }\n  interpolation_ = interpolation_item->second;\n\n  out_image_.width_stride_ = imageprocess::align_up(\n      out_image_.width_, imageprocess::ASCEND_WIDTH_ALIGN);\n  out_image_.height_stride_ = imageprocess::align_up(\n      out_image_.height_, imageprocess::ASCEND_HEIGHT_ALIGN);\n  size_t buffer_size = 0;\n  auto ret =\n      imageprocess::GetImageBytes(output_img_pix_fmt, out_image_.width_stride_,\n                                  out_image_.height_stride_, buffer_size);\n  if (ret != modelbox::STATUS_SUCCESS) {\n 
   MBLOG_ERROR << \"get image bytes failed, err \" << ret;\n    return ret;\n  }\n  out_image_.buffer_size_ = buffer_size;\n\n  auto acl_ret = aclrtSetDevice(dev_id_);\n  if (acl_ret != 0) {\n    MBLOG_ERROR << \"failed set device \" << dev_id_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto alloc_buffer = [&](uint32_t size,\n                          const std::string &device) -> std::shared_ptr<void> {\n    void *buffer = nullptr;\n    if (device == \"cpu\") {\n      if (0 == aclrtMallocHost(&buffer, size)) {\n        std::shared_ptr<void> shared_buffer_cpu(\n            buffer, [this](void *ptr) { aclrtFreeHost(ptr); });\n        return shared_buffer_cpu;\n      }\n\n    } else {\n      if (0 == acldvppMalloc((void **)&buffer, size)) {\n        std::shared_ptr<void> shared_buffer_device(\n            buffer, [this](void *ptr) { acldvppFree(ptr); });\n        return shared_buffer_device;\n      }\n    }\n\n    return nullptr;\n  };\n  auto host_buffer = alloc_buffer(buffer_size, \"cpu\");\n  if (host_buffer == nullptr) {\n    MBLOG_ERROR << \"malloc host buffer failed, buffer size: \" << buffer_size;\n    return modelbox::STATUS_FAULT;\n  }\n\n  size_t y_size = out_image_.width_stride_ * out_image_.height_stride_;\n  size_t uv_size = y_size / 2;\n  aclrtMemset(host_buffer.get(), y_size, padding_data_[0], y_size);\n\n  u_int8_t *uv_buffer = (u_int8_t *)host_buffer.get() + y_size;\n  for (size_t i = 0; i < uv_size; i = i + 2) {\n    uv_buffer[i] = padding_data_[1];\n    uv_buffer[i + 1] = padding_data_[2];\n  }\n\n  buffer_ = alloc_buffer(buffer_size, \"ascend\");\n  if (buffer_ == nullptr) {\n    MBLOG_ERROR << \"malloc device buffer failed, buffer size: \" << buffer_size;\n    return modelbox::STATUS_FAULT;\n  }\n\n  acl_ret = aclrtMemcpy(buffer_.get(), buffer_size, host_buffer.get(),\n                        buffer_size, ACL_MEMCPY_HOST_TO_DEVICE);\n  if (acl_ret) {\n    return {modelbox::STATUS_FAULT, \"failed copy host to device\"};\n  }\n\n  return 
modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::AscendProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, aclrtStream stream) {\n  auto input_img_buffer_list = data_ctx->Input(IN_IMG);\n  auto img_count = input_img_buffer_list->Size();\n  if (img_count == 0) {\n    MBLOG_ERROR << \"input img buffer list is empty\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto output_img_buffer_list = data_ctx->Output(OUT_IMG);\n  out_image_.width_stride_ = imageprocess::align_up(\n      out_image_.width_, imageprocess::ASCEND_WIDTH_ALIGN);\n  out_image_.height_stride_ = imageprocess::align_up(\n      out_image_.height_, imageprocess::ASCEND_HEIGHT_ALIGN);\n  size_t buffer_size = 0;\n  auto ret =\n      imageprocess::GetImageBytes(output_img_pix_fmt, out_image_.width_stride_,\n                                  out_image_.height_stride_, buffer_size);\n  if (!ret) {\n    MBLOG_ERROR << \"get image bytes failed, err \" << ret;\n    return ret;\n  }\n  out_image_.buffer_size_ = buffer_size;\n\n  std::vector<size_t> output_shape(img_count, out_image_.buffer_size_);\n  ret = output_img_buffer_list->Build(output_shape, false);\n  if (!ret) {\n    MBLOG_ERROR << \"Build output failed, err \" << ret;\n    return ret;\n  }\n\n  output_img_buffer_list->CopyMeta(input_img_buffer_list);\n\n  for (size_t i = 0; i < img_count; ++i) {\n    auto in_img_buffer = input_img_buffer_list->At(i);\n    auto out_img_buffer = output_img_buffer_list->At(i);\n    auto ret = ProcessOneImg(in_img_buffer, out_img_buffer, stream);\n    if (!ret) {\n      MBLOG_ERROR << \"Padding image failed, err \" << ret;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status SetInImageSize(std::shared_ptr<modelbox::Buffer> &in_image,\n                                ImageSize &ori_image, std::string &pix_fmt) {\n  auto ret = in_image->Get(\"width\", ori_image.width_);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT, \"get in_image width failed\"};\n  }\n  ret = 
in_image->Get(\"height\", ori_image.height_);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT, \"get in_image height failed\"};\n  }\n\n  ret = in_image->Get(\"pix_fmt\", pix_fmt);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT, \"get in_image pix_fmt failed\"};\n  }\n\n  ori_image.width_stride_ = imageprocess::align_up(\n      ori_image.width_, imageprocess::ASCEND_WIDTH_ALIGN);\n  ori_image.width_stride_ = ori_image.width_stride_ < MINI_WIDTH_STRIDE\n                                ? MINI_WIDTH_STRIDE\n                                : ori_image.width_stride_;\n\n  ori_image.height_stride_ = imageprocess::align_up(\n      ori_image.height_, imageprocess::ASCEND_HEIGHT_ALIGN);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status PaddingFlowUnit::ProcessOneImg(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &out_image, aclrtStream stream) {\n  ResizeCropParam param;\n  ImageSize ori_image;\n  std::string pix_fmt = \"nv12\";\n  if (modelbox::STATUS_SUCCESS !=\n      SetInImageSize(in_image, ori_image, pix_fmt)) {\n    MBLOG_ERROR << \"get in image property failed\";\n    return {modelbox::STATUS_FAULT, \"get in image property failed\"};\n  }\n\n  Rect dest_rect;\n  if (modelbox::STATUS_SUCCESS !=\n      FillDestRoi(ori_image, dest_rect, param.crop_area, param.paste_area)) {\n    MBLOG_ERROR << \"FillDestRoi failed\";\n    return {modelbox::STATUS_FAULT, \"FillDestRoi failed\"};\n  }\n\n  auto status_ret = CreateDesc(in_image->ConstData(), in_image->GetBytes(),\n                               ori_image, param.in_img_desc, pix_fmt);\n  if (status_ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"create desc for in_image failed\";\n    return status_ret;\n  }\n\n  if (aclrtMemcpy((void *)out_image->ConstData(), out_image_.buffer_size_,\n                  buffer_.get(), out_image_.buffer_size_,\n                  ACL_MEMCPY_DEVICE_TO_DEVICE) != 0) {\n    MBLOG_ERROR << \"failed copy padding data to out 
image\";\n    return modelbox::STATUS_FAULT;\n  }\n  status_ret = CreateDesc(out_image->ConstData(), out_image->GetBytes(),\n                          out_image_, param.out_img_desc, output_img_pix_fmt);\n  if (status_ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"create out image descprition failed\";\n    return status_ret;\n  }\n\n  status_ret = CropResizeAndPaste(param, stream);\n  if (!status_ret) {\n    MBLOG_ERROR << \"CropResizeAndPaste failed\";\n    return status_ret;\n  }\n\n  return imageprocess::SetOutImgMeta(out_image, output_img_pix_fmt,\n                                     param.out_img_desc);\n}\n\nmodelbox::Status PaddingFlowUnit::CreateDesc(\n    const void *buffer, const int32_t &buffer_size, ImageSize &image_size,\n    std::shared_ptr<acldvppPicDesc> &pic_desc, const std::string &pix_fmt) {\n  if (!modelbox::IsMemAligned((uintptr_t)buffer,\n                              modelbox::ASCEND_ASYNC_ALIGN)) {\n    return {modelbox::STATUS_FAULT,\n            \"Input mem not aligned, ptr \" + std::to_string((uintptr_t)buffer)};\n  }\n\n  pic_desc =\n      CreateImgDesc(buffer_size, (void *)buffer, pix_fmt,\n                    imageprocess::ImageShape{\n                        image_size.width_, image_size.height_,\n                        image_size.width_stride_, image_size.height_stride_},\n                    imageprocess::ImgDescDestroyFlag::DESC_ONLY);\n  if (pic_desc == nullptr) {\n    MBLOG_ERROR << \"CreateImgDesc failed\";\n    return modelbox::StatusError;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::FillDestRoi(\n    ImageSize &in_image_size, Rect &dest_roi,\n    std::shared_ptr<acldvppRoiConfig> &crop_area,\n    std::shared_ptr<acldvppRoiConfig> &paste_area) {\n  if (need_scale_) {\n    MBLOG_DEBUG << \"in image width:\" << in_image_size.width_\n                << \",height:\" << in_image_size.height_;\n    auto w_scale = (float)in_image_size.width_ / out_image_.width_;\n    auto h_scale = 
(float)in_image_size.height_ / out_image_.height_;\n    auto scale = std::max(w_scale, h_scale);\n    dest_roi.width = in_image_size.width_ / scale;\n    dest_roi.height = in_image_size.height_ / scale;\n\n    auto min_x = (out_image_.width_ - dest_roi.width) >> 1;\n    min_x = (min_x + MINI_WIDTHE_OFFSET) >> 4 << 4;\n    dest_roi.width = out_image_.width_ - 2 * min_x;\n    if (dest_roi.width < MINI_WIDTH_STRIDE) {\n      MBLOG_ERROR << \"input w/h is too small than output\";\n      return modelbox::STATUS_INVALID;\n    }\n    scale = (float)in_image_size.width_ / dest_roi.width;\n    dest_roi.height = in_image_size.height_ / scale;\n    dest_roi.height = dest_roi.height >> 1 << 1;\n    dest_roi.width =\n        dest_roi.width > out_image_.width_ ? out_image_.width_ : dest_roi.width;\n    dest_roi.height = dest_roi.height > out_image_.height_ ? out_image_.height_\n                                                           : dest_roi.height;\n    MBLOG_DEBUG << \"dest_roi width:\" << dest_roi.width\n                << \",height:\" << dest_roi.height;\n  } else {\n    if (in_image_size.width_ > out_image_.width_ ||\n        in_image_size.height_ > out_image_.height_) {\n      MBLOG_ERROR << \"src image[w:\" << in_image_size.width_\n                  << \",h:\" << in_image_size.height_\n                  << \"] is great than dest size[w:\" << out_image_.width_\n                  << \",h:\" << out_image_.height_\n                  << \"]. 
But need_scale is false\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    dest_roi.width = in_image_size.width_;\n    dest_roi.height = in_image_size.height_;\n  }\n\n  auto *crop_area_local = acldvppCreateRoiConfig(0, in_image_size.width_ - 1, 0,\n                                                 in_image_size.height_ - 1);\n  if (crop_area_local == nullptr) {\n    MBLOG_ERROR << \"failed create roi config for crop area\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  crop_area = std::shared_ptr<acldvppRoiConfig>(\n      crop_area_local, [](acldvppRoiConfig *config) {\n        if (config != nullptr) {\n          acldvppDestroyRoiConfig(config);\n        }\n      });\n  dest_roi.x =\n      GetAlignOffset(horizontal_align_, out_image_.width_, dest_roi.width) >>\n      4 << 4;\n  dest_roi.width = ALIGNMENT_DOWN(dest_roi.width);\n  dest_roi.y = ALIGNMENT_DOWN(\n      GetAlignOffset(vertical_align_, out_image_.height_, dest_roi.height));\n  dest_roi.height = ALIGNMENT_DOWN(dest_roi.height);\n  auto *paste_area_local =\n      acldvppCreateRoiConfig(dest_roi.x, dest_roi.x + dest_roi.width - 1,\n                             dest_roi.y, dest_roi.y + dest_roi.height - 1);\n  if (paste_area_local == nullptr) {\n    MBLOG_ERROR << \"failed create roi config for paste area\";\n    return modelbox::STATUS_FAULT;\n  }\n  paste_area = std::shared_ptr<acldvppRoiConfig>(\n      paste_area_local, [](acldvppRoiConfig *config) {\n        if (config != nullptr) {\n          acldvppDestroyRoiConfig(config);\n        }\n      });\n\n  return modelbox::STATUS_OK;\n}\n\nuint32_t PaddingFlowUnit::GetAlignOffset(AlignType type, uint32_t dest_range,\n                                         uint32_t roi_range) {\n  if (roi_range >= dest_range) {\n    return 0;\n  }\n\n  uint32_t offset = 0;\n  switch (type) {\n    case AlignType::BEGIN:\n      break;\n\n    case AlignType::CENTER:\n      offset = (dest_range - roi_range) / 2;\n      break;\n\n    case AlignType::END:\n      offset = 
dest_range - roi_range;\n      break;\n\n    default:\n      break;\n  }\n\n  return offset;\n}\n\nmodelbox::Status PaddingFlowUnit::CropResizeAndPaste(ResizeCropParam &param,\n                                                     aclrtStream stream) {\n  auto chan_desc = imageprocess::GetDvppChannel(dev_id_);\n  if (chan_desc == nullptr) {\n    MBLOG_ERROR << \"Get dvpp channel failed\";\n    return {modelbox::STATUS_FAULT, \"Get dvpp channel failed\"};\n  }\n  auto *resize_cfg = acldvppCreateResizeConfig();\n  if (resize_cfg == nullptr) {\n    MBLOG_ERROR << \"acldvppCreateResizeConfig return null\";\n    return {modelbox::STATUS_FAULT, \"acldvppCreateResizeConfig return null\"};\n  }\n\n  Defer { acldvppDestroyResizeConfig(resize_cfg); };\n  auto acl_ret =\n      acldvppSetResizeConfigInterpolation(resize_cfg, interpolation_);\n  if (acl_ret != 0) {\n    MBLOG_ERROR << \"failed set interpolation for resize config\";\n    return {modelbox::STATUS_FAULT,\n            \"failed set interpolation for resize config\"};\n  }\n  acl_ret = acldvppVpcCropResizePasteAsync(\n      chan_desc.get(), param.in_img_desc.get(), param.out_img_desc.get(),\n      param.crop_area.get(), param.paste_area.get(), resize_cfg, stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"acldvppVpcCropResizePasteAsync failed, err \" +\n                       std::to_string(acl_ret);\n    return modelbox::STATUS_FAULT;\n  }\n\n  acl_ret = aclrtSynchronizeStream(stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSynchronizeStream failed, err \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nMODELBOX_FLOWUNIT(PaddingFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({IN_IMG, modelbox::ASCEND_MEM_DVPP});\n  desc.AddFlowUnitOutput({OUT_IMG, modelbox::ASCEND_MEM_DVPP});\n  
desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"0\", \"the padding width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n                                                  \"0\", \"the padding height\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"vertical_align\", \"string\", false, \"top\", \"vertical align type\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"horizontal_align\", \"string\", false, \"left\", \"horizontal align type\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"padding_data\", \"string\", false, \"0,0,0\", \"the padding data\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/padding/padding_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ASCEND_PADDING_H_\n#define MODELBOX_FLOWUNIT_ASCEND_PADDING_H_\n\n#define ENABLE_DVPP_INTERFACE\n#define ACL_ENABLE\n\n#include <acl/ops/acl_dvpp.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/ascend/device_ascend.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\nconstexpr const char *FLOWUNIT_NAME = \"padding\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A padding flowunit on ascend device \\n\"\n    \"\\t@Port paramter: the input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: the field value range of this flowunit support：'pix_fmt': \"\n    \"[nv12], 'layout': [hwc]. \";\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *OUT_IMG = \"out_image\";\nenum class AlignType { BEGIN, CENTER, END };\nclass Rect {\n public:\n  int32_t x;\n  int32_t y;\n  int32_t width;\n  int32_t height;\n};\n\nclass ImageSize {\n public:\n  int32_t width_;\n  int32_t height_;\n  int32_t width_stride_;\n  int32_t height_stride_;\n  int32_t buffer_size_;\n};\n\nclass ResizeCropParam {\n public:\n  std::shared_ptr<acldvppPicDesc> in_img_desc;\n  std::shared_ptr<acldvppPicDesc> resize_img_desc;\n  std::shared_ptr<acldvppRoiConfig> crop_area;\n  std::shared_ptr<acldvppRoiConfig> paste_area;\n  std::shared_ptr<acldvppPicDesc> out_img_desc;\n};\n\nclass PaddingFlowUnit : public modelbox::AscendFlowUnit {\n public:\n  PaddingFlowUnit() = default;\n  ~PaddingFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status AscendProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      aclrtStream stream) override;\n\n private:\n  modelbox::Status ProcessOneImg(std::shared_ptr<modelbox::Buffer> &in_image,\n                                 std::shared_ptr<modelbox::Buffer> &out_image,\n                                 
aclrtStream stream);\n\n  modelbox::Status CreateDesc(const void *buffer, const int32_t &buffer_size,\n                              ImageSize &image_size,\n                              std::shared_ptr<acldvppPicDesc> &pic_desc,\n                              const std::string &pix_fmt);\n\n  modelbox::Status GetInputDesc(\n      const std::shared_ptr<modelbox::Buffer> &in_image,\n      std::shared_ptr<acldvppPicDesc> &in_img_desc);\n\n  modelbox::Status GetOutputDesc(\n      const std::shared_ptr<modelbox::Buffer> &out_image,\n      std::shared_ptr<acldvppPicDesc> &out_img_desc,\n      std::shared_ptr<acldvppPicDesc> &resize_img_desc);\n\n  modelbox::Status CropResizeAndPaste(ResizeCropParam &param,\n                                      aclrtStream stream);\n\n  modelbox::Status FillDestRoi(ImageSize &in_image_size, Rect &dest_roi,\n                               std::shared_ptr<acldvppRoiConfig> &crop_area,\n                               std::shared_ptr<acldvppRoiConfig> &paste_area);\n\n  uint32_t GetAlignOffset(AlignType type, uint32_t dest_range,\n                          uint32_t roi_range);\n\n  ImageSize out_image_;\n  std::shared_ptr<void> buffer_{nullptr};\n\n  AlignType vertical_align_{AlignType::BEGIN};\n  AlignType horizontal_align_{AlignType::BEGIN};\n\n  std::vector<uint8_t> padding_data_;\n  bool need_scale_{true};\n  int32_t interpolation_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_ASCEND_PADDING_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/padding/padding_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <acl/acl_rt.h>\n#include <dsmi_common_interface.h>\n#include <securec.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n\nStatus yuvI420ToNV12(uint8_t *in_data, int32_t w, int32_t h,\n                     uint8_t *out_data) {\n  int size = w * h;\n  auto ret = memcpy_s(out_data, size, in_data, size);\n  if (ret != 0) {\n    MBLOG_ERROR << \"copy Y data to out data failed, datasize = \" << size;\n    return modelbox::STATUS_FAULT;\n  }\n  for (int i = 0, j = 0; i < w * h / 4; i++, j += 2) {\n    auto ret_u = memcpy_s(out_data + size + j, 1, in_data + size + i, 1);\n    auto ret_v =\n        memcpy_s(out_data + size + j + 1, 1, in_data + i + size * 5 / 4, 1);\n    if (ret_u != 0 || ret_v != 0) {\n      MBLOG_ERROR << \"copy u/v data to out data failed\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n  return modelbox::STATUS_SUCCESS;\n}\n\nclass AscendPaddingFlowUnitTest : public testing::Test 
{\n public:\n  AscendPaddingFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    // Test ascend runtime\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> AscendPaddingFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(AscendPaddingFlowUnitTest, TestPaddingImage) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          padding[type=flowunit, flowunit=padding, device=ascend, deviceid=0, label=\"<in_image> | <out_image>\",image_width=208, image_height=100,\n          vertical_align=top, horizontal_align=center, padding_data = \"255,255,0\"]\n\n          input -> padding:in_image\n          padding:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"TestPaddingImage\", toml_content, 10);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  cv::Mat I420data;\n  cv::cvtColor(img, I420data, cv::COLOR_RGB2YUV_I420);\n  int size = img.cols * img.rows * 3 / 2;\n  cv::Mat nv12_data(img.cols * 3 / 2, img.rows, CV_8UC1);\n\n  auto convert_status =\n      yuvI420ToNV12(I420data.data, img.cols, img.rows, nv12_data.data);\n  ASSERT_EQ(convert_status, 
modelbox::STATUS_SUCCESS);\n\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n  in_img_buffer_list->Build({img.total() * img.elemSize() / 2});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n  in_img_buffer->Set(\"width\", img.cols);\n  in_img_buffer->Set(\"height\", img.rows);\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"nv12\"));\n  auto e_ret = memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                        nv12_data.data, size);\n  EXPECT_EQ(e_ret, 0);\n\n  auto status = extern_data->Send(\"input\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n\n  auto output_buffer = output_buffer_list->At(0);\n  ASSERT_EQ(output_buffer->GetBytes(), 208 * 100 * 3 / 2);\n\n  cv::Mat yuv_out_img(100 * 3 / 2, 208, CV_8UC1);\n  auto acl_ret = aclrtSetDevice(0);\n  EXPECT_EQ(acl_ret, ACL_SUCCESS);\n\n  acl_ret = aclrtMemcpy(yuv_out_img.data, output_buffer->GetBytes(),\n                        output_buffer->ConstData(), output_buffer->GetBytes(),\n                        aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_HOST);\n  EXPECT_EQ(acl_ret, ACL_SUCCESS);\n\n  auto image_size = yuv_out_img.rows * yuv_out_img.cols * yuv_out_img.elemSize();\n  char expected_img[image_size];\n  std::ifstream infile;\n  infile.open(std::string(TEST_ASSETS) + \"/ascend_padding_yuv\");\n  infile.read((char *)expected_img, image_size);\n\n  EXPECT_EQ(memcmp((char *)yuv_out_img.data, expected_img, image_size), 0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/resize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"resize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/resize/resize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"resize_flowunit.h\"\n\n#include \"image_process.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nconst std::string output_img_pix_fmt = \"nv12\";\n\nmodelbox::Status ResizeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  dest_width_ = opts->GetUint32(\"width\", 0);\n  if (dest_width_ == 0) {\n    dest_width_ = opts->GetUint32(\"image_width\", 0);\n  }\n\n  dest_height_ = opts->GetUint32(\"height\", 0);\n  if (dest_height_ == 0) {\n    dest_height_ = opts->GetUint32(\"image_height\", 0);\n  }\n\n  if (dest_width_ == 0 || dest_height_ == 0) {\n    MBLOG_ERROR << \"Dest width or dest height not valid\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ResizeFlowUnit::AscendProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, aclrtStream stream) {\n  auto input_img_buffer_list = data_ctx->Input(IN_IMG);\n  auto img_count = input_img_buffer_list->Size();\n  if (img_count == 0) {\n    MBLOG_ERROR << \"input img buffer list is empty\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto output_img_buffer_list = data_ctx->Output(OUT_IMG);\n  size_t buffer_size = 0;\n  auto align_w =\n      imageprocess::align_up(dest_width_, imageprocess::ASCEND_WIDTH_ALIGN);\n  auto align_h =\n      
imageprocess::align_up(dest_height_, imageprocess::ASCEND_HEIGHT_ALIGN);\n  auto ret = imageprocess::GetImageBytes(output_img_pix_fmt, align_w, align_h,\n                                         buffer_size);\n  if (!ret) {\n    MBLOG_ERROR << \"get image bytes failed, err \" << ret;\n    return ret;\n  }\n\n  std::vector<size_t> output_shape(img_count, buffer_size);\n  ret = output_img_buffer_list->Build(output_shape, false);\n  if (!ret) {\n    MBLOG_ERROR << \"Build output failed, err \" << ret;\n    return ret;\n  }\n\n  output_img_buffer_list->CopyMeta(input_img_buffer_list);\n  for (size_t i = 0; i < img_count; ++i) {\n    auto in_img_buffer = input_img_buffer_list->At(i);\n    auto out_img_buffer = output_img_buffer_list->At(i);\n    auto ret = ProcessOneImg(in_img_buffer, out_img_buffer, stream);\n    if (!ret) {\n      MBLOG_ERROR << \"Resize image failed, err \" << ret;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ResizeFlowUnit::ProcessOneImg(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &out_image, aclrtStream stream) {\n  auto chan_desc = imageprocess::GetDvppChannel(dev_id_);\n  if (chan_desc == nullptr) {\n    return {modelbox::STATUS_FAULT, \"Get dvpp channel failed\"};\n  }\n\n  std::shared_ptr<acldvppPicDesc> in_img_desc;\n  auto ret = GetInputDesc(in_image, in_img_desc);\n  if (!ret) {\n    return ret;\n  }\n\n  std::shared_ptr<acldvppPicDesc> out_img_desc;\n  ret = GetOutputDesc(out_image, out_img_desc);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = Resize(chan_desc, in_img_desc, out_img_desc, out_image, stream);\n  if (!ret) {\n    return ret;\n  }\n\n  return imageprocess::SetOutImgMeta(out_image, output_img_pix_fmt,\n                                     out_img_desc);\n}\n\nmodelbox::Status ResizeFlowUnit::GetInputDesc(\n    const std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<acldvppPicDesc> &in_img_desc) {\n  std::string in_pix_fmt;\n  
int32_t in_img_width = 0;\n  int32_t in_img_height = 0;\n  int32_t in_img_width_stride = 0;\n  int32_t in_img_height_stride = 0;\n  auto ret = imageprocess::GetImgParam(in_image, in_pix_fmt, in_img_width,\n                                       in_img_height, in_img_width_stride,\n                                       in_img_height_stride);\n  if (!ret) {\n    return ret;\n  }\n\n  if (!modelbox::IsMemAligned((uintptr_t)in_image->ConstData(),\n                              modelbox::ASCEND_ASYNC_ALIGN)) {\n    return {modelbox::STATUS_FAULT,\n            \"Input mem not aligned, ptr \" +\n                std::to_string((uintptr_t)in_image->ConstData())};\n  }\n\n  in_img_desc = CreateImgDesc(\n      in_image->GetBytes(), (void *)in_image->ConstData(), in_pix_fmt,\n      imageprocess::ImageShape{in_img_width, in_img_height, in_img_width_stride,\n                               in_img_height_stride},\n      imageprocess::ImgDescDestroyFlag::DESC_ONLY);\n  if (in_img_desc == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ResizeFlowUnit::GetOutputDesc(\n    const std::shared_ptr<modelbox::Buffer> &out_image,\n    std::shared_ptr<acldvppPicDesc> &out_img_desc) {\n  if (!modelbox::IsMemAligned((uintptr_t)out_image->MutableData(),\n                              modelbox::ASCEND_ASYNC_ALIGN)) {\n    return {modelbox::STATUS_FAULT,\n            \"Output mem not aligned, ptr \" +\n                std::to_string((uintptr_t)out_image->MutableData())};\n  }\n\n  auto align_w = imageprocess::align_up((int32_t)dest_width_,\n                                        imageprocess::ASCEND_WIDTH_ALIGN);\n  auto align_h = imageprocess::align_up((int32_t)dest_height_,\n                                        imageprocess::ASCEND_HEIGHT_ALIGN);\n  out_img_desc = CreateImgDesc(\n      out_image->GetBytes(), (void *)out_image->MutableData(),\n      output_img_pix_fmt,\n      imageprocess::ImageShape{(int32_t)dest_width_, 
(int32_t)dest_height_,\n                               align_w, align_h},\n      imageprocess::ImgDescDestroyFlag::DESC_ONLY);\n  if (out_img_desc == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ResizeFlowUnit::Resize(\n    std::shared_ptr<acldvppChannelDesc> &chan_desc,\n    std::shared_ptr<acldvppPicDesc> &in_img_desc,\n    std::shared_ptr<acldvppPicDesc> &out_img_desc,\n    std::shared_ptr<modelbox::Buffer> &out_image, aclrtStream stream) {\n  auto *resize_cfg = acldvppCreateResizeConfig();\n  if (resize_cfg == nullptr) {\n    return {modelbox::STATUS_FAULT, \"acldvppCreateResizeConfig return null\"};\n  }\n\n  Defer { acldvppDestroyResizeConfig(resize_cfg); };\n  auto acl_ret = acldvppVpcResizeAsync(chan_desc.get(), in_img_desc.get(),\n                                       out_img_desc.get(), resize_cfg, stream);\n  if (acl_ret != ACL_SUCCESS) {\n    std::string err_msg =\n        \"acldvppVpcResizeAsync failed, err \" + std::to_string(acl_ret);\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  acl_ret = aclrtSynchronizeStream(stream);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"aclrtSynchronizeStream failed, err \" << acl_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ResizeFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nMODELBOX_FLOWUNIT(ResizeFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({IN_IMG, modelbox::ASCEND_MEM_DVPP});\n  desc.AddFlowUnitOutput({OUT_IMG, modelbox::ASCEND_MEM_DVPP});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"0\", \"the resize width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n 
                                                 \"0\", \"the resize height\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/resize/resize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_ASCEND_RESIZE_H_\n#define MODELBOX_FLOWUNIT_ASCEND_RESIZE_H_\n\n#define ENABLE_DVPP_INTERFACE\n#define ACL_ENABLE\n\n#include <acl/ops/acl_dvpp.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/device/ascend/device_ascend.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\nconstexpr const char *FLOWUNIT_NAME = \"resize\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A resize flowunit on ascend device. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': \"\n    \"[nv12], 'layout': [hwc]. \";\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *OUT_IMG = \"out_image\";\n\nclass ResizeFlowUnit : public modelbox::AscendFlowUnit {\n public:\n  ResizeFlowUnit() = default;\n  ~ResizeFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status AscendProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      aclrtStream stream) override;\n\n private:\n  modelbox::Status ProcessOneImg(std::shared_ptr<modelbox::Buffer> &in_image,\n                               std::shared_ptr<modelbox::Buffer> &out_image,\n                               aclrtStream stream);\n\n  modelbox::Status GetInputDesc(const std::shared_ptr<modelbox::Buffer> &in_image,\n                              std::shared_ptr<acldvppPicDesc> &in_img_desc);\n\n  modelbox::Status GetOutputDesc(const std::shared_ptr<modelbox::Buffer> &out_image,\n                               std::shared_ptr<acldvppPicDesc> &out_img_desc);\n\n  modelbox::Status Resize(std::shared_ptr<acldvppChannelDesc> &chan_desc,\n                        std::shared_ptr<acldvppPicDesc> &in_img_desc,\n                        
std::shared_ptr<acldvppPicDesc> &out_img_desc,\n                        std::shared_ptr<modelbox::Buffer> &out_image,\n                        aclrtStream stream);\n\n  uint32_t dest_width_{0};\n  uint32_t dest_height_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_ASCEND_RESIZE_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/resize/resize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <acl/acl_rt.h>\n#include <dsmi_common_interface.h>\n#include <securec.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass ResizeFlowUnitTest : public testing::Test {\n public:\n  ResizeFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    // Test ascend runtime\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> ResizeFlowUnitTest::GetDriverFlow() {\n  return 
driver_flow_;\n}\n\nTEST_F(ResizeFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          resize[type=flowunit, flowunit=resize, device=ascend, deviceid=0, width=112, height=110]\n\n          input -> resize:in_image\n          resize:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"RunUnit\", toml_content, 10);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  // in img\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n  in_img_buffer_list->Build({img.total() * img.elemSize()});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n  in_img_buffer->Set(\"width\", img.cols);\n  in_img_buffer->Set(\"height\", img.rows);\n  in_img_buffer->Set(\"width_stride\", img.cols * 3);\n  in_img_buffer->Set(\"height_stride\", img.rows);\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  auto e_ret = memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                        img.data, img.total() * img.elemSize());\n  EXPECT_EQ(e_ret, 0);\n  auto status = extern_data->Send(\"input\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  ASSERT_EQ(output_buffer->GetBytes(), 112 * 110 * 3 / 2);\n  cv::Mat yuv_out_img(110 * 3 / 2, 112, CV_8UC1);\n  auto acl_ret = aclrtSetDevice(0);\n  
EXPECT_EQ(acl_ret, ACL_SUCCESS);\n  acl_ret = aclrtMemcpy(yuv_out_img.data, output_buffer->GetBytes(),\n                        output_buffer->ConstData(), output_buffer->GetBytes(),\n                        aclrtMemcpyKind::ACL_MEMCPY_DEVICE_TO_HOST);\n  EXPECT_EQ(acl_ret, ACL_SUCCESS);\n\n  auto image_size = yuv_out_img.rows * yuv_out_img.cols * yuv_out_img.elemSize();\n  char expected_img[image_size];\n  std::ifstream infile;\n  infile.open(std::string(TEST_ASSETS) + \"/ascend_resize_yuv\");\n  infile.read((char *)expected_img, image_size);\n\n  EXPECT_EQ(memcmp((char *)yuv_out_img.data, expected_img, image_size), 0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"ascend\")\nset(UNIT_NAME \"video_decoder\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nadd_definitions(-DENABLE_DVPP_INTERFACE)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${ACL_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ASCEND_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${ACL_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT ascend-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT ascend-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ASCEND_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/ascend_video_decode.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ascend_video_decode.h\"\n\n#include <fstream>\n#include <string>\n\n#include \"modelbox/device/ascend/device_ascend.h\"\n#define ACL_ENABLE\n#include \"image_process.h\"\n\nvoid DestroyStreamDesc(acldvppStreamDesc *stream_desc) {\n  if (stream_desc == nullptr) {\n    return;\n  }\n\n  auto *data_ptr = acldvppGetStreamDescData(stream_desc);\n  if (data_ptr != nullptr) {\n    acldvppFree(data_ptr);\n  }\n\n  auto ret = acldvppDestroyStreamDesc(stream_desc);\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"fail to destroy stream desc, err code \" << ret;\n  }\n}\n\nvoid DestroyPicDesc(acldvppPicDesc *pic_desc) {\n  if (pic_desc == nullptr) {\n    return;\n  }\n\n  auto *data_ptr = acldvppGetPicDescData(pic_desc);\n  if (data_ptr != nullptr) {\n    acldvppFree(data_ptr);\n  }\n\n  auto ret = acldvppDestroyPicDesc(pic_desc);\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"destroy pic desc failed, err code \" << ret;\n  }\n}\n\nThreadHandler::ThreadHandler(\n    int device_id, int instance_id,\n    const std::shared_ptr<modelbox::DataContext> &data_ctx)\n    : device_id_(device_id), instance_id_(instance_id), data_ctx_(data_ctx) {}\n\nThreadHandler::~ThreadHandler() {\n  UpdateNeedStop();\n  pthread_join(threadId_, nullptr);\n}\n\nvoid *ThreadHandler::ThreadFunc(void *arg) {\n  auto 
*thread_handler = (ThreadHandler *)arg;\n  auto ret = aclrtSetDevice(thread_handler->device_id_);\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"acl set device \" << thread_handler->device_id_ << \" failed\";\n    return ((void *)(-1));\n  }\n\n  bool pstart = false;\n  while (!thread_handler->need_stop_) {\n    aclError ret = aclrtProcessReport(ACL_PROCESS_WAIT_TIME_OUT_MS);\n    if (ret == ACL_ERROR_THREAD_NOT_SUBSCRIBE ||\n        (ret == ACL_ERROR_WAIT_CALLBACK_TIMEOUT && !pstart)) {\n      continue;\n    }\n\n    if (ret == ACL_ERROR_NONE) {\n      pstart = true;\n      continue;\n    }\n\n    pstart = false;\n  }\n\n  return nullptr;\n}\n\nmodelbox::Status ThreadHandler::CreateThread() {\n  int createThreadErr = pthread_create(&threadId_, nullptr,\n                                       ThreadHandler::ThreadFunc, (void *)this);\n  if (createThreadErr != 0) {\n    auto errMsg = \"create thread failed, err = \" + std::to_string(createThreadErr);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid ThreadHandler::UpdateNeedStop() { need_stop_ = true; }\n\nDvppVideoDecodeContext::~DvppVideoDecodeContext() {\n  std::vector<std::shared_ptr<DvppFrame>> not_consumed_frame_list;\n  queue_->PopBatch(&not_consumed_frame_list, -1);\n  for (auto &frame : not_consumed_frame_list) {\n    auto *pic_desc = frame->GetPicDesc().get();\n    if (pic_desc == nullptr) {\n      continue;\n    }\n\n    auto *data = acldvppGetPicDescData(pic_desc);\n    if (data == nullptr) {\n      continue;\n    }\n\n    auto ret = acldvppFree(data);\n    if (ret != ACL_SUCCESS) {\n      MBLOG_WARN << \"acl dvpp free failed, addr \" << (void *)data << \", ret \"\n                 << ret;\n    }\n  }\n}\n\nAscendVideoDecoder::AscendVideoDecoder(int instance_id, int device_id,\n                                       int32_t rate_num, int32_t rate_den,\n                                       int32_t format, int32_t entype)\n    : 
instance_id_(instance_id),\n      device_id_(device_id),\n      rate_num_(rate_num),\n      rate_den_(rate_den),\n      format_(format),\n      entype_(entype){};\n\nAscendVideoDecoder::~AscendVideoDecoder() {\n  vdecChannelDesc_ = nullptr;\n  thread_handler_ = nullptr;\n};\n\nvoid AscendVideoDecoder::Callback(acldvppStreamDesc *input,\n                                  acldvppPicDesc *output, void *userData) {\n  if (input == nullptr) {\n    MBLOG_WARN << \"dvpp decoder callback input is nullptr\";\n    return;\n  }\n\n  void *vdecInBufferDev = acldvppGetStreamDescData(input);\n  if (vdecInBufferDev != nullptr) {\n    aclError ret = acldvppFree(vdecInBufferDev);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"fail to free input stream desc data, err code \" << ret;\n    }\n  }\n\n  aclError des_ret = acldvppDestroyStreamDesc(input);\n  if (des_ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"fail to destroy input stream desc, err code \" << des_ret;\n  }\n\n  if (output == nullptr) {\n    MBLOG_WARN << \"dvpp decoder callback output is nullptr.\";\n    return;\n  }\n\n  auto dvpp_frame = std::make_shared<DvppFrame>();\n  dvpp_frame->GetPicDesc().reset(output, [](acldvppPicDesc *picDesc) {\n    // will not free pic buffer\n    auto ret = acldvppDestroyPicDesc(picDesc);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"destroy pic desc failed, err code \" << ret;\n    }\n  });\n\n  void *vdecOutBufferDev = acldvppGetPicDescData(output);\n  if (vdecOutBufferDev == nullptr) {\n    MBLOG_ERROR << \"dvpp decoder callback output data is nullptr.\";\n    return;\n  }\n\n  auto output_size = acldvppGetPicDescSize(output);\n  if (output_size == 0) {\n    acldvppFree(vdecOutBufferDev);\n    MBLOG_ERROR << \"dvpp decoder callback output size is zero.\";\n    return;\n  }\n\n  auto acl_ret = acldvppGetPicDescRetCode(output);\n  if (acl_ret != 0) {\n    acldvppFree(vdecOutBufferDev);\n    MBLOG_ERROR << \"vdec failed, err code \" << acl_ret;\n    return;\n  
}\n\n  if (userData == nullptr) {\n    acldvppFree(vdecOutBufferDev);\n    MBLOG_ERROR << \"call back userData is nullptr.\";\n    return;\n  }\n\n  auto *ctx = (DvppVideoDecodeContext *)userData;\n  auto queue = ctx->GetCacheQueue();\n  if (queue == nullptr) {\n    acldvppFree(vdecOutBufferDev);\n    MBLOG_ERROR << \"get cache queue failed.\";\n    return;\n  }\n  auto res = queue->Push(dvpp_frame);\n  if (!res) {\n    acldvppFree(vdecOutBufferDev);\n    MBLOG_INFO << \"dvpp video decoder callback queue push failed.\";\n  }\n}\n\nmodelbox::Status AscendVideoDecoder::Init(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  vdecChannelDesc_ = nullptr;\n  aclError ret = aclrtSetDevice(device_id_);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"acl set device \" + std::to_string(device_id_) +\n                  \" failed, err code\" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  thread_handler_ =\n      std::make_shared<ThreadHandler>(device_id_, instance_id_, data_ctx);\n  auto status = thread_handler_->CreateThread();\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto errMsg = \"create thread failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  bool setup_result = false;\n  DeferCond { return !setup_result; };\n\n  DeferCondAdd { thread_handler_ = nullptr; };\n\n  aclvdecChannelDesc *vdecChannelDescPtr = aclvdecCreateChannelDesc();\n  if (vdecChannelDescPtr == nullptr) {\n    const auto *errMsg =\n        \"fail to create vdec channel desc, pls check npu log for more details.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  DeferCondAdd {\n    ret = aclvdecDestroyChannelDesc(vdecChannelDescPtr);\n    if (ret != ACL_ERROR_NONE) {\n      auto errMsg =\n          \"fail to destroy channel desc, err code \" + std::to_string(ret);\n      MBLOG_ERROR << errMsg;\n    }\n  };\n\n  ret = 
aclvdecSetChannelDescChannelId(vdecChannelDescPtr, instance_id_);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to set vdec ChannelId, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  ret = aclvdecSetChannelDescThreadId(vdecChannelDescPtr,\n                                      thread_handler_->GetThreadId());\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to create threadId, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  // callback func\n  ret = aclvdecSetChannelDescCallback(vdecChannelDescPtr, Callback);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to set vdec Callback, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  ret = aclvdecSetChannelDescEnType(vdecChannelDescPtr,\n                                    static_cast<acldvppStreamFormat>(entype_));\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to set vdec EnType, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  ret = aclvdecSetChannelDescOutPicFormat(\n      vdecChannelDescPtr, static_cast<acldvppPixelFormat>(format_));\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg =\n        \"fail to set vdec OutPicFormat, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  // create vdec channel\n  ret = aclvdecCreateChannel(vdecChannelDescPtr);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg =\n        \"fail to create vdec channel, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto device_id = device_id_;\n  vdecChannelDesc_.reset(\n      vdecChannelDescPtr, [this, device_id](aclvdecChannelDesc *p) {\n        auto ret = aclvdecDestroyChannel(p);\n        if 
(ret != ACL_ERROR_NONE) {\n          MBLOG_ERROR << \"fail to destroy vdec channel, err: \" << ret;\n        }\n        ret = aclvdecDestroyChannelDesc(p);\n        if (ret != ACL_ERROR_NONE) {\n          MBLOG_ERROR << \"fail to destroy vdec channel desc, err: \" << ret;\n        }\n\n        this->thread_handler_ = nullptr;\n      });\n\n  setup_result = true;\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::shared_ptr<acldvppPicDesc> AscendVideoDecoder::SetUpFrame(\n    const std::shared_ptr<DvppPacket> &dvpp_packet) {\n  auto width = dvpp_packet->GetWidth();\n  auto height = dvpp_packet->GetHeight();\n  auto align_width =\n      imageprocess::align_up(width, imageprocess::ASCEND_WIDTH_ALIGN);\n  auto align_height =\n      imageprocess::align_up(height, imageprocess::ASCEND_HEIGHT_ALIGN);\n  auto width_stride = 0;\n  auto ret =\n      imageprocess::GetWidthStride(OUTPUT_PIX_FMT, align_width, width_stride);\n  if (!ret) {\n    MBLOG_ERROR << \"Get width stride failed, ret \" << ret;\n    return nullptr;\n  }\n\n  size_t size = 0;\n  ret = imageprocess::GetImageBytes(OUTPUT_PIX_FMT, align_width, align_height,\n                                    size);\n  if (!ret) {\n    MBLOG_ERROR << \"Get image bytes failed, ret \" << ret;\n    return nullptr;\n  }\n\n  auto dvpp_pic_desc_ptr = CreateImgDesc(\n      size, OUTPUT_PIX_FMT,\n      imageprocess::ImageShape{width, height, width_stride, align_height},\n      imageprocess::ImgDescDestroyFlag::NONE);\n  if (dvpp_pic_desc_ptr == nullptr) {\n    MBLOG_ERROR << \"Create image desc failed, ret \" << modelbox::StatusError;\n    return nullptr;\n  }\n\n  return dvpp_pic_desc_ptr;\n}\n\nmodelbox::Status AscendVideoDecoder::ProcessLastPacket(\n    const std::shared_ptr<DvppPacket> &dvpp_packet,\n    const std::shared_ptr<DvppVideoDecodeContext> &dvpp_decoder_ctx) {\n  MBLOG_INFO << \"process the last packet.\";\n\n  aclError ret = ACL_ERROR_NONE;\n\n  ret = aclvdecSendFrame(vdecChannelDesc_.get(), 
dvpp_packet->GetStreamDesc(),\n                         nullptr, nullptr, (void *)dvpp_decoder_ctx.get());\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"send eos frame failed, err code: \" << ret;\n    DestroyStreamDesc(dvpp_packet->GetStreamDesc());\n  }\n\n  return modelbox::STATUS_NODATA;\n}\n\nmodelbox::Status AscendVideoDecoder::Decode(\n    const std::shared_ptr<DvppPacket> &dvpp_packet,\n    const std::shared_ptr<DvppVideoDecodeContext> &dvpp_decoder_ctx) {\n  aclError ret = aclrtSetDevice(device_id_);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"acl set device \" + std::to_string(device_id_) + \" failed\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  if (dvpp_packet->IsEnd()) {\n    auto status = ProcessLastPacket(dvpp_packet, dvpp_decoder_ctx);\n    if (status == modelbox::STATUS_FAULT) {\n      return {modelbox::STATUS_FAULT, \"send the last packet failed.\"};\n    }\n    return status;\n  }\n\n  auto pic_desc = SetUpFrame(dvpp_packet);\n  if (pic_desc == nullptr) {\n    const auto *errMsg = \"set up frame failed\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  ret =\n      aclvdecSendFrame(vdecChannelDesc_.get(), dvpp_packet->GetStreamDesc(),\n                       pic_desc.get(), nullptr, (void *)dvpp_decoder_ctx.get());\n\n  if (ret != ACL_ERROR_NONE) {\n    MBLOG_ERROR << \"send vdec frame failed, err code \" << ret;\n    DestroyStreamDesc(dvpp_packet->GetStreamDesc());\n    DestroyPicDesc(pic_desc.get());\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/ascend_video_decode.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_ASCEND_VIDEODECODE_H_\n#define MODELBOX_FLOWUNIT_ASCEND_VIDEODECODE_H_\n\n#include <acl/acl.h>\n#include <modelbox/base/blocking_queue.h>\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_context.h>\n\n#include <string>\n#include <vector>\n\n#include \"acl/ops/acl_dvpp.h\"\n\nconstexpr const int ACL_PROCESS_WAIT_TIME_OUT_MS = 50;\nconstexpr const uint32_t YUV_BYTES_NU = 3;\nconstexpr const uint32_t YUV_BYTES_DE = 2;\nconstexpr const char *INSTANCE_ID = \"instance_id\";\nconstexpr const char *OUTPUT_PIX_FMT = \"nv12\";\n\nclass ThreadHandler {\n public:\n  ThreadHandler(int device_id, int instance_id,\n                const std::shared_ptr<modelbox::DataContext> &data_ctx);\n  virtual ~ThreadHandler();\n\n  modelbox::Status CreateThread();\n  pthread_t GetThreadId() { return threadId_; }\n  void UpdateNeedStop();\n\n private:\n  static void *ThreadFunc(void *arg);\n  pthread_t threadId_{0};\n  int device_id_;\n  int instance_id_;\n  std::weak_ptr<modelbox::DataContext> data_ctx_;\n  std::atomic<bool> need_stop_{false};\n};\n\nclass DvppPacket {\n public:\n  DvppPacket(size_t size, int32_t width, int32_t height, int32_t pts)\n      : size_(size), width_(width), height_(height), pts_(pts){};\n\n  DvppPacket() { stream_desc_ = nullptr; 
};\n\n  virtual ~DvppPacket() = default;\n\n  acldvppStreamDesc *GetStreamDesc() { return stream_desc_; };\n  void SetStreamDesc(acldvppStreamDesc *stream_desc) {\n    stream_desc_ = stream_desc;\n  }\n\n  int32_t GetPts() { return pts_; }\n\n  int32_t GetWidth() { return width_; };\n  int32_t GetHeight() { return height_; };\n  bool IsEnd() { return is_end_; };\n  void SetEnd(bool is_end) { is_end_ = is_end; };\n\n private:\n  uint32_t size_{0};\n  int32_t width_{0};\n  int32_t height_{0};\n  int32_t pts_{0};\n  bool is_end_{false};\n  acldvppStreamDesc *stream_desc_ = nullptr;\n};\n\nclass DvppFrame {\n public:\n  DvppFrame() = default;\n  virtual ~DvppFrame() = default;\n\n  std::shared_ptr<acldvppPicDesc> &GetPicDesc() { return pic_desc_; }\n\n private:\n  std::shared_ptr<acldvppPicDesc> pic_desc_;\n};\n\nclass DvppVideoDecodeContext {\n public:\n  DvppVideoDecodeContext() {\n    queue_ =\n        std::make_shared<modelbox::BlockingQueue<std::shared_ptr<DvppFrame>>>();\n  };\n\n  virtual ~DvppVideoDecodeContext();\n\n  std::shared_ptr<modelbox::BlockingQueue<std::shared_ptr<DvppFrame>>>\n  GetCacheQueue() {\n    return queue_;\n  }\n\n private:\n  std::shared_ptr<modelbox::BlockingQueue<std::shared_ptr<DvppFrame>>> queue_;\n};\n\nclass AscendVideoDecoder {\n public:\n  AscendVideoDecoder(int instance_id, int device_id, int32_t rate_num,\n                     int32_t rate_den, int32_t format, int32_t entype);\n  virtual ~AscendVideoDecoder();\n\n  modelbox::Status Init(const std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  modelbox::Status Decode(\n      const std::shared_ptr<DvppPacket> &dvpp_packet,\n      const std::shared_ptr<DvppVideoDecodeContext> &dvpp_decoder_ctx);\n\n  int32_t GetRateNum() { return rate_num_; }\n  int32_t GetRateDen() { return rate_den_; }\n\n private:\n  static void Callback(acldvppStreamDesc *input, acldvppPicDesc *output,\n                       void *userData);\n  modelbox::Status ProcessLastPacket(\n      const 
std::shared_ptr<DvppPacket> &dvpp_packet,\n      const std::shared_ptr<DvppVideoDecodeContext> &dvpp_decoder_ctx);\n  std::shared_ptr<acldvppPicDesc> SetUpFrame(\n      const std::shared_ptr<DvppPacket> &dvpp_packet);\n  int32_t instance_id_{0};\n  int32_t device_id_{0};\n  int32_t rate_num_{0};\n  int32_t rate_den_{0};\n  int32_t format_{1};\n  int32_t entype_{2};\n  std::shared_ptr<aclvdecChannelDesc> vdecChannelDesc_ = nullptr;\n  std::shared_ptr<ThreadHandler> thread_handler_ = nullptr;\n};\n\n#endif"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/video_decoder_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_decoder_flowunit.h\"\n\n#include <securec.h>\n\n#include <fstream>\n#include <string>\n\n#include \"ascend_video_decode.h\"\n#include \"modelbox/base/timer.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nVideoDecodeFlowUnit::VideoDecodeFlowUnit() = default;\nVideoDecodeFlowUnit::~VideoDecodeFlowUnit() = default;\n\nstatic std::map<std::string, int32_t> fmt_trans_map = {\n    {\"nv12\", PIXEL_FORMAT_YUV_SEMIPLANAR_420}};\n\nconstexpr int32_t PROFILE_BASELINE = 66;\nconstexpr int32_t PROFILE_MAIN = 77;\nconstexpr int32_t PROFILE_HIGH = 100;\nconstexpr int32_t PROFILE_DEFAULT = -1;\n\nstatic std::unordered_map<AVCodecID, std::unordered_map<int32_t, int32_t>>\n    encode_type_map = {{AV_CODEC_ID_HEVC, {{PROFILE_DEFAULT, H265_MAIN_LEVEL}}},\n                       {AV_CODEC_ID_H264,\n                        {{PROFILE_BASELINE, H264_BASELINE_LEVEL},\n                         {PROFILE_MAIN, H264_MAIN_LEVEL},\n                         {PROFILE_HIGH, H264_HIGH_LEVEL},\n                         {PROFILE_DEFAULT, H264_MAIN_LEVEL}}}};\n\nint32_t VideoDecodeFlowUnit::GetDvppEncodeType(AVCodecID codec_id,\n                                               int32_t profile_id) {\n  auto codec_item = encode_type_map.find(codec_id);\n  if 
(codec_item == encode_type_map.end()) {\n    MBLOG_ERROR << \"Not support codec id \" << codec_id;\n    return -1;\n  }\n\n  auto &codec_profile_map = codec_item->second;\n  auto profile_item = codec_profile_map.find(profile_id);\n  if (profile_item == codec_profile_map.end()) {\n    return codec_profile_map[PROFILE_DEFAULT];\n  }\n\n  return profile_item->second;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::GetDecoderParam(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx, int32_t &rate_num,\n    int32_t &rate_den, int32_t &encode_type) {\n  auto input_packet = data_ctx->Input(VIDEO_PACKET_INPUT);\n  if (input_packet == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get input failed.\"};\n  }\n\n  auto buffer = input_packet->At(0);\n  auto res = buffer->Get(\"rate_num\", rate_num);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_num failed.\"};\n  }\n\n  res = buffer->Get(\"rate_den\", rate_den);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_den failed.\"};\n  }\n\n  auto in_meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto codec_id =\n      std::static_pointer_cast<AVCodecID>(in_meta->GetMeta(CODEC_META));\n  if (codec_id == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get codec id failed.\"};\n  }\n\n  auto profile_id =\n      std::static_pointer_cast<int32_t>(in_meta->GetMeta(PROFILE_META));\n  if (profile_id == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get profile id failed.\"};\n  }\n\n  encode_type = GetDvppEncodeType(*codec_id, *profile_id);\n  if (encode_type == -1) {\n    return {modelbox::STATUS_FAULT, \"get dvpp encode type failed.\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nconstexpr int32_t MAX_VDEC_CHAN = 128;\n\nvoid VideoDecodeFlowUnit::InitInstanceId() {\n  for (int i = 0; i < MAX_VDEC_CHAN; i++) {\n    instance_available_map_[i] = true;\n  }\n}\n\nint32_t VideoDecodeFlowUnit::FindTheMinimumAvailableId() {\n  std::lock_guard<std::mutex> lk(mutex);\n  for (auto &instance_item : 
instance_available_map_) {\n    {\n      if (instance_item.second) {\n        instance_item.second = false;\n        return instance_item.first;\n      }\n    }\n  }\n\n  return -1;\n}\n\nvoid VideoDecodeFlowUnit::RestoreInstanceId(int32_t instance_id) {\n  std::lock_guard<std::mutex> lk(mutex);\n  instance_available_map_[instance_id] = true;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  std::string fmt = opts->GetString(\"pix_fmt\", \"nv12\");\n\n  auto iter = fmt_trans_map.find(fmt);\n  if (iter == fmt_trans_map.end()) {\n    MBLOG_ERROR << \"Not support pix fmt \" << fmt;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  format_ = fmt_trans_map[fmt];\n\n  InitInstanceId();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::ReopenDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  auto old_source_url =\n      std::static_pointer_cast<std::string>(data_ctx->GetPrivate(SOURCE_URL_META));\n  auto old_codec_id =\n      std::static_pointer_cast<AVCodecID>(data_ctx->GetPrivate(CODEC_ID_META));\n\n  if (old_source_url == nullptr || old_codec_id == nullptr) {\n    MBLOG_ERROR << \"Reopen decoder failed, source url or codec id is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string source_url;\n  AVCodecID codec_id;\n  if (flag_buffer->Get(SOURCE_URL_META, source_url) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (flag_buffer->Get(CODEC_ID_META, codec_id) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (source_url == *old_source_url && codec_id == *old_codec_id) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  MBLOG_WARN << \"Reopen decoder, source url or codec id changed\";\n  auto ret = CloseDecoder(data_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Close decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int32_t 
rate_num;\n  int32_t rate_den;\n  int32_t encode_type;\n\n  auto res = flag_buffer->Get(\"rate_num\", rate_num);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_num failed.\"};\n  }\n\n  res = flag_buffer->Get(\"rate_den\", rate_den);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_den failed.\"};\n  }\n\n  auto in_meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto profile_id =\n      std::static_pointer_cast<int32_t>(in_meta->GetMeta(PROFILE_META));\n  if (profile_id == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get profile id failed.\"};\n  }\n\n  encode_type = GetDvppEncodeType(codec_id, *profile_id);\n  if (encode_type == -1) {\n    return {modelbox::STATUS_FAULT, \"get dvpp encode type failed.\"};\n  }\n\n  return NewDecoder(data_ctx, source_url, codec_id, rate_num, rate_den,\n                    encode_type);\n}\n\nmodelbox::Status VideoDecodeFlowUnit::CloseDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto instance_id =\n      std::static_pointer_cast<int32_t>(data_ctx->GetPrivate(INSTANCE_ID));\n  if (instance_id != nullptr) {\n    RestoreInstanceId(*instance_id);\n  }\n  data_ctx->SetPrivate(DVPP_DECODER, nullptr);\n  data_ctx->SetPrivate(DVPP_DECODER_CTX, nullptr);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, nullptr);\n  data_ctx->SetPrivate(INSTANCE_ID, nullptr);\n  data_ctx->SetPrivate(SOURCE_URL_META, nullptr);\n  data_ctx->SetPrivate(CODEC_ID_META, nullptr);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::NewDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &source_url, AVCodecID codec_id, int32_t rate_num,\n    int32_t rate_den, int32_t encode_type) {\n  int32_t instance_id = 0;\n  instance_id = FindTheMinimumAvailableId();\n  modelbox::Status ret = modelbox::STATUS_SUCCESS;\n  DeferCond { return !ret; };\n\n  if (instance_id == -1) {\n    const auto *errMsg = \"do not have available channelId to decode.\";\n    
MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  DeferCondAdd { RestoreInstanceId(instance_id); };\n\n  auto video_decoder = std::make_shared<AscendVideoDecoder>(\n      instance_id, dev_id_, rate_num, rate_den, format_, encode_type);\n  ret = video_decoder->Init(data_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    auto errMsg = \"video decoder init failed, \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << errMsg;\n    ret = {modelbox::STATUS_FAULT, errMsg};\n    return ret;\n  }\n\n  auto dvpp_decode_ctx = std::make_shared<DvppVideoDecodeContext>();\n\n  auto frame_index = std::make_shared<int64_t>();\n  *frame_index = 0;\n  auto instance_id_ptr = std::make_shared<int32_t>(instance_id);\n  data_ctx->SetPrivate(DVPP_DECODER_CTX, dvpp_decode_ctx);\n  data_ctx->SetPrivate(DVPP_DECODER, video_decoder);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index);\n  data_ctx->SetPrivate(INSTANCE_ID, instance_id_ptr);\n  data_ctx->SetPrivate(SOURCE_URL_META, std::make_shared<std::string>(source_url));\n  data_ctx->SetPrivate(CODEC_ID_META, std::make_shared<AVCodecID>(codec_id));\n  MBLOG_INFO << \"open video decode data success.\";\n\n  return ret;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_packet = data_ctx->Input(VIDEO_PACKET_INPUT);\n  if (input_packet == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get input failed.\"};\n  }\n\n  int32_t rate_num;\n  int32_t rate_den;\n  int32_t encode_type;\n\n  auto buffer = input_packet->At(0);\n  auto res = buffer->Get(\"rate_num\", rate_num);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_num failed.\"};\n  }\n\n  res = buffer->Get(\"rate_den\", rate_den);\n  if (!res) {\n    return {modelbox::STATUS_FAULT, \"get rate_den failed.\"};\n  }\n\n  auto in_meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto codec_id =\n      std::static_pointer_cast<AVCodecID>(in_meta->GetMeta(CODEC_META));\n  if 
(codec_id == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get codec id failed.\"};\n  }\n\n  auto source_url =\n      std::static_pointer_cast<std::string>(in_meta->GetMeta(SOURCE_URL_META));\n  if (source_url == nullptr) {\n    MBLOG_ERROR << \"Stream source url is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto profile_id =\n      std::static_pointer_cast<int32_t>(in_meta->GetMeta(PROFILE_META));\n  if (profile_id == nullptr) {\n    return {modelbox::STATUS_FAULT, \"get profile id failed.\"};\n  }\n\n  encode_type = GetDvppEncodeType(*codec_id, *profile_id);\n  if (encode_type == -1) {\n    return {modelbox::STATUS_FAULT, \"get dvpp encode type failed.\"};\n  }\n\n  return NewDecoder(data_ctx, *source_url, *codec_id, rate_num, rate_den,\n                    encode_type);\n};\n\nmodelbox::Status VideoDecodeFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return CloseDecoder(data_ctx);\n}\n\nmodelbox::Status VideoDecodeFlowUnit::Close() {\n  instance_available_map_.clear();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::ReadData(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<std::shared_ptr<DvppPacket>> &dvpp_packet_list,\n    std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  auto reset_flag = false;\n  auto video_packet_input = data_ctx->Input(VIDEO_PACKET_INPUT);\n  if (video_packet_input == nullptr) {\n    MBLOG_ERROR << \"video packet input is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (video_packet_input->Size() == 0) {\n    MBLOG_ERROR << \"video packet input size is 0\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < video_packet_input->Size(); ++i) {\n    auto packet_buffer = video_packet_input->At(i);\n\n    if (reset_flag == false) {\n      packet_buffer->Get(\"reset_flag\", reset_flag);\n      if (reset_flag == true) {\n        flag_buffer = packet_buffer;\n      }\n    }\n\n    
std::shared_ptr<DvppPacket> dvpp_packet;\n    auto ret = ReadDvppStreamDesc(packet_buffer, dvpp_packet);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      auto errMsg = \"read dvpp stream desc \" + ret.WrapErrormsgs();\n      MBLOG_ERROR << errMsg;\n      return {modelbox::STATUS_FAULT, errMsg};\n    }\n\n    dvpp_packet_list.push_back(dvpp_packet);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::SetUpTheLastPacket(\n    std::shared_ptr<DvppPacket> &dvpp_packet) {\n  dvpp_packet = std::make_shared<DvppPacket>();\n  dvpp_packet->SetEnd(true);\n\n  auto *dvpp_stream_desc_ptr = acldvppCreateStreamDesc();\n  if (dvpp_stream_desc_ptr == nullptr) {\n    const auto *errMsg = \"fail to create input stream desc\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto ret = acldvppSetStreamDescEos(dvpp_stream_desc_ptr, 1);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg =\n        \"fail to set data for stream desc, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    auto des_ret = acldvppDestroyStreamDesc(dvpp_stream_desc_ptr);\n    if (des_ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"fail to destroy input stream desc\";\n    }\n\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  dvpp_packet->SetStreamDesc(dvpp_stream_desc_ptr);\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::ReadDvppStreamDesc(\n    const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n    std::shared_ptr<DvppPacket> &dvpp_packet) {\n  auto size = packet_buffer->GetBytes();\n  if (size == 1) {\n    auto status = SetUpTheLastPacket(dvpp_packet);\n    if (status != modelbox::STATUS_SUCCESS) {\n      auto errMsg = \"setup the last packet failed, \" + status.WrapErrormsgs();\n      MBLOG_ERROR << errMsg;\n      return {modelbox::STATUS_FAULT, errMsg};\n    }\n\n    return status;\n  }\n\n  const auto *buffer = packet_buffer->ConstData();\n  int32_t width = 0;\n  int32_t 
height = 0;\n  int64_t pts = 0;\n  auto exists = packet_buffer->Get(\"width\", width);\n  if (!exists) {\n    const auto *errMsg = \"get width in input buffer meta failed.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  exists = packet_buffer->Get(\"height\", height);\n  if (!exists) {\n    const auto *errMsg = \"get width in input buffer meta failed.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  exists = packet_buffer->Get(\"pts\", pts);\n  if (!exists) {\n    const auto *errMsg = \"get pts in input buffer meta failed.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  dvpp_packet = std::make_shared<DvppPacket>(size, width, height, pts);\n\n  void *temp_ptr = nullptr;\n  bool dvpp_alloc_result = false;\n  DeferCond { return dvpp_alloc_result; };\n\n  auto ret = acldvppMalloc(&temp_ptr, size);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"acldvppMalloc failed, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  DeferCondAdd {\n    if (temp_ptr != nullptr) {\n      acldvppFree(temp_ptr);\n    }\n    temp_ptr = nullptr;\n  };\n\n  ret = aclrtMemcpy(temp_ptr, size, buffer, size, ACL_MEMCPY_HOST_TO_DEVICE);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to memory copy, err code\" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    dvpp_alloc_result = true;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto *dvpp_stream_desc_ptr = acldvppCreateStreamDesc();\n  if (dvpp_stream_desc_ptr == nullptr) {\n    const auto *errMsg = \"fail to create input stream desc\";\n    MBLOG_ERROR << errMsg;\n    dvpp_alloc_result = true;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  DeferCondAdd {\n    ret = acldvppDestroyStreamDesc(dvpp_stream_desc_ptr);\n    if (ret != ACL_ERROR_NONE) {\n      MBLOG_ERROR << \"destroy stream desc failed, err code \" << ret;\n    }\n 
 };\n\n  ret = acldvppSetStreamDescData(dvpp_stream_desc_ptr, temp_ptr);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg =\n        \"fail to set data for stream desc, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    dvpp_alloc_result = true;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  // set size for dvpp stream desc\n  ret = acldvppSetStreamDescSize(dvpp_stream_desc_ptr, size);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg =\n        \"fail to set size for stream desc, err code \" + std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    dvpp_alloc_result = true;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  ret = acldvppSetStreamDescTimestamp(dvpp_stream_desc_ptr, (uint64_t)pts);\n  if (ret != ACL_ERROR_NONE) {\n    auto errMsg = \"fail to set size for stream time stamp, err code \" +\n                  std::to_string(ret);\n    MBLOG_ERROR << errMsg;\n    dvpp_alloc_result = true;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  dvpp_packet->SetStreamDesc(dvpp_stream_desc_ptr);\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::WriteData(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::shared_ptr<AscendVideoDecoder> &video_decoder,\n    const std::shared_ptr<DvppVideoDecodeContext> &dvpp_ctx) {\n  auto queue = dvpp_ctx->GetCacheQueue();\n  size_t size;\n\n  auto output_bufs = data_ctx->Output(FRAME_INFO_OUTPUT);\n  std::vector<std::shared_ptr<DvppFrame>> dvpp_frame;\n  size = queue->PopBatch(&dvpp_frame, -1);\n\n  if (size == 0) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto frame_index =\n      std::static_pointer_cast<int64_t>(data_ctx->GetPrivate(FRAME_INDEX_CTX));\n  auto rate_num = video_decoder->GetRateNum();\n  auto rate_den = video_decoder->GetRateDen();\n\n  auto device = this->GetBindDevice();\n  for (size_t i = 0; i < size; ++i) {\n    auto *pic_desc = dvpp_frame[i]->GetPicDesc().get();\n    void *data = 
acldvppGetPicDescData(pic_desc);\n    if (data == nullptr) {\n      MBLOG_ERROR << \"output pic data is nullptr.\";\n      continue;\n    }\n\n    uint32_t data_size = acldvppGetPicDescSize(pic_desc);\n    if (data_size == 0) {\n      acldvppFree(data);\n      MBLOG_ERROR << \"output pic data size is 0.\";\n      continue;\n    }\n\n    std::shared_ptr<modelbox::Buffer> buffer =\n        std::make_shared<modelbox::Buffer>(device, modelbox::ASCEND_MEM_DVPP);\n    buffer->Build(data, data_size, acldvppFree);\n\n    auto width = acldvppGetPicDescWidth(pic_desc);\n    auto height = acldvppGetPicDescHeight(pic_desc);\n    auto width_stride = acldvppGetPicDescWidthStride(pic_desc);\n    auto height_stride = acldvppGetPicDescHeightStride(pic_desc);\n\n    buffer->Set(\"width\", (int)width);\n    buffer->Set(\"height\", (int)height);\n    buffer->Set(\"width_stride\", (int)width_stride);\n    buffer->Set(\"height_stride\", (int)height_stride);\n    buffer->Set(\"pix_fmt\", std::string(OUTPUT_PIX_FMT));\n    buffer->Set(\"channel\", (int32_t)1);\n    buffer->Set(\"shape\", std::vector<size_t>{(size_t)height_stride * 3 / 2,\n                                             (size_t)width_stride, 1});\n    buffer->Set(\"layout\", std::string(\"hwc\"));\n    buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    buffer->Set(\"index\", *frame_index);\n    *frame_index = *frame_index + 1;\n    buffer->Set(\"rate_num\", rate_num);\n    buffer->Set(\"rate_den\", rate_den);\n    buffer->Set(\"eos\", false);\n\n    output_bufs->PushBack(buffer);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecodeFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::shared_ptr<modelbox::Buffer> flag_buffer = nullptr;\n\n  auto acl_ret = aclrtSetDevice(dev_id_);\n  if (acl_ret != ACL_SUCCESS) {\n    MBLOG_ERROR << \"set acl device to \" << dev_id_ << \" failed, err \"\n                << acl_ret;\n    return modelbox::STATUS_FAULT;\n  
}\n\n  auto video_decoder_ctx = std::static_pointer_cast<DvppVideoDecodeContext>(\n      data_ctx->GetPrivate(DVPP_DECODER_CTX));\n  auto video_decoder = std::static_pointer_cast<AscendVideoDecoder>(\n      data_ctx->GetPrivate(DVPP_DECODER));\n  if (video_decoder == nullptr || video_decoder_ctx == nullptr) {\n    MBLOG_ERROR << \"Video decoder is not init\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = WriteData(data_ctx, video_decoder, video_decoder_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto event = data_ctx->Event();\n  if (event != nullptr) {\n    return modelbox::STATUS_CONTINUE;\n  }\n\n  std::vector<std::shared_ptr<DvppPacket>> dvpp_packet_list;\n  ret = ReadData(data_ctx, dvpp_packet_list, flag_buffer);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read av_packet input failed, err code \" + ret.ToString();\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (flag_buffer) {\n    video_decoder_ctx = nullptr;\n    video_decoder = nullptr;\n    if (ReopenDecoder(data_ctx, flag_buffer) != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Reopen decoder failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    video_decoder_ctx = std::static_pointer_cast<DvppVideoDecodeContext>(\n        data_ctx->GetPrivate(DVPP_DECODER_CTX));\n    video_decoder = std::static_pointer_cast<AscendVideoDecoder>(\n        data_ctx->GetPrivate(DVPP_DECODER));\n    if (video_decoder == nullptr || video_decoder_ctx == nullptr) {\n      MBLOG_ERROR << \"Video decoder is not init\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  size_t err_num = 0;\n  modelbox::Status decode_ret = modelbox::STATUS_SUCCESS;\n  for (auto &dvpp_pkt : dvpp_packet_list) {\n    int retry_num = 0;\n    do {\n      decode_ret = video_decoder->Decode(dvpp_pkt, video_decoder_ctx);\n      if (decode_ret == modelbox::STATUS_FAULT) {\n        MBLOG_ERROR << \"video decoder a packet failed, \"\n                    << 
decode_ret.WrapErrormsgs();\n        retry_num++;\n      }\n    } while (retry_num <= DECODER_RETRY_NUM &&\n             decode_ret == modelbox::STATUS_FAULT);\n\n    if (decode_ret == modelbox::STATUS_FAULT) {\n      err_num++;\n    }\n  }\n\n  if (err_num == dvpp_packet_list.size()) {\n    return {modelbox::STATUS_FAULT, \"video decoder failed.\"};\n  }\n\n  if (decode_ret == modelbox::STATUS_NODATA) {\n    MBLOG_INFO << \"write the last frame. \";\n    ret = WriteData(data_ctx, video_decoder, video_decoder_ctx);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Write the last frame failed\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nMODELBOX_FLOWUNIT(VideoDecodeFlowUnit, desc) {\n  desc.SetFlowUnitName(DVPP_DECODE_FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitInput({VIDEO_PACKET_INPUT, \"cpu\"});\n  desc.AddFlowUnitOutput({FRAME_INFO_OUTPUT, modelbox::ASCEND_MEM_DVPP});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetInputContiguous(false);\n  desc.SetResourceNice(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"pix_fmt\", \"string\", true,\n                                                  \"nv12\", \"the pix format\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/video_decoder_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DVPP_DECODE_ASCEND_H_\n#define MODELBOX_FLOWUNIT_DVPP_DECODE_ASCEND_H_\n\n#include <acl/ops/acl_dvpp.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/ascend/device_ascend.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nextern \"C\" {\n#include <libavcodec/avcodec.h>\n}\n\n#include <algorithm>\n\n#include \"ascend_video_decode.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"ascend\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A resize flowunit on cpu. 
\\n\"\n    \"\\t@Port parameter: the input port buffer type is video_packet, the output \"\n    \"port buffer type is video_frame.\\n\"\n    \"\\t  The video_packet buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: pts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: dts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: time_base,     Type: double\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t  The video_frame buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: index,         Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: url,           Type: string\\n\"\n    \"\\t\\tField Name: timestamp,     Type: int64_t\\n\"\n    \"\\t\\tField Name: eos,           Type: bool\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: the flowuint 'video_decoder' must be used pair \"\n    \"with 'video_demuxer. 
the output buffer meta fields 'pix_fmt' is 'nv12', \"\n    \"'layout' is 'hcw'.\";\nconstexpr const char *DVPP_DECODER = \"dvpp_decode\";\nconstexpr const char *VIDEO_PACKET_INPUT = \"in_video_packet\";\nconstexpr const char *FRAME_INFO_OUTPUT = \"out_video_frame\";\nconstexpr const char *CODEC_META = \"codec_meta\";\nconstexpr const char *SOURCE_URL_META = \"source_url\";\nconstexpr const char *CODEC_ID_META = \"codec_id\";\nconstexpr const char *PROFILE_META = \"profile_meta\";\nconstexpr const char *DVPP_DECODER_CTX = \"dvpp_decode_context\";\nconstexpr const char *DVPP_DECODE_FLOWUNIT_DESC =\n    \"A dvpp_decode flowunit on Ascend\";\nconstexpr const char *DVPP_DECODE_FLOWUNIT_NAME = \"video_decoder\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const int DECODER_RETRY_NUM = 3;\n\n// 此处应该是dvpp类型，但是当前不支持，先使用ascend\nconstexpr const char *DVPP_FLOWUNIT_TYPE = \"ascend\" /*\"ascend-devpp\"*/;\nconstexpr const char *DEVICE_DVPP_TYPE = \"ascend\" /*\"ascend-devpp\"*/;\n\nclass VideoDecodeFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoDecodeFlowUnit();\n  ~VideoDecodeFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  int32_t GetDvppEncodeType(AVCodecID codec_id, int32_t profile_id);\n  modelbox::Status GetDecoderParam(\n      const 
std::shared_ptr<modelbox::DataContext> &data_ctx, int32_t &rate_num,\n      int32_t &rate_den, int32_t &encode_type);\n  modelbox::Status ReadData(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<std::shared_ptr<DvppPacket>> &dvpp_packet_list,\n      std::shared_ptr<modelbox::Buffer> &flag_buffer);\n  modelbox::Status ReadDvppStreamDesc(\n      const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n      std::shared_ptr<DvppPacket> &dvpp_packet);\n  modelbox::Status SetUpTheLastPacket(std::shared_ptr<DvppPacket> &dvpp_packet);\n  modelbox::Status WriteData(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::shared_ptr<AscendVideoDecoder> &video_decoder,\n      const std::shared_ptr<DvppVideoDecodeContext> &dvpp_ctx);\n  void InitInstanceId();\n  int32_t FindTheMinimumAvailableId();\n  void RestoreInstanceId(int32_t instance_id);\n\n  modelbox::Status CloseDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status NewDecoder(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                              const std::string &source_url, AVCodecID codec_id,\n                              int32_t rate_num, int32_t rate_den,\n                              int32_t encode_type);\n  modelbox::Status ReopenDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::shared_ptr<modelbox::Buffer> &flag_buffer);\n\n  uint32_t dest_width_{224};\n  uint32_t dest_height_{224};\n  // 1: YUV420 semi-planner（nv12), 2: YVU420 semi-planner（nv21)\n  int32_t format_{0};\n  acldvppChannelDesc *dvpp_channel_desc_{nullptr};\n  std::mutex mutex;\n  std::map<int32_t, bool> instance_available_map_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DVPP_DECODE_ASCEND_H_\n"
  },
  {
    "path": "src/drivers/devices/ascend/flowunit/video_decoder/video_decoder_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <dsmi_common_interface.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass DvppVideoDecoderFlowUnitTest : public testing::Test {\n public:\n  DvppVideoDecoderFlowUnitTest() = default;\n\n protected:\n  void SetUp() override {\n    // Test ascend runtime\n    int32_t count = 0;\n    auto dsmi_ret = dsmi_get_device_count(&count);\n    if (dsmi_ret != 0) {\n      MBLOG_INFO << \"no ascend device, skip test suit\";\n      GTEST_SKIP();\n    }\n  };\n\n  void TearDown() override{};\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n\n  void StartFlow(const std::string& graph, uint64_t millisecond);\n\n private:\n  Status AddMockFlowUnit();\n};\n\nvoid DvppVideoDecoderFlowUnitTest::StartFlow(const std::string& graph,\n                                             const uint64_t millisecond) {\n  flow_ = std::make_shared<MockFlow>();\n  auto ret = AddMockFlowUnit();\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  flow_->Init(false);\n\n  flow_->BuildAndRun(\"DvppVideoDecoder\", graph, 
millisecond);\n}\n\nstd::string GetGraphToml(const std::string& device,\n                         const std::string& pix_fmt) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  std::string read_frame = \"read_frame_acl\";\n  std::string start_unit = \"start_unit_acl\";\n\n  std::string toml_content =\n      R\"(\n      [log]\n      level = \"DEBUG\"\n      [driver]\n      skip-default = true\n      dir=[\")\" +\n      test_lib_dir + \"\\\",\\\"\" + test_data_dir + \"\\\"]\\n    \" +\n      R\"([graph]\n      thread-num = 16\n      max-thread-num = 100\n      graphconf = '''digraph demo {\n            start_unit_acl[type=flowunit, flowunit=start_unit_acl, device=cpu, deviceid=0, label=\"<stream_meta>\"]\n            videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\", queue_size = 16]\n            videodecoder[type=flowunit, flowunit=video_decoder, device=)\" +\n      device +\n      R\"(, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=)\" +\n      pix_fmt +\n      \", queue_size = 16]\\n            \"\n      R\"(read_frame_acl[type=flowunit, flowunit=read_frame_acl, device=cpu, deviceid=0, label=\"<frame_info>\", queue_size = 16]\n            start_unit_acl:stream_meta -> videodemuxer:in_video_url\n            videodemuxer:out_video_packet -> videodecoder:in_video_packet\n            videodecoder:out_video_frame -> read_frame_acl:frame_info\n          }'''\n      format = \"graphviz\"\n    )\";\n  return toml_content;\n}\n\nStatus DvppVideoDecoderFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"start_unit_acl\", {}, {\"stream_meta\"});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetStreamSameCount(true);\n    auto open_func = [=](const std::shared_ptr<Configuration>& opts,\n                         const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n      for 
(uint32_t i = 0; i < 16; i++) {\n        auto ext_data = mock_flowunit->CreateExternalData();\n        if (!ext_data) {\n          MBLOG_ERROR << \"can not get external data.\";\n          return STATUS_FAULT;\n        }\n\n        auto source_url = std::string(TEST_ASSETS) +\n                          \"/video/avc1_5s_480x320_24fps_yuv420_8bit.mp4\";\n\n        auto output_buf = ext_data->CreateBufferList();\n        modelbox::TensorList output_tensor_list(output_buf);\n        output_tensor_list.BuildFromHost<unsigned char>(\n            {1, {source_url.size() + 1}}, (void*)source_url.data(),\n            source_url.size() + 1);\n\n        auto data_meta = std::make_shared<DataMeta>();\n        data_meta->SetMeta(\"source_url\",\n                           std::make_shared<std::string>(source_url));\n        ext_data->SetOutputMeta(data_meta);\n\n        auto status = ext_data->Send(output_buf);\n        if (!status) {\n          MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n          return STATUS_FAULT;\n        }\n\n        status = ext_data->Close();\n        if (!status) {\n          MBLOG_ERROR << \"external data close failed:\" << status;\n          return STATUS_FAULT;\n        }\n      }\n\n      return modelbox::STATUS_SUCCESS;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto output_buf = data_ctx->Output(\"stream_meta\");\n      std::vector<size_t> shape(1, 1);\n      output_buf->Build(shape);\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    mock_funcitons->RegisterOpenFunc(open_func);\n    flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc = 
GenerateFlowunitDesc(\"read_frame_acl\", {\"frame_info\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    auto data_pre_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto index_counter = std::make_shared<int64_t>(0);\n      data_ctx->SetPrivate(\"index\", index_counter);\n      return modelbox::STATUS_OK;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto index_counter =\n          std::static_pointer_cast<int64_t>(op_ctx->GetPrivate(\"index\"));\n\n      auto frame_buffer_list = op_ctx->Input(\"frame_info\");\n      EXPECT_NE(frame_buffer_list, nullptr);\n      for (size_t i = 0; i < frame_buffer_list->Size(); ++i) {\n        auto frame_buffer = frame_buffer_list->At(i);\n        if (frame_buffer->GetBytes() == 0) {\n          continue;\n        }\n\n        int64_t index = 0;\n        int32_t width = 0;\n        int32_t height = 0;\n        int32_t rate_num = 0;\n        int32_t rate_den = 0;\n        bool eos;\n        frame_buffer->Get(\"index\", index);\n        frame_buffer->Get(\"width\", width);\n        frame_buffer->Get(\"height\", height);\n        frame_buffer->Get(\"rate_num\", rate_num);\n        frame_buffer->Get(\"rate_den\", rate_den);\n        frame_buffer->Get(\"eos\", eos);\n        EXPECT_EQ(index, *index_counter);\n        *index_counter = *index_counter + 1;\n        EXPECT_EQ(width, 480);\n        EXPECT_EQ(height, 320);\n        EXPECT_EQ(rate_num, 24);\n        EXPECT_EQ(rate_den, 1);\n        EXPECT_FALSE(eos);\n      }\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    mock_funcitons->RegisterDataPreFunc(data_pre_func);\n    flow_->AddFlowUnitDesc(mock_desc, 
mock_funcitons->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nTEST_F(DvppVideoDecoderFlowUnitTest, ascendDecoderRgbTest) {\n  auto toml_content = GetGraphToml(\"ascend\", \"nv12\");\n  StartFlow(toml_content, 10 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-cpu)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nadd_subdirectory(core)\nadd_subdirectory(flowunit)\n"
  },
  {
    "path": "src/drivers/devices/cpu/core/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(DEVICE_NAME \"cpu\")\nproject(modelbox-devices-${DEVICE_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(LIBMODELBOX_DEVICE_CPU_SOURCES MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\nset(LIBMODELBOX_DEVICE_CPU_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(HEADER \n    ${LIBMODELBOX_DEVICE_CPU_INCLUDE}/modelbox\n)\n\nset(LIBMODELBOX_DEVICE_CPU_STATIC libmodelbox-device-${DEVICE_NAME}-static)\nset(LIBMODELBOX_DEVICE_CPU_SHARED libmodelbox-device-${DEVICE_NAME}-shared)\n\nadd_library(${LIBMODELBOX_DEVICE_CPU_STATIC} STATIC ${LIBMODELBOX_DEVICE_CPU_SOURCES})\nadd_library(${LIBMODELBOX_DEVICE_CPU_SHARED} SHARED ${LIBMODELBOX_DEVICE_CPU_SOURCES})\n\nset_target_properties(${LIBMODELBOX_DEVICE_CPU_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_STATIC} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_STATIC} 
${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_STATIC} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_STATIC} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_STATIC} dl)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_SHARED} ${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CPU_SHARED} dl)\n\nset_target_properties(${LIBMODELBOX_DEVICE_CPU_STATIC} ${LIBMODELBOX_DEVICE_CPU_SHARED} \n    PROPERTIES OUTPUT_NAME \"modelbox-device-${DEVICE_NAME}\"\n)\nset_target_properties(${LIBMODELBOX_DEVICE_CPU_STATIC} ${LIBMODELBOX_DEVICE_CPU_SHARED}\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${TEST_WORKING_LIB_DIR}\"\n    RUNTIME_OUTPUT_DIRECTORY \"${TEST_WORKING_BIN_DIR}\"\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox-device-${DEVICE_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc @ONLY)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_CPU_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_CPU_STATIC} \n    COMPONENT cpu-device-flowunit-devel\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc \n    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig \n    COMPONENT cpu-device-flowunit-devel\n    )\n\n\nset(LIBMODELBOX_DEVICE_CPU_SHARED_LIBRARIES ${LIBMODELBOX_DEVICE_CPU_SHARED_LIBRARIES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CPU_SHARED ${LIBMODELBOX_DEVICE_CPU_SHARED} CACHE 
INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CPU_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CPU_INCLUDE ${LIBMODELBOX_DEVICE_CPU_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CPU_SOURCES ${LIBMODELBOX_DEVICE_CPU_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${LIBMODELBOX_DEVICE_CPU_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/core/cpu_memory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/cpu/cpu_memory.h\"\n\n#include <securec.h>\n\n#include \"modelbox/base/collector.h\"\n#include \"modelbox/base/os.h\"\n\nnamespace modelbox {\n\nstatic RefVar<CpuMemoryPool> kCpuMemoryPool;\n\nCpuMemory::CpuMemory(const std::shared_ptr<Device> &device,\n                     const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                     const std::shared_ptr<void> &device_mem_ptr, size_t size)\n    : DeviceMemory(device, mem_mgr, device_mem_ptr, size, true) {}\n\nCpuMemory::~CpuMemory() = default;\n\nStatus CpuMemory::ReadFrom(\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, size_t dest_offset) {\n  if (!CheckReadFromParam(src_memory, src_offset, src_size, dest_offset)) {\n    MBLOG_ERROR << \"Check read param failed\";\n    return STATUS_INVALID;\n  }\n\n  auto mem_mgr = src_memory->mem_mgr_;\n  return mem_mgr->DeviceMemoryCopy(shared_from_this(), dest_offset, src_memory,\n                                   src_offset, src_size,\n                                   DeviceMemoryCopyKind::ToHost);\n}\n\nStatus CpuMemory::Verify() const {\n  auto mem_size = offset_ + capacity_;\n  if (mem_size == 0) {\n    return STATUS_OK;\n  }\n\n  auto *magic_code = (uint64_t *)((uint8_t *)device_mem_ptr_.get() + mem_size);\n  if 
(MEM_MAGIC_CODE != *magic_code) {\n    MBLOG_ERROR << \"Host memory verify failed, magic code wrong\";\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nCpuMemoryPool::CpuMemoryPool() = default;\n\nStatus CpuMemoryPool::Init() {\n  auto status = InitSlabCache();\n  if (!status) {\n    return {status, \"init mempool failed.\"};\n  }\n\n  return STATUS_OK;\n}\n\nCpuMemoryPool::~CpuMemoryPool() {\n  ClearAllSlabs();\n  if (flush_timer_) {\n    flush_timer_->Stop();\n    flush_timer_ = nullptr;\n  }\n}\n\nvoid CpuMemoryPool::OnTimer() {\n  // TODO support config shrink time.\n}\n\nvoid *CpuMemoryPool::MemAlloc(size_t size) {\n  auto *cpu_mem_ptr = (uint8_t *)malloc(size);\n  if (cpu_mem_ptr == nullptr) {\n    MBLOG_ERROR << \"cpu_mem_ptr is null\";\n  }\n  return cpu_mem_ptr;\n}\n\nvoid CpuMemoryPool::MemFree(void *ptr) { free(ptr); }\n\nCpuMemoryManager::CpuMemoryManager(const std::string &device_id)\n    : DeviceMemoryManager(device_id) {}\n\nCpuMemoryManager::~CpuMemoryManager() = default;\n\nStatus CpuMemoryManager::Init() {\n  static std::once_flag flag;\n  std::call_once(flag, []() {\n    kCpuMemoryPool.MakeFunc([](int index) -> std::shared_ptr<CpuMemoryPool> {\n      auto pool = std::make_shared<CpuMemoryPool>();\n      if (pool->Init() != STATUS_OK) {\n        return nullptr;\n      }\n\n      pool->SetName(\"cpu\");\n      return pool;\n    });\n  });\n\n  mem_pool_ = kCpuMemoryPool.Get();\n  if (mem_pool_ == nullptr) {\n    const auto *err_msg = \"Get cpu memory pool failed.\";\n    MBLOG_ERROR << err_msg;\n    return {STATUS_NOMEM, err_msg};\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DeviceMemory> CpuMemoryManager::MakeDeviceMemory(\n    const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n    size_t size) {\n  return std::make_shared<CpuMemory>(device, shared_from_this(), mem_ptr, size);\n}\n\nstd::shared_ptr<void> CpuMemoryManager::AllocSharedPtr(size_t size,\n                                                       
uint32_t mem_flags) {\n  if (mem_pool_ == nullptr) {\n    MBLOG_ERROR << \"memory pool is not init.\";\n    return nullptr;\n  }\n\n  auto mem_size = size + sizeof(DeviceMemory::MEM_MAGIC_CODE);\n  auto cpu_mem_ptr = mem_pool_->AllocSharedPtr(mem_size);\n  if (cpu_mem_ptr == nullptr) {\n    MBLOG_ERROR << \"Cpu malloc failed, size \" << mem_size;\n    return nullptr;\n  }\n\n  *((uint64_t *)((u_char *)cpu_mem_ptr.get() + size)) =\n      DeviceMemory::MEM_MAGIC_CODE;\n  return cpu_mem_ptr;\n}\n\nvoid *CpuMemoryManager::Malloc(size_t size, uint32_t mem_flags) {\n  if (mem_pool_ == nullptr) {\n    MBLOG_ERROR << \"memory pool is not init.\";\n    return nullptr;\n  }\n\n  return mem_pool_->MemAlloc(size);\n}\n\nvoid CpuMemoryManager::Free(void *mem_ptr, uint32_t mem_flags) {\n  if (mem_pool_ == nullptr) {\n    MBLOG_ERROR << \"memory pool is not init.\";\n    return;\n  }\n\n  mem_pool_->MemFree(mem_ptr);\n}\n\nStatus CpuMemoryManager::Copy(void *dest, size_t dest_size,\n                              const void *src_buffer, size_t src_size,\n                              DeviceMemoryCopyKind kind) {\n  auto ret = memcpy_s(dest, dest_size, src_buffer, src_size);\n  if (EOK != ret) {\n    MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \" << src_size\n                << \", dest size \" << dest_size;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nStatus CpuMemoryManager::GetDeviceMemUsage(size_t *free, size_t *total) const {\n  return os->GetMemoryUsage(free, total);\n}\n\nStatus CpuMemoryManager::DeviceMemoryCopy(\n    const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, DeviceMemoryCopyKind copy_kind) {\n  auto *dest_ptr = dest_memory->GetPtr<uint8_t>().get() + dest_offset;\n  const auto *src_ptr = src_memory->GetConstPtr<uint8_t>().get() + src_offset;\n  auto ret = memcpy_s(dest_ptr, src_size, src_ptr, src_size);\n  if 
(EOK != ret) {\n    MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \" << src_size\n                << \", dest size \" << src_size;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/core/device_cpu.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"modelbox/base/utils.h\"\n\nnamespace modelbox {\nCPU::CPU(const std::shared_ptr<DeviceMemoryManager> &mem_mgr)\n    : Device(mem_mgr) {}\n\nCPU::~CPU() = default;\n\nstd::string CPU::GetType() const { return DEVICE_TYPE; }\n\nStatus CPU::DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                          size_t count) {\n  if (0 == count) {\n    return STATUS_OK;\n  }\n\n  std::vector<std::future<Status>> future_list(count - 1);\n  for (size_t i = 0; i < count - 1; ++i) {\n    auto future_status = executor_->Run(fun, priority, i);\n    future_list[i] = std::move(future_status);\n  }\n\n  auto status = fun(count - 1);\n  std::vector<Status> future_status(count, STATUS_OK);\n  future_status[count - 1] = status;\n  for (size_t i = 0; i < future_list.size(); ++i) {\n    future_list[i].wait();\n    future_status[i] = future_list[i].get();\n  }\n\n  auto ret = STATUS_OK;\n  for (const auto &status : future_status) {\n    if (!status) {\n      return status;\n    }\n  }\n\n  return STATUS_OK;\n};\n\nCPUFactory::CPUFactory() = default;\nCPUFactory::~CPUFactory() = default;\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>> CPUFactory::DeviceProbe() {\n  
std::map<std::string, std::shared_ptr<DeviceDesc>> return_map;\n  size_t free;\n  size_t total;\n  std::shared_ptr<CPUDesc> device_desc = std::make_shared<CPUDesc>();\n  device_desc->SetDeviceDesc(\"Host cpu device.\");\n  device_desc->SetDeviceId(\"0\");\n  os->GetMemoryUsage(&free, &total);\n  device_desc->SetDeviceMemory(GetBytesReadable(total));\n  device_desc->SetDeviceType(\"CPU\");\n  return_map.insert(std::make_pair(\"0\", device_desc));\n  return return_map;\n}\n\nstd::string CPUFactory::GetDeviceFactoryType() { return DEVICE_TYPE; }\n\nstd::vector<std::string> CPUFactory::GetDeviceList() {\n  std::vector<std::string> cpuIds;\n  cpuIds.emplace_back(\"0\");\n  return cpuIds;\n}\n\nstd::shared_ptr<Device> CPUFactory::CreateDevice(const std::string &device_id) {\n  auto mem_mgr = std::make_shared<CpuMemoryManager>(device_id);\n  auto status = mem_mgr->Init();\n  if (!status) {\n    StatusError = status;\n    return nullptr;\n  }\n  return std::make_shared<CPU>(mem_mgr);\n}\n\nCPUDesc::CPUDesc() = default;\n\nCPUDesc::~CPUDesc() = default;\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/core/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n\n#include <stdio.h>\n#include <memory>\n\n\nstd::shared_ptr<modelbox::Timer> kDeviceTimer;\n\nmodelbox::Timer *GetTimer() { return kDeviceTimer.get(); }\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::CPUFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_DEVICE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetName(modelbox::DEVICE_DRIVER_NAME);\n  desc->SetDescription(modelbox::DEVICE_DRIVER_DESCRIPTION);\n}\n\nmodelbox::Status DriverInit() {\n  if (kDeviceTimer != nullptr) {\n    return modelbox::STATUS_OK;\n  } \n\n  kDeviceTimer = std::make_shared<modelbox::Timer>();\n  kDeviceTimer->SetName(\"Cpu-Timer\");\n  kDeviceTimer->Start();\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  if (kDeviceTimer == nullptr) {\n    return;\n  }\n\n  // Driver Fini.\n  kDeviceTimer->Stop();\n  kDeviceTimer = nullptr;\n}\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/core/include/modelbox/device/cpu/cpu_memory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CPU_MEMORY_H_\n#define MODELBOX_CPU_MEMORY_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/memory_pool.h>\n#include <modelbox/base/timer.h>\n\nextern modelbox::Timer *GetTimer();\n\nnamespace modelbox {\n\nclass CpuMemory;\nclass CpuMemoryManager;\n\nclass CpuMemory : public DeviceMemory {\n public:\n  /**\n   * @brief Construct a host memory with physical mem ptr, called by cpu device\n   * @param device Memory belong to\n   * @param mem_mgr Device Memory manager\n   * @param device_mem_ptr Memory pointer\n   * @param size Memory size\n   */\n  CpuMemory(const std::shared_ptr<Device> &device,\n            const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n            const std::shared_ptr<void> &device_mem_ptr, size_t size);\n\n  ~CpuMemory() override;\n  /**\n   * @brief Read data from other device memory\n   * @param src_memory Memory read from\n   * @param src_offset Offset in the memory read from\n   * @param src_size Size in the memory read from\n   * @param dest_offset Offset in memory write to\n   * @return Status\n   */\n  Status ReadFrom(const std::shared_ptr<const DeviceMemory> &src_memory,\n                  size_t src_offset, size_t src_size,\n                  size_t dest_offset = 0) override;\n\n  /**\n   * @brief Check memory out of bound; Make checksum\n   * 
@return Result of verify, 0 is ok\n   */\n  Status Verify() const override;\n};\n\nclass CpuMemoryPool : public MemoryPoolBase {\n public:\n  CpuMemoryPool();\n\n  ~CpuMemoryPool() override;\n\n  Status Init();\n\n  void *MemAlloc(size_t size) override;\n\n  void MemFree(void *ptr) override;\n\n  virtual void OnTimer();\n\n private:\n  std::shared_ptr<TimerTask> flush_timer_;\n};\n\nclass CpuMemoryManager : public DeviceMemoryManager {\n public:\n  CpuMemoryManager(const std::string &device_id);\n\n  ~CpuMemoryManager() override;\n\n  /**\n   * @brief Init memory manager\n   * @return init result\n   */\n  Status Init();\n\n  /**\n   * @brief Create a specified memory container\n   * @param device pointer to device\n   * @param mem_ptr shared pointer to memory\n   * @param size memory size\n   * @return Empty memory container\n   */\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override;\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory in shared ptr\n   */\n  std::shared_ptr<void> AllocSharedPtr(size_t size,\n                                       uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory.\n   */\n  void *Malloc(size_t size, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, free memory\n   * @param mem_ptr Memory to free\n   * @param mem_flags Flags of device memory\n   */\n  void Free(void *mem_ptr, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, copy data from src to dest\n   * @param dest dest buffer to write\n   * @param dest_size dest buffer size\n   * @param src_buffer 
src buffer to read\n   * @param src_size read data size\n   * @param kind data copy kind\n   * @return Status\n   */\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override;\n\n  /**\n   * @brief Copy memory between current device and host\n   * @param dest_memory Destination memory\n   * @param dest_offset Destination memory offset\n   * @param src_memory Source memory\n   * @param src_offset Source offset\n   * @param src_size Source memory size\n   * @param copy_kind memory copy mode\n   * @return Status\n   */\n  Status DeviceMemoryCopy(\n      const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n      const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n      size_t src_size,\n      DeviceMemoryCopyKind copy_kind = DeviceMemoryCopyKind::FromHost) override;\n\n  /**\n   * @brief Get device memory info\n   * @return Status\n   */\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override;\n\n private:\n  std::shared_ptr<CpuMemoryPool> mem_pool_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_CPU_MEMORY_H_"
  },
  {
    "path": "src/drivers/devices/cpu/core/include/modelbox/device/cpu/device_cpu.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DEVICE_CPU_H_\n#define MODELBOX_DEVICE_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n#include \"modelbox/device/cpu/cpu_memory.h\"\n\nnamespace modelbox {\n\nconstexpr const char *DEVICE_TYPE = \"cpu\";\nconstexpr const char *DEVICE_DRIVER_NAME = \"device-cpu\";\nconstexpr const char *DEVICE_DRIVER_DESCRIPTION = \"A cpu device driver\";\n\nclass CPU : public Device {\n public:\n  CPU(const std::shared_ptr<DeviceMemoryManager> &mem_mgr);\n  ~CPU() override;\n  std::string GetType() const override;\n\n  Status DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                       size_t count) override;\n};\n\nclass CPUFactory : public DeviceFactory {\n public:\n  CPUFactory();\n  ~CPUFactory() override;\n\n  std::map<std::string, std::shared_ptr<DeviceDesc>> DeviceProbe() override;\n  std::string GetDeviceFactoryType() override;\n  std::vector<std::string> GetDeviceList() override;\n  std::shared_ptr<Device> CreateDevice(const std::string &device_id) override;\n};\n\nclass CPUDesc : public DeviceDesc {\n public:\n  CPUDesc();\n  ~CPUDesc() override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/core/libmodelbox-device-cpu.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox/device/cpu\n\nName: libmodelbox-device-cpu\nDescription: modelbox cpu device SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox-device-cpu\nCflags: -I${includedir}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-cpu-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/base64_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"base64_decoder\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_BASE64_DECODER_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} 
PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cpu-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_BASE64_DECODER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_BASE64_DECODER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_BASE64_DECODER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_BASE64_DECODER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/base64_decoder/base64_decoder.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"base64_decoder.h\"\n\n#include <modelbox/base/crypto.h>\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nusing nlohmann::json;\n\nBase64DecoderFlowUnit::Base64DecoderFlowUnit() = default;\nBase64DecoderFlowUnit::~Base64DecoderFlowUnit() = default;\n\nmodelbox::Status Base64DecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  // data_format only surport [raw,json]\n  data_format_ = opts->GetString(\"data_format\", \"raw\");\n  if (data_format_ != \"raw\" && data_format_ != \"json\") {\n    MBLOG_ERROR << \"Valid data_format is: \" << data_format_;\n    return {modelbox::STATUS_BADCONF, \"Valid data_format is: \" + data_format_};\n  }\n\n  decoder_key_ = opts->GetString(\"key\", \"\");\n  return modelbox::STATUS_OK;\n}\n\nstd::string Base64DecoderFlowUnit::JsonDecode(const std::string &buffer) {\n  try {\n    auto data_body = json::parse(buffer);\n\n    if (!data_body.contains(decoder_key_)) {\n      MBLOG_ERROR << decoder_key_ << \" isn't exist\";\n      return \"\";\n    }\n\n    if (!data_body[decoder_key_].is_string()) {\n      MBLOG_ERROR << \"data isn't string, key:\" << decoder_key_;\n      return \"\";\n    }\n\n    return std::move(data_body[decoder_key_].get<std::string>());\n  } catch (std::exception 
const &e) {\n    MBLOG_ERROR << \"failed to json decode exception: \" << e.what();\n    return \"\";\n  }\n}\n\nstd::shared_ptr<modelbox::Buffer> Base64DecoderFlowUnit::Base64Decoder(\n    const std::string &buffer) {\n  if (buffer.empty()) {\n    MBLOG_ERROR << \"input is empty\";\n    return nullptr;\n  }\n\n  std::vector<u_char> input_data;\n  auto ret = modelbox::Base64Decode(buffer, &input_data);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"base64 decode fail reason: \" << ret.Errormsg();\n    return nullptr;\n  }\n\n  auto out_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  if (out_buffer == nullptr) {\n    MBLOG_ERROR << \"failed to make out buffer\";\n    return nullptr;\n  }\n\n  ret = out_buffer->Build(input_data.size());\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"build buffer fail size: \" << input_data.size()\n                << \" reason: \" << ret.Errormsg();\n    return nullptr;\n  }\n\n  auto e_ret = memcpy_s(out_buffer->MutableData(), out_buffer->GetBytes(),\n                        input_data.data(), input_data.size());\n  if (e_ret != EOK) {\n    MBLOG_ERROR << \"failed to memcpy ret: \" << e_ret;\n    return nullptr;\n  }\n\n  return out_buffer;\n}\n\nmodelbox::Status Base64DecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  // get input\n  auto input_bufs = ctx->Input(\"in_data\");\n  auto output_bufs = ctx->Output(\"out_data\");\n  if (input_bufs->Size() <= 0) {\n    auto msg = \"input data batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  std::vector<size_t> output_shape;\n  for (auto &buffer : *input_bufs) {\n    int data_len = buffer->GetBytes();\n    if (data_len <= 0) {\n      const auto *msg = \"in data size is invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    std::string in_data_str((char *)buffer->ConstData(), data_len);\n\n    if 
(data_format_ == \"json\") {\n      in_data_str = JsonDecode(in_data_str);\n    }\n\n    auto out_buf = Base64Decoder(in_data_str);\n    if (out_buf == nullptr) {\n      const auto *msg = \"out buf is nullptr\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    output_bufs->PushBack(out_buf);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(Base64DecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_data\", \"cpu\"});\n  desc.AddFlowUnitOutput({\"out_data\"});\n\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/base64_decoder/base64_decoder.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_BASE64_DECODER_CPU_H_\n#define MODELBOX_FLOWUNIT_BASE64_DECODER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_NAME = \"base64_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: base64 decoder flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,               Type: int32_t\\n\"\n    \"\\t\\tField Name: height,              Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,        Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride,       Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,             Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,             Type: string\\n\"\n    \"\\t\\tField Name: layout,              Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,               Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,                Type: \"\n    \"ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nclass Base64DecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  Base64DecoderFlowUnit();\n  ~Base64DecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> ct) override;\n\n private:\n  std::shared_ptr<modelbox::Buffer> Base64Decoder(const std::string &buffer);\n  std::string JsonDecode(const std::string &buffer);\n\n private:\n  std::string data_format_;\n  std::string decoder_key_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_BASE64_DECODER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/base64_decoder/base64_decoder_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/crypto.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass Base64DecoderFlowUnitTest : public testing::Test {\n public:\n  Base64DecoderFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {}\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> Base64DecoderFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(Base64DecoderFlowUnitTest, DecodeTest) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = 
'''digraph demo {\n          input[type=input]\n          output[type=output]\n          base64_decoder[type=flowunit, flowunit=base64_decoder, device=cpu, deviceid=0, batch_size=3]\n          input -> base64_decoder:in_data\n          base64_decoder:out_data -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"DecodeTest\", toml_content, -1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  MBLOG_INFO << toml_content;\n\n  std::string test_text = \"this is base64 decoder test text\";\n  std::vector<unsigned char> in_text(test_text.begin(), test_text.end());\n  std::string base64_text;\n  EXPECT_TRUE(modelbox::Base64Encode(in_text, &base64_text));\n\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto in_buffer_list = extern_data->CreateBufferList();\n  in_buffer_list->Build({base64_text.size()});\n  auto in_buffer = in_buffer_list->At(0);\n  auto e_ret = memcpy_s(in_buffer->MutableData(), in_buffer->GetBytes(),\n                        base64_text.c_str(), base64_text.size());\n  EXPECT_EQ(e_ret, EOK);\n\n  auto status = extern_data->Send(\"input\", in_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  ASSERT_EQ(output_buffer->GetBytes(), test_text.size());\n\n  std::shared_ptr<unsigned char> out_buf(\n      new (std::nothrow) unsigned char[output_buffer->GetBytes()],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_buf.get(), output_buffer->GetBytes(), 0,\n                   output_buffer->GetBytes());\n  EXPECT_EQ(e_ret, EOK);\n\n  e_ret = memcpy_s(out_buf.get(), output_buffer->GetBytes(),\n                   output_buffer->ConstData(), 
output_buffer->GetBytes());\n  EXPECT_EQ(e_ret, EOK);\n\n  // cmp memory\n  EXPECT_EQ(memcmp(out_buf.get(), test_text.c_str(), output_buffer->GetBytes()),\n            0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(Base64DecoderFlowUnitTest, JsonDecodeTest) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          base64_decoder[type=flowunit, flowunit=base64_decoder, device=cpu, deviceid=0, batch_size=3, data_format=json, key=data_base64]\n          input -> base64_decoder:in_data\n          base64_decoder:out_data -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"DecodeTest\", toml_content, -1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  MBLOG_INFO << toml_content;\n\n  std::string test_text = \"this is base64 decoder test text\";\n  std::vector<unsigned char> in_text(test_text.begin(), test_text.end());\n  std::string base64_text;\n  EXPECT_TRUE(modelbox::Base64Encode(in_text, &base64_text));\n\n  nlohmann::json base64_data_json;\n  base64_data_json[\"data_base64\"] = base64_text;\n  std::string base64_data_json_str = base64_data_json.dump();\n\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto in_buffer_list = extern_data->CreateBufferList();\n  in_buffer_list->Build({base64_data_json_str.size()});\n  auto in_buffer = in_buffer_list->At(0);\n  auto e_ret =\n      memcpy_s(in_buffer->MutableData(), in_buffer->GetBytes(),\n               base64_data_json_str.c_str(), base64_data_json_str.size());\n  EXPECT_EQ(e_ret, EOK);\n\n  auto status = extern_data->Send(\"input\", in_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  // check output\n  OutputBufferList 
map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  ASSERT_EQ(output_buffer->GetBytes(), test_text.size());\n\n  std::shared_ptr<unsigned char> out_buf(\n      new (std::nothrow) unsigned char[output_buffer->GetBytes()],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_buf.get(), output_buffer->GetBytes(), 0,\n                   output_buffer->GetBytes());\n  EXPECT_EQ(e_ret, EOK);\n\n  e_ret = memcpy_s(out_buf.get(), output_buffer->GetBytes(),\n                   output_buffer->ConstData(), output_buffer->GetBytes());\n  EXPECT_EQ(e_ret, EOK);\n\n  // cmp memory\n  EXPECT_EQ(memcmp(out_buf.get(), test_text.c_str(), output_buffer->GetBytes()),\n            0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/color_transpose/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"packed_planar_transpose\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable packed_planar_transpose flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_COLORTRANSPOSE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLORTRANSPOSE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLORTRANSPOSE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLORTRANSPOSE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/color_transpose/color_transpose_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"color_transpose_flowunit.h\"\n\n#include <securec.h>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nColorTransposeFlowUnit::ColorTransposeFlowUnit() = default;\nColorTransposeFlowUnit::~ColorTransposeFlowUnit() = default;\n\nmodelbox::Status ColorTransposeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status ColorTransposeFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ColorTransposeFlowUnit::CheckParam(\n    modelbox::ModelBoxDataType type, const std::string &pix_fmt,\n    const std::string &layout) {\n  if (type != modelbox::ModelBoxDataType::MODELBOX_UINT8) {\n    return {modelbox::STATUS_INVALID, \"type must be uint8\"};\n  }\n\n  if (pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n    return {modelbox::STATUS_INVALID, \"pix_fmt should be [rgb, bgr]\"};\n  }\n\n  if (layout != \"hwc\") {\n    return {modelbox::STATUS_INVALID, \"layout must be hwc\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ColorTransposeFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"color_transpose process begin\";\n  auto input_buf = data_ctx->Input(\"in_image\");\n  auto output_buf = data_ctx->Output(\"out_image\");\n\n  
std::vector<size_t> shape_vector;\n  for (size_t i = 0; i < input_buf->Size(); ++i) {\n    shape_vector.push_back(input_buf->At(i)->GetBytes());\n  }\n  output_buf->Build(shape_vector);\n  output_buf->CopyMeta(input_buf);\n\n  for (size_t i = 0; i < input_buf->Size(); ++i) {\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t channel = 0;\n    std::string pix_fmt;\n    std::string layout;\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    bool metaresult = true;\n    metaresult = input_buf->At(i)->Get(\"width\", width) ? metaresult : false;\n    metaresult = input_buf->At(i)->Get(\"height\", height) ? metaresult : false;\n    metaresult = input_buf->At(i)->Get(\"channel\", channel) ? metaresult : false;\n    metaresult = input_buf->At(i)->Get(\"pix_fmt\", pix_fmt) ? metaresult : false;\n    metaresult = input_buf->At(i)->Get(\"type\", type) ? metaresult : false;\n    metaresult = input_buf->At(i)->Get(\"layout\", layout) ? metaresult : false;\n\n    if (metaresult == false) {\n      const auto *msg = \"buffer meta is invalid.\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_BADCONF, msg};\n    }\n\n    auto ret = CheckParam(type, pix_fmt, layout);\n    if (!ret) {\n      MBLOG_ERROR << \"input buffer meta invalid, detail: \" << ret;\n      return ret;\n    }\n\n    size_t elem_size = width * height;\n\n    const auto *input_data =\n        static_cast<const u_char *>(input_buf->ConstBufferData(i));\n    auto *output_data = static_cast<u_char *>(output_buf->MutableBufferData(i));\n    if (input_data == nullptr || output_data == nullptr) {\n      return {modelbox::STATUS_NOMEM};\n    }\n\n    for (size_t i = 0; i < (size_t)channel; ++i) {\n      for (size_t j = 0; j < elem_size; ++j) {\n        output_data[i * elem_size + j] = input_data[j * channel + i];\n      }\n    }\n    auto buffer = output_buf->At(i);\n    buffer->CopyMeta(input_buf->At(i));\n    buffer->Set(\"width\", width);\n    buffer->Set(\"height\", 
height);\n    buffer->Set(\"width_stride\", width * 3);\n    buffer->Set(\"height_stride\", height);\n    buffer->Set(\"channel\", channel);\n    buffer->Set(\"pix_fmt\", pix_fmt);\n    buffer->Set(\"layout\", std::string(\"chw\"));\n    buffer->Set(\"shape\", std::vector<size_t>{(size_t)channel, (size_t)height,\n                                             (size_t)width});\n    buffer->Set(\"type\", type);\n  }\n\n  MBLOG_DEBUG << \"color_transpose process data finish\";\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ColorTransposeFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/color_transpose/color_transpose_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_COLORTRANSPOSEFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_COLORTRANSPOSEFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"packed_planar_transpose\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: Convert the image format from packed to planar. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': [rgb,bgr], 'layout': [hwc]\";\n\nclass ColorTransposeFlowUnit : public modelbox::FlowUnit {\n public:\n  ColorTransposeFlowUnit();\n  ~ColorTransposeFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status CheckParam(modelbox::ModelBoxDataType type,\n                              const std::string &pix_fmt,\n                              const std::string &layout);\n};\n\n#endif  // MODELBOX_FLOWUNIT_COLORTRANSPOSEFLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"yolov3_postprocess\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable yolobox flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/modelbox.test.yolobox.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/modelbox.test.yolobox.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_YOLOBOX_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_YOLOBOX_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_YOLOBOX_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_YOLOBOX_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_YOLOBOX_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"yolobox_flowunit.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<YoloboxFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc->SetType(FLOWUNIT_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetVersion(\"1.0.0\");\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/modelbox.test.yolobox.in",
    "content": "[base]\nname = \"test_yolobox\"\nversion = \"1.0.0\"\ndescription = \"a common cpu yolobox flowunit\"\ntype = \"yolo_postprocess\"\nvirtual_type = \"yolov3_postprocess\"\ndevice = \"cpu\"\n\n[config]\ninput_width = 800\ninput_height = 480\nclass_num = 1\nscore_threshold = [\"0.6\",\"0.7\"]\nnms_threshold = [\"0.45\",\"0.3\"]\nyolo_output_layer_num = 2\nyolo_output_layer_wh = [\"25\",\"15\",\"50\",\"30\"]\nanchor_num = [\"4\",\"4\"]\nanchor_biases = [\"100.0\",\"72.0\",\"173.12\",\"55.04\",\"165.12\",\"132.0\",\"280.0\",\"252.0\",\" 10.0\",\"8.0\",\"20.0\",\"16.0\",\"30.0\",\"24.0\",\"67.0\",\"56.0\"]\n\n[input]\n[input.input1]\nname = \"in_1\"\n[input.input2]\nname = \"in_2\"\n\n[output]\n[output.output1]\nname = \"out_1\"\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/yolo_helper.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"yolo_helper.h\"\n\n#include <modelbox/base/log.h>\n\n#include <cmath>\n\nconstexpr int32_t CLASS_BACKGROUND = -1;\n\nvoid YoloHelper::GetBoundingBox(\n    const float *single_layer_result, size_t layer_index,\n    std::function<void(float x, float y, float w, float h, float box_score,\n                       int category)> const &save_box_func) {\n  auto output_width = param_.layer_wh_[2 * layer_index];\n  auto output_height = param_.layer_wh_[2 * layer_index + 1];\n  auto step = output_height * output_width;\n  auto anchor_size = (5 + param_.class_num_) * output_height * output_width;\n  auto anchor_num = param_.anchor_num_[layer_index];\n  int category = 0;\n  float score = 0;\n  for (size_t anchor_index = 0; anchor_index < anchor_num; ++anchor_index) {\n    const auto *anchor_data = single_layer_result + anchor_index * anchor_size;\n    for (int32_t h = 0; h < output_height; ++h) {\n      for (int32_t w = 0; w < output_width; ++w) {\n        auto confidence = Sigmoid(anchor_data[4 * step + h * output_width + w]);\n        const auto *score_data = anchor_data + 5 * step + h * output_width + w;\n        GetCategoryAndScore(score_data, step, param_.class_num_, category,\n                            score);\n        if (category == CLASS_BACKGROUND) {\n          continue;\n        }\n\n        
GetOneBoundingBox(anchor_data, category, score * confidence,\n                          layer_index, step, h, w, anchor_index, save_box_func);\n      }\n    }\n  }\n}\n\nvoid YoloHelper::GetCategoryAndScore(const float *input, int32_t step,\n                                     int32_t class_num, int32_t &category,\n                                     float &score) {\n  if (class_num == 1) {\n    score = 1;\n    category = 0;\n  } else {\n    float max_score = -1;\n    int32_t max_score_category = CLASS_BACKGROUND;\n    for (int32_t c = 0; c < class_num; ++c) {\n      if (input[c * step] > max_score) {\n        max_score_category = c;\n        max_score = input[c * step];\n      }\n    }\n\n    float sum = 0;\n    for (int c = 0; c < class_num; ++c) {\n      auto e = static_cast<float>(std::exp(input[c * step] - max_score));\n      sum += e;\n    }\n\n    score = static_cast<float>(\n        std::exp(input[max_score_category * step] - max_score) / sum);\n    category = max_score_category;\n  }\n}\n\nvoid YoloHelper::GetOneBoundingBox(\n    const float *anchor_data, int32_t category, float box_score,\n    size_t layer_index, int32_t step, int32_t feature_map_h,\n    int32_t feature_map_w, size_t anchor_index,\n    std::function<void(float x, float y, float w, float h, float box_score,\n                       int32_t category)> const &save_box_func) {\n  if (box_score < param_.score_threshold_[category]) {\n    return;\n  }\n\n  auto feature_width = param_.layer_wh_[2 * layer_index];\n  auto feature_height = param_.layer_wh_[2 * layer_index + 1];\n  float box_x;\n  float box_y;\n  float box_w;\n  float box_h;\n  float x_bias;\n  float y_bias;\n\n  auto offset = feature_map_h * feature_width + feature_map_w;\n  box_x = (feature_map_w + Sigmoid(anchor_data[offset])) / float(feature_width);\n  box_y = (feature_map_h + Sigmoid(anchor_data[step + offset])) /\n          float(feature_height);\n  x_bias =\n      param_.anchor_biases_[GetAnchorBiasesOffset(layer_index, 
anchor_index)];\n  y_bias =\n      param_\n          .anchor_biases_[GetAnchorBiasesOffset(layer_index, anchor_index) + 1];\n  box_w = (float)(std::exp(anchor_data[2 * step + offset]) * x_bias /\n                  param_.input_width_);\n  box_h = (float)(std::exp(anchor_data[3 * step + offset]) * y_bias /\n                  param_.input_height_);\n\n  box_x = std::max((box_x - box_w / 2.0F), 0.0F);\n  box_y = std::max((box_y - box_h / 2.0F), 0.0F);\n  box_w = std::min(box_w, 1 - box_x);\n  box_h = std::min(box_h, 1 - box_y);\n  if (param_.scale_to_input) {\n    box_x = box_x * param_.input_width_;\n    box_y = box_y * param_.input_height_;\n    box_w = box_w * param_.input_width_;\n    box_h = box_h * param_.input_height_;\n  }\n\n  if (box_w > 0 && box_h > 0 && box_x < param_.input_width_ &&\n      box_y < param_.input_height_) {\n    save_box_func(box_x, box_y, box_w, box_h, box_score, category);\n  }\n}\n\nsize_t YoloHelper::GetAnchorBiasesOffset(size_t layer_index,\n                                         size_t anchor_index) {\n  size_t offset = 0;\n  for (size_t li = 0; li < layer_index; ++li) {\n    offset += param_.anchor_num_[li] * 2;\n  }\n\n  offset += anchor_index * 2;\n  return offset;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/yolo_helper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_YOLO_HELPER_H\n#define MODELBOX_FLOWUNIT_YOLO_HELPER_H\n\n#include <algorithm>\n#include <cmath>\n#include <functional>\n#include <memory>\n#include <string>\n#include <unordered_set>\n#include <utility>\n#include <vector>\n\nclass YoloParam {\n public:\n  int32_t input_width_;\n  int32_t input_height_;\n  int32_t class_num_;\n  std::vector<float> score_threshold_;\n  std::vector<float> nms_threshold_;\n  int32_t layer_num_;\n  std::vector<int32_t> layer_wh_;\n  std::vector<uint64_t> anchor_num_;\n  std::vector<float> anchor_biases_;\n  bool scale_to_input;\n};\n\nclass YoloHelper {\n public:\n  YoloHelper(YoloParam param) : param_{std::move(param)} {}\n\n  virtual ~YoloHelper() = default;\n\n  void GetBoundingBox(\n      const float *single_layer_result, size_t layer_index,\n      std::function<void(float x, float y, float w, float h, float box_score,\n                         int category)> const &save_box_func);\n\n  template <class T>\n  void Sort(std::vector<T> &box_list,\n            std::function<bool(const T &box1, const T &box2)> const &compare);\n\n  template <class T>\n  void NMS(\n      std::vector<T> &src_box_list, std::vector<T> &dst_box_list,\n      std::function<bool(const T &box1, const T &box2,\n                         std::vector<float> &nms_threshold)> const 
&overlap);\n\n private:\n  inline float Sigmoid(float x) {\n    return static_cast<float>(1. / (1. + std::exp(-x)));\n  }\n\n  void GetCategoryAndScore(const float *input, int32_t step, int32_t class_num,\n                           int32_t &category, float &score);\n\n  void GetOneBoundingBox(\n      const float *anchor_data, int32_t category, float box_score,\n      size_t layer_index, int32_t step, int32_t feature_map_h,\n      int32_t feature_map_w, size_t anchor_index,\n      std::function<void(float x, float y, float w, float h, float box_score,\n                         int32_t category)> const &save_box_func);\n\n  size_t GetAnchorBiasesOffset(size_t layer_index, size_t anchor_index);\n\n  YoloParam param_;\n};\n\ntemplate <class T>\nvoid YoloHelper::Sort(\n    std::vector<T> &box_list,\n    std::function<bool(const T &box1, const T &box2)> const &compare) {\n  std::sort(box_list.begin(), box_list.end(), compare);\n}\n\ntemplate <class T>\nvoid YoloHelper::NMS(\n    std::vector<T> &src_box_list, std::vector<T> &dst_box_list,\n    std::function<bool(const T &box1, const T &box2,\n                       std::vector<float> &nms_threshold)> const &overlap) {\n  auto size = src_box_list.size();\n  std::unordered_set<size_t> set;\n  for (size_t i = 0; i < size; ++i) {\n    if (set.find(i) != set.end()) {\n      // Has been tested\n      continue;\n    }\n\n    dst_box_list.push_back(src_box_list[i]);\n    // Find box that overlap >= threshold\n    for (size_t j = i + 1; j < size; ++j) {\n      if (set.find(j) != set.end()) {\n        continue;\n      }\n\n      if (overlap(src_box_list[i], src_box_list[j], param_.nms_threshold_)) {\n        set.insert(j);  // Will not access this box next time\n      }\n    }\n  }\n}\n\n#endif  // MODELBOX_FLOWUNIT_YOLO_HELPER_H"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/yolobox_flowuint_test.cc",
    "content": "#include <fstream>\n#include <string>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/graph.h\"\n#include \"modelbox/session_context.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n#include \"yolobox_flowunit.h\"\n\nnamespace modelbox {\nclass CommonYoloboxFlowUintTest : public testing::Test {\n public:\n  CommonYoloboxFlowUintTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n\n    const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n    yolobox_dir = test_data_dir + \"/yolobox/\";\n    dest_toml = yolobox_dir + test_toml_file;\n    mkdir(yolobox_dir.c_str(), 0700);\n    CopyFile(src_toml, dest_toml, 0);\n  }\n\n  void TearDown() override {\n    auto ret = remove(dest_toml.c_str());\n    EXPECT_EQ(ret, 0);\n    ret = remove(yolobox_dir.c_str());\n    EXPECT_EQ(ret, 0);\n    driver_flow_ = nullptr;\n  }\n\n  std::shared_ptr<MockFlow> GetDriverFlow() { return driver_flow_; }\n\n  const std::string driver_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  const std::string test_toml_file = \"modelbox.test.yolobox.toml\";\n  std::string yolobox_dir;\n  std::string dest_toml;\n\n  void ReadFile(const char *path, char *buf, int len);\n\n private:\n  Status AddMockFlowUnit() { return STATUS_OK; }\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nvoid CommonYoloboxFlowUintTest::ReadFile(const char *path, char *buf, int len) {\n  std::ifstream fd(path, std::ios::binary);\n  fd.read(buf, len);\n  fd.close();\n}\n\nTEST_F(CommonYoloboxFlowUintTest, Process) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n             
                test_data_dir + \"\\\",\\\"\" + driver_lib_dir +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input]\n          input2[type=input]\n          output1[type=output]\n          test_yolobox[type=flowunit, flowunit=test_yolobox, device=cpu, deviceid=0, label=\"<layer15_conv> | <layer22_conv> | <Out_1>\"]\n          input1 ->test_yolobox:in_1\n          input2 ->test_yolobox:in_2\n          test_yolobox:out_1->output1\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"InitUnit\", toml_content, 10);\n  auto flow = driver_flow->GetFlow();\n\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n    auto layer15_conv = ext_data->CreateBufferList();\n    layer15_conv->Build({36000});\n    layer15_conv->Set(\"shape\", std::vector<size_t>({24, 15, 25}));\n    auto *data = (char *)layer15_conv->MutableData();\n    ReadFile(TEST_ASSETS \"/yolobox/data_36000_0\", data, 36000);\n    auto status = ext_data->Send(\"input1\", layer15_conv);\n    EXPECT_EQ(status, STATUS_OK);\n\n    auto layer22_conv = ext_data->CreateBufferList();\n    layer22_conv->Build({144000});\n    layer22_conv->Set(\"shape\", std::vector<size_t>({24, 30, 50}));\n    data = (char *)layer22_conv->MutableData();\n    ReadFile(TEST_ASSETS \"/yolobox/data_144000_0\", data, 144000);\n    status = ext_data->Send(\"input2\", layer22_conv);\n    EXPECT_EQ(status, STATUS_OK);\n\n    status = ext_data->Shutdown();\n    EXPECT_EQ(status, STATUS_OK);\n\n    OutputBufferList map_buffer_list;\n\n    status = ext_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n\n    auto buffer_list = map_buffer_list[\"output1\"];\n    for (size_t batch_idx = 0; batch_idx < buffer_list->Size(); ++batch_idx) {\n      auto bbox_count =\n          buffer_list->At(batch_idx)->GetBytes() / sizeof(BoundingBox);\n      const 
auto *boxes = static_cast<const BoundingBox *>(\n          buffer_list->At(batch_idx)->ConstData());\n      std::vector<std::vector<float>> result{\n          {741.81, 0.0, 219.01, 229.03, 0.998},\n          {1067.44, 1.78, 130.7, 79.52, 0.950},\n          {16.24, 815.40, 395.60, 263.49, 0.779}};\n      for (size_t bbox_idx = 0; bbox_idx < bbox_count; ++bbox_idx) {\n        MBLOG_INFO << \" batch_idx:\" << batch_idx << \" bbox_idx:\" << bbox_idx\n                   << \" [\" << boxes[bbox_idx].x_ << \" \" << boxes[bbox_idx].y_\n                   << \" \" << boxes[bbox_idx].w_ << \" \" << boxes[bbox_idx].h_\n                   << \"]\"\n                   << \" score:\" << boxes[bbox_idx].score_\n                   << \", category:\" << boxes[bbox_idx].category_;\n        const float w_scale = 1920 / 800.0F;\n        const float h_scale = 1080 / 480.0F;\n        EXPECT_NEAR(boxes[bbox_idx].x_ * w_scale, result[bbox_idx][0], 0.1);\n        EXPECT_NEAR(boxes[bbox_idx].y_ * h_scale, result[bbox_idx][1], 0.1);\n        EXPECT_NEAR(boxes[bbox_idx].w_ * w_scale, result[bbox_idx][2], 0.1);\n        EXPECT_NEAR(boxes[bbox_idx].h_ * h_scale, result[bbox_idx][3], 0.1);\n        EXPECT_NEAR(boxes[bbox_idx].score_, result[bbox_idx][4], 0.01);\n      }\n    }\n  }\n\n  EXPECT_EQ(flow->Wait(3 * 1000), STATUS_TIMEDOUT);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/yolobox_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"yolobox_flowunit.h\"\n\n#include <math.h>\n#include <securec.h>\n\n#include <cmath>\n#include <vector>\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"virtualdriver_yolobox.h\"\n\nYoloboxFlowUnit::YoloboxFlowUnit() = default;\n\nYoloboxFlowUnit::~YoloboxFlowUnit() = default;\n\nmodelbox::Status YoloboxFlowUnit::InitYoloParam(YoloParam &param) {\n  auto config =\n      std::dynamic_pointer_cast<YoloBoxVirtualFlowUnitDesc>(GetFlowUnitDesc())\n          ->GetConfiguration();\n  param.input_width_ = config->GetInt32(INPUT_WIDTH);\n  param.input_height_ = config->GetInt32(INPUT_HEIGHT);\n  param.class_num_ = config->GetInt32(CLASS_NUM);\n  param.score_threshold_ = config->GetFloats(SCORE_THRESHOLD);\n  param.nms_threshold_ = config->GetFloats(NMS_THRESHOLD);\n  param.layer_num_ = config->GetInt32(YOLO_OUTPUT_LAYER_NUM);\n  param.layer_wh_ = config->GetInt32s(YOLO_OUTPUT_LAYER_WH);\n  param.anchor_num_ = config->GetUint64s(ANCHOR_NUM);\n  param.anchor_biases_ = config->GetFloats(ANCHOR_BIASES);\n  param.scale_to_input = config->GetBool(SCALE_TO_INPUT, true);\n\n  if (param.score_threshold_.empty()) {\n    MBLOG_ERROR << SCORE_THRESHOLD << \" should not empty\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  if (param.nms_threshold_.empty()) {\n    MBLOG_ERROR 
<< NMS_THRESHOLD << \" should not empty\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  if (param.layer_wh_.size() != ((size_t)param.layer_num_ * 2)) {\n    MBLOG_ERROR << YOLO_OUTPUT_LAYER_WH << \" size != \" << YOLO_OUTPUT_LAYER_NUM\n                << \" * 2\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  if (param.anchor_num_.size() != (size_t)param.layer_num_) {\n    MBLOG_ERROR << ANCHOR_NUM << \" size != \" << YOLO_OUTPUT_LAYER_NUM;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto total_anchor = std::accumulate(param.anchor_num_.begin(),\n                                      param.anchor_num_.end(), (size_t)0);\n  if ((total_anchor * 2) != param.anchor_biases_.size()) {\n    MBLOG_ERROR << ANCHOR_BIASES << \" size != total anchor number * 2\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  // Auto fill last value to meet class num, so developer does not need to write\n  // same value for all class\n  while ((size_t)param.class_num_ > param.score_threshold_.size()) {\n    param.score_threshold_.push_back(param.score_threshold_.back());\n  }\n\n  while ((size_t)param.class_num_ > param.nms_threshold_.size()) {\n    param.nms_threshold_.push_back(param.nms_threshold_.back());\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloboxFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  YoloParam param;\n  auto ret = InitYoloParam(param);\n  if (!ret) {\n    return ret;\n  }\n\n  yolo_helper_ = std::make_shared<YoloHelper>(param);\n  auto desc = GetFlowUnitDesc();\n  auto input_list = desc->GetFlowUnitInput();\n  for (auto &input : input_list) {\n    input_name_list_.push_back(input.GetPortName());\n  }\n\n  if (input_name_list_.empty()) {\n    MBLOG_ERROR << \"Input is empty\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto output_list = desc->GetFlowUnitOutput();\n  for (auto &output : output_list) {\n    output_name_list_.push_back(output.GetPortName());\n  }\n\n  if (output_name_list_.size() != 1) {\n    
MBLOG_ERROR << \"Should only has one output port\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloboxFlowUnit::ReadTensorData(\n    std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>> &tensor_data,\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  tensor_data.clear();\n  size_t batch_size = 0;\n  for (const auto &tensor_name : input_name_list_) {\n    auto input_buffers = data_ctx->Input(tensor_name);\n    if (batch_size == 0) {\n      batch_size = input_buffers->Size();\n      tensor_data.resize(batch_size);\n    } else if (input_buffers->Size() != batch_size) {\n      MBLOG_ERROR << \"buffers [\" << tensor_name << \"] size [\"\n                  << input_buffers->Size() << \"] is not same with other[\"\n                  << batch_size << \"]\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    for (size_t batch_index = 0; batch_index < batch_size; ++batch_index) {\n      tensor_data[batch_index].push_back(input_buffers->At(batch_index));\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool Comp(const BoundingBox &box1, const BoundingBox &box2) {\n  return box1.score_ > box2.score_;\n}\n\nbool Overlap(const BoundingBox &box1, const BoundingBox &box2,\n             std::vector<float> &nms_threshold) {\n  if (box1.category_ != box2.category_) {\n    return false;\n  }\n\n  float threshold = nms_threshold[box1.category_];\n  float left = std::max(box1.x_, box2.x_);\n  float right = std::min(box1.x_ + box1.w_, box2.x_ + box2.w_);\n  float top = std::max(box1.y_, box2.y_);\n  float down = std::min(box1.y_ + box1.h_, box2.y_ + box2.h_);\n  if (left >= right or top >= down) {\n    return false;\n  }\n\n  float inter_area = (right - left) * (down - top);\n  float union_area = (box1.w_ * box1.h_ + box2.w_ * box2.h_ - inter_area);\n  return inter_area >= threshold * union_area;\n}\n\nmodelbox::Status YoloboxFlowUnit::SendBoxData(\n    std::vector<std::vector<BoundingBox>> &box_data,\n    
std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  std::vector<size_t> shape;\n  shape.reserve(box_data.size());\n  for (auto &boxes : box_data) {\n    shape.push_back(boxes.size() * sizeof(BoundingBox));\n  }\n\n  auto output_buffers = data_ctx->Output(output_name_list_[0]);\n  output_buffers->Build(shape);\n  for (size_t batch_index = 0; batch_index < box_data.size(); ++batch_index) {\n    auto &box_data_for_single_batch = box_data[batch_index];\n    auto *box_buffer_ptr =\n        (BoundingBox *)(output_buffers->At(batch_index)->MutableData());\n    if (box_buffer_ptr == nullptr) {\n      continue;\n    }\n\n    for (size_t box_index = 0; box_index < box_data_for_single_batch.size();\n         ++box_index) {\n      box_buffer_ptr[box_index] = box_data_for_single_batch[box_index];\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloboxFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>> tensor_data;\n  auto ret = ReadTensorData(tensor_data, data_ctx);\n  if (!ret) {\n    return ret;\n  }\n\n  std::vector<std::vector<BoundingBox>> detected_boxes_mul_batch;\n  for (auto &tensors_in_one_batch : tensor_data) {\n    std::vector<BoundingBox> detected_boxes_single_batch;\n    for (size_t tensor_index = 0; tensor_index < tensors_in_one_batch.size();\n         ++tensor_index) {\n      auto &tensor = tensors_in_one_batch[tensor_index];\n      yolo_helper_->GetBoundingBox(\n          (const float *)tensor->ConstData(), tensor_index,\n          [&detected_boxes_single_batch](float x, float y, float w, float h,\n                                         float box_score, int category) {\n            detected_boxes_single_batch.emplace_back(x, y, w, h, category,\n                                                     box_score);\n          });\n    }\n\n    yolo_helper_->Sort<BoundingBox>(detected_boxes_single_batch, Comp);\n    std::vector<BoundingBox> final_boxes;\n    
yolo_helper_->NMS<BoundingBox>(detected_boxes_single_batch, final_boxes,\n                                   Overlap);\n    detected_boxes_mul_batch.push_back(final_boxes);\n  }\n\n  ret = SendBoxData(detected_boxes_mul_batch, data_ctx);\n  if (!ret) {\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string YoloboxFlowUnitFactory::GetFlowUnitFactoryType() {\n  return modelbox::DEVICE_TYPE;\n}\n\nstd::string YoloboxFlowUnitFactory::GetVirtualType() { return YOLO_TYPE; }\n\nstd::shared_ptr<modelbox::FlowUnit>\nYoloboxFlowUnitFactory::VirtualCreateFlowUnit(const std::string &unit_name,\n                                              const std::string &unit_type,\n                                              const std::string &virtual_type) {\n  return std::make_shared<YoloboxFlowUnit>();\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nYoloboxFlowUnitFactory::FlowUnitProbe() {\n  return {};\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/common_yolobox/yolobox_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_YOLOBOXFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_YOLOBOXFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <map>\n#include <string>\n#include <vector>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n#include \"yolo_helper.h\"\n\nclass BoundingBox {\n public:\n  float x_;\n  float y_;\n  float w_;\n  float h_;\n  int32_t category_;\n  float score_;\n\n  BoundingBox(float x, float y, float w, float h, int32_t category, float score)\n      : x_(x), y_(y), w_(w), h_(h), category_(category), score_(score) {}\n  ~BoundingBox() = default;\n};\n\nconstexpr const char *FLOWUNIT_NAME = \"yolov3_postprocess\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC = \"A cpu yolobox flowunit\";\nconstexpr const char *YOLO_TYPE = \"yolov3_postprocess\";\nconstexpr const char *INPUT_WIDTH = \"input_width\";\nconstexpr const char *INPUT_HEIGHT = \"input_height\";\nconstexpr const char *CLASS_NUM = \"class_num\";\n\n// will auto fill last value, if len(score_list) != class_num\nconstexpr const char *SCORE_THRESHOLD = \"score_threshold\";\n\n// will auto fill last value, if len(nms_list) != class_num\nconstexpr const char *NMS_THRESHOLD = 
\"nms_threshold\";\nconstexpr const char *YOLO_OUTPUT_LAYER_NUM = \"yolo_output_layer_num\";\nconstexpr const char *YOLO_OUTPUT_LAYER_WH = \"yolo_output_layer_wh\";\nconstexpr const char *ANCHOR_NUM = \"anchor_num\";\nconstexpr const char *ANCHOR_BIASES = \"anchor_biases\";\n\n// will scale result to input as default\nconstexpr const char *SCALE_TO_INPUT = \"scale_to_input\";\n\nclass YoloboxFlowUnit : public modelbox::FlowUnit {\n public:\n  YoloboxFlowUnit();\n  ~YoloboxFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override { return modelbox::STATUS_OK; };\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status InitYoloParam(YoloParam &param);\n\n  modelbox::Status ReadTensorData(\n      std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>> &tensor_data,\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  modelbox::Status SendBoxData(\n      std::vector<std::vector<BoundingBox>> &box_data,\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  std::shared_ptr<YoloHelper> yolo_helper_;\n  std::vector<std::string> input_name_list_;\n  std::vector<std::string> output_name_list_;\n};\n\nclass YoloboxFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  YoloboxFlowUnitDesc() = default;\n  ~YoloboxFlowUnitDesc() override = default;\n};\n\nclass YoloboxFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  YoloboxFlowUnitFactory() = default;\n  ~YoloboxFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override;\n  std::string GetVirtualType() override;\n\n  std::map<std::string, 
std::shared_ptr<modelbox::FlowUnitDesc>>\n  FlowUnitProbe() override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_YOLOBOXFLOWUNIT_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/cv_crop/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"crop\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfind_package(OpenCV)\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable crop flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OPENCV_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_CV_CROP_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY 
${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_CV_CROP_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CV_CROP_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CV_CROP_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_CV_CROP_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/cv_crop/cv_crop_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"cv_crop_flowunit.h\"\n\n#include <securec.h>\n\n#include \"image_process.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nCVCropFlowUnit::CVCropFlowUnit() = default;\nCVCropFlowUnit::~CVCropFlowUnit() = default;\n\nmodelbox::Status CVCropFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status CVCropFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status CVCropFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process image cv_crop\";\n\n  auto input_img_bufs = data_ctx->Input(\"in_image\");\n  if (input_img_bufs->Size() <= 0) {\n    auto errMsg =\n        \"in_image image batch is \" + std::to_string(input_img_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto input_box_bufs = data_ctx->Input(\"in_region\");\n  if (input_box_bufs->Size() <= 0) {\n    auto errMsg =\n        \"in_region roi box batch is \" + std::to_string(input_box_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  if (input_img_bufs->Size() != input_box_bufs->Size()) {\n    auto errMsg = \"in_image batch is not match in_region batch. 
in_image is \" +\n                  std::to_string(input_img_bufs->Size()) + \",in_region is \" +\n                  std::to_string(input_box_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_image\");\n  output_bufs->CopyMeta(input_img_bufs);\n\n  for (size_t i = 0; i < input_img_bufs->Size(); ++i) {\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t width_dest = 0;\n    int32_t height_dest = 0;\n    int32_t channel = 0;\n    std::string pix_fmt;\n\n    bool exists = false;\n    auto img_buffer = input_img_bufs->At(i);\n    exists = img_buffer->Get(\"height\", height);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key height\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key height\"};\n    }\n\n    exists = img_buffer->Get(\"width\", width);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key width\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key width\"};\n    }\n\n    exists = img_buffer->Get(\"pix_fmt\", pix_fmt);\n    if (!exists && !img_buffer->Get(\"channel\", channel)) {\n      MBLOG_ERROR << \"meta don't have key pix_fmt or channel\";\n      return {modelbox::STATUS_NOTSUPPORT,\n              \"meta don't have key pix_fmt or channel\"};\n    }\n\n    if (exists && pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n      MBLOG_ERROR << \"unsupport pix format.\";\n      return {modelbox::STATUS_NOTSUPPORT, \"unsupport pix format.\"};\n    }\n\n    channel = RGB_CHANNLES;\n\n    const auto *bbox = static_cast<const imageprocess::RoiBox *>(\n        input_box_bufs->ConstBufferData(i));\n    if (!imageprocess::CheckRoiBoxVaild(bbox, width, height)) {\n      return {modelbox::STATUS_FAULT, \"roi box param is invaild !\"};\n    }\n\n    MBLOG_DEBUG << \"crop bbox :  \" << bbox->x << \" \" << bbox->y << \" \"\n                << bbox->w << \" \" << bbox->h;\n    auto *input_data = const_cast<void 
*>(input_img_bufs->ConstBufferData(i));\n    cv::Mat img_data(cv::Size(width, height), CV_8UC3, input_data);\n    MBLOG_DEBUG << \"ori image : cols \" << img_data.cols << \" rows \"\n                << img_data.rows << \" channel \" << img_data.channels();\n\n    cv::Rect my_roi(bbox->x, bbox->y, bbox->w, bbox->h);\n    cv::Mat cropped;\n    cropped = img_data(my_roi);\n    auto img_dest = std::make_shared<cv::Mat>();\n    cropped.copyTo(*img_dest);\n    size_t size_bytes = img_dest->total() * img_dest->elemSize();\n    output_bufs->EmplaceBack(img_dest->data, size_bytes, [img_dest](void *ptr) {\n      /* Only capture pkt */\n    });\n    auto output_buffer = output_bufs->Back();\n    width_dest = img_dest->cols;\n    height_dest = img_dest->rows;\n    output_buffer->Set(\"width\", width_dest);\n    output_buffer->Set(\"height\", height_dest);\n    output_buffer->Set(\"width_stride\", width_dest * 3);\n    output_buffer->Set(\"height_stride\", height_dest);\n    output_buffer->Set(\"channel\", channel);\n    output_buffer->Set(\"pix_fmt\", pix_fmt);\n    output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    output_buffer->Set(\"shape\", std::vector<size_t>{(size_t)height_dest,\n                                                    (size_t)width_dest, 3});\n    output_buffer->Set(\"layout\", std::string(\"hwc\"));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(CVCropFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitInput({\"in_region\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  
desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/cv_crop/cv_crop_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_CV_CROP_FLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_CV_CROP_FLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"crop\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: An OpenCV crop flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t  The other input port 'in_region' buffer type is rectangle, the memory \"\n    \"arrangement is [x,y,w,h].\\n\"\n    \"\\t  it contain the following meta fields: \\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': \"\n    \"[rgb_packed,bgr_packed], 'layout': [hwc]. One image can only be cropped \"\n    \"with one \"\n    \"rectangle and output one crop image.\";\nconst int RGB_CHANNLES = 3;\n\nclass CVCropFlowUnit : public modelbox::FlowUnit {\n public:\n  CVCropFlowUnit();\n  ~CVCropFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_CV_CROP_FLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/cv_crop/cv_crop_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"cv_crop_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass CVCropFlowUnitTest : public testing::Test {\n public:\n  CVCropFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> CVCropFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\ntypedef struct RoiBox {\n  int32_t x, y, w, h;\n} RoiBox;\n\nStatus CVCropFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"test_0_1_cv_crop\", {}, {\"Out_img\", \"Out_box\"});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n    
auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                         const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n      std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n      mock_flowunit_wp = mock_flowunit;\n      auto spt = mock_flowunit_wp.lock();\n      auto ext_data = spt->CreateExternalData();\n      if (!ext_data) {\n        const auto *err_msg = \"can not get external data.\";\n        modelbox::Status ret = {modelbox::STATUS_NODATA, err_msg};\n        MBLOG_ERROR << err_msg;\n        return ret;\n      }\n\n      auto buffer_list = ext_data->CreateBufferList();\n      buffer_list->Build({10 * sizeof(int)});\n      auto *data = (int *)buffer_list->MutableData();\n      for (size_t i = 0; i < 10; i++) {\n        data[i] = i;\n      }\n\n      auto status = ext_data->Send(buffer_list);\n      if (!status) {\n        MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n        return status;\n      }\n\n      status = ext_data->Close();\n      if (!status) {\n        MBLOG_ERROR << \"external data close failed:\" << status;\n        return status;\n      }\n\n      return modelbox::STATUS_OK;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext> &data_ctx,\n            const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n      MBLOG_INFO << \"test_0_1_cv_crop process\";\n\n      auto output_img_bufs = data_ctx->Output(\"Out_img\");\n\n      uint32_t batch_size = 10;\n\n      std::string img_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n      cv::Mat img_data = cv::imread(img_path);\n      MBLOG_INFO << \"image col \" << img_data.cols << \"  row \" << img_data.rows\n                 << \" channel:\" << img_data.channels();\n      std::vector<size_t> img_shape_vector(\n          batch_size, img_data.total() * img_data.elemSize());\n\n      output_img_bufs->Build(img_shape_vector);\n\n      for (size_t i = 0; i < batch_size; ++i) {\n        std::string img_path = 
std::string(TEST_ASSETS) + \"/test.jpg\";\n        cv::Mat img_data = cv::imread(img_path);\n        int32_t cols = img_data.cols;\n        int32_t rows = img_data.rows;\n        int32_t channels = img_data.channels();\n        output_img_bufs->At(i)->Set(\"width\", cols);\n        output_img_bufs->At(i)->Set(\"height\", rows);\n        output_img_bufs->At(i)->Set(\"channel\", channels);\n        auto *output_img_data =\n            static_cast<uchar *>(output_img_bufs->MutableBufferData(i));\n        memcpy_s(output_img_data, output_img_bufs->At(i)->GetBytes(),\n                 img_data.data, img_data.total() * img_data.elemSize());\n      }\n\n      auto output_box_bufs = data_ctx->Output(\"Out_box\");\n\n      std::vector<size_t> box_shape_vector(batch_size, sizeof(RoiBox));\n\n      output_box_bufs->Build(box_shape_vector);\n\n      for (size_t i = 0; i < 5; ++i) {\n        auto *output_box1_data = output_box_bufs->MutableBufferData(2 * i);\n        std::shared_ptr<RoiBox> bbox1 = std::make_shared<RoiBox>();\n        bbox1->w = 100;\n        bbox1->h = 110;\n        bbox1->x = 30;\n        bbox1->y = 100;\n        memcpy_s(output_box1_data, sizeof(RoiBox), bbox1.get(), sizeof(RoiBox));\n\n        auto *output_box2_data = output_box_bufs->MutableBufferData(2 * i + 1);\n        std::shared_ptr<RoiBox> bbox2 = std::make_shared<RoiBox>();\n        bbox2->w = 50;\n        bbox2->h = 90;\n        bbox2->x = 60;\n        bbox2->y = 130;\n        memcpy_s(output_box2_data, sizeof(RoiBox), bbox2.get(), sizeof(RoiBox));\n      }\n\n      MBLOG_INFO << \"finsish test_0_1_cv_crop\";\n\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterOpenFunc(open_func);\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc = 
GenerateFlowunitDesc(\"test_1_0_cv_crop\", {\"In_img\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n    auto process_func =\n        [=](const std::shared_ptr<DataContext> &op_ctx,\n            const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n      MBLOG_INFO << \"test_1_0_cv_crop process\";\n\n      auto input_buf = op_ctx->Input(\"In_img\");\n      if (input_buf->Size() <= 0) {\n        auto errMsg =\n            \"input images size is \" + std::to_string(input_buf->Size());\n        MBLOG_ERROR << errMsg;\n      }\n\n      for (size_t i = 0; i < input_buf->Size(); ++i) {\n        int32_t width = 0;\n        int32_t height = 0;\n        int32_t channels = 0;\n\n        bool exists = false;\n\n        exists = input_buf->At(i)->Get(\"width\", width);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key width\";\n        }\n        exists = input_buf->At(i)->Get(\"height\", height);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key height\";\n        }\n        exists = input_buf->At(i)->Get(\"channel\", channels);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key channel\";\n        }\n\n        const auto *input_data =\n            static_cast<const uchar *>(input_buf->ConstBufferData(i));\n\n        cv::Mat img_data(cv::Size(width, height), CV_8UC3);\n        memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                 input_data, input_buf->At(i)->GetBytes());\n        std::string name =\n            std::string(TEST_DATA_DIR) + \"/test\" + std::to_string(i) + \".jpg\";\n        cv::imwrite(name, img_data);\n      }\n      MBLOG_INFO << \"finsish test_1_0_cv_crop\";\n\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), 
TEST_DRIVER_DIR);\n  }\n  return STATUS_OK;\n}\n\nTEST_F(CVCropFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1_cv_crop[type=flowunit, flowunit=test_0_1_cv_crop, device=cpu, deviceid=0, label=\"<Out_img> | <Out_box>\", batch_size=10]\n          cv_crop[type=flowunit, flowunit=crop, device=cpu, deviceid=0, label=\"<in_image> | <in_region> | <out_image>\", batch_size=10]\n          test_1_0_cv_crop[type=flowunit, flowunit=test_1_0_cv_crop, device=cpu, deviceid=0, label=\"<In_img>\", batch_size=10]                                \n          test_0_1_cv_crop:Out_img  -> cv_crop:in_image \n          test_0_1_cv_crop:Out_box -> cv_crop:in_region\n          cv_crop:out_image -> test_1_0_cv_crop:In_img                                                                     \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto ret =\n      GetDriverFlow()->BuildAndRun(\"CVCropFlowUnit\", toml_content, 3 * 1000);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto &elem : filePath) {\n    MBLOG_DEBUG << \"filePath: \" << elem;\n  }\n\n  for (size_t i = 0; i < 5; i++) {\n    for (size_t j = 0; j < 2; j++) {\n      std::string expected_file_path = std::string(TEST_ASSETS) +\n                                       \"/crop_result_\" + std::to_string(j) +\n                                       \".jpg\";\n      cv::Mat expected_img = cv::imread(expected_file_path);\n\n      std::string crop_result_file_path = std::string(TEST_DATA_DIR) + \"/test\" +\n                                          std::to_string(2 * i + j) + \".jpg\";\n   
   cv::Mat crop_result_img = cv::imread(crop_result_file_path);\n\n      int result_data_size =\n          crop_result_img.total() * crop_result_img.elemSize();\n      int expected_data_size = expected_img.total() * expected_img.elemSize();\n      EXPECT_EQ(result_data_size, expected_data_size);\n\n      int ret =\n          memcmp(crop_result_img.data, expected_img.data, result_data_size);\n      EXPECT_EQ(ret, 0);\n\n      auto rmret = remove(crop_result_file_path.c_str());\n      EXPECT_EQ(rmret, 0);\n    }\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_generator/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"data_source_generator\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_generator/data_source_generator.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"data_source_generator.h\"\n\n#include <securec.h>\n\n#include <sstream>\n#include <unordered_set>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nDataSourceGeneratorFlowUnit::DataSourceGeneratorFlowUnit() = default;\nDataSourceGeneratorFlowUnit::~DataSourceGeneratorFlowUnit() = default;\n\nstatic std::unordered_set<std::string> g_predefined_keys{\"type\",\n                                                         \"flowunit\",\n                                                         \"device\",\n                                                         \"deviceid\",\n                                                         \"label\",\n                                                         \"batch_size\",\n                                                         \"queue_size\",\n                                                         \"queue_size_event\",\n                                                         \"queue_size_external\",\n                                                         \"source_type\"};\n\nmodelbox::Status DataSourceGeneratorFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto source_type = opts->GetString(\"source_type\");\n  auto all_config_keys = opts->GetKeys();\n  std::stringstream ss;\n  ss << \"{\";\n 
 std::unordered_set<std::string> output_keys;\n  for (const auto &key : all_config_keys) {\n    if (g_predefined_keys.find(key) != g_predefined_keys.end()) {\n      continue;\n    }\n\n    output_keys.insert(key);\n    ss << \"\\\"\" << key << \"\\\":\\\"\" << opts->GetString(key) << \"\\\",\";\n  }\n  ss.seekp(-1, std::stringstream::end);\n  ss << \"}\";\n\n  if (source_type.empty() || output_keys.empty()) {\n    MBLOG_ERROR << \"source_type and source config must be set in config\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto source_config = ss.str();\n  MBLOG_INFO << \"source type is : \" << source_type;\n  MBLOG_INFO << \"source config is : \" << source_config;\n\n  auto ext_data = CreateExternalData();\n  if (!ext_data) {\n    MBLOG_ERROR << \"can not get external data.\";\n  }\n\n  auto output_buffers = ext_data->CreateBufferList();\n  output_buffers->BuildFromHost({source_config.size()},\n                                (void *)source_config.data(),\n                                source_config.size());\n  auto buffer = output_buffers->At(0);\n  buffer->Set(\"source_type\", source_type);\n\n  auto status = ext_data->Send(output_buffers);\n  if (!status) {\n    MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n  }\n\n  status = ext_data->Close();\n  if (!status) {\n    MBLOG_ERROR << \"external data close failed:\" << status;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DataSourceGeneratorFlowUnit::Close() {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DataSourceGeneratorFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto output_buffers = data_ctx->Output(\"out_data\");\n  auto input_buffers = data_ctx->External();\n  for (auto &buffer : *input_buffers) {\n    output_buffers->PushBack(buffer);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(DataSourceGeneratorFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Input\");\n  
desc.AddFlowUnitOutput({\"out_data\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_generator/data_source_generator.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_GENERATOR_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_GENERATOR_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"data_source_generator\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: The operator can generator test data source config for \"\n    \"data_source_parser. \\n\"\n    \"\\t@Port parameter:  The output port buffer data indicate data source \"\n    \"config. \\n\"\n    \"\\t@Constraint: This flowunit is usually followed by 'data_source_parser'.\";\n\nclass DataSourceGeneratorFlowUnit : public modelbox::FlowUnit {\n public:\n  DataSourceGeneratorFlowUnit();\n  ~DataSourceGeneratorFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_GENERATOR_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_generator/data_source_generator_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"data_source_generator.h\"\n\n#include <functional>\n#include <thread>\n\n#include \"common/mock_cert.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/iam_auth.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n\nclass DataSourceGeneratorFlowUnitTest : public testing::Test {\n public:\n  DataSourceGeneratorFlowUnitTest()\n      : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override { auto ret = AddMockFlowUnit(); };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n  std::shared_ptr<MockFlow> RunDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> DataSourceGeneratorFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<MockFlow> DataSourceGeneratorFlowUnitTest::RunDriverFlow() {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n            
                 R\"([graph]\n    graphconf = '''digraph demo {\n          data_source_gengerator[type=flowunit, flowunit=data_source_generator, device=cpu, deviceid=0, source_type=\"url\", url=\"http://0.0.0.0:8080/video\", url_type=\"file\"]\n          data_source_parser[type=flowunit, flowunit=data_source_parser, device=cpu, deviceid=0]\n          data_source_parser_checker[type=flowunit, flowunit=data_source_parser_checker, device=cpu, deviceid=0]\n          data_source_gengerator:out_data -> data_source_parser:in_data\n          data_source_parser:out_video_url -> data_source_parser_checker:stream_meta\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret =\n      driver_flow->BuildAndRun(\"data_source_gengerator\", toml_content, -1);\n\n  return driver_flow;\n}\n\nStatus DataSourceGeneratorFlowUnitTest::AddMockFlowUnit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"data_source_parser_checker\", {\"stream_meta\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext>& data_ctx,\n          const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n    auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n    EXPECT_NE(stream_meta, nullptr);\n    if (!stream_meta) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    auto source_url = std::static_pointer_cast<std::string>(\n        stream_meta->GetMeta(\"source_url\"));\n    EXPECT_NE(source_url, nullptr);\n    if (source_url != nullptr) {\n      EXPECT_FALSE(source_url->empty());\n      EXPECT_EQ(*source_url, \"http://0.0.0.0:8080/video\");\n    }\n    return modelbox::STATUS_SUCCESS;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n  return 
STATUS_OK;\n}\n\nTEST_F(DataSourceGeneratorFlowUnitTest, UrlInputTest) {\n  auto driver_flow = RunDriverFlow();\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"data_source_parser\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_SOURCE_CONTEXT_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_DRIVER_UTIL_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE 
INTERNAL \"\")\n\nadd_subdirectory(parser_plugin)\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/data_source_parser_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"data_source_parser_flowunit.h\"\n\n#include <securec.h>\n\n#include \"driver_util.h\"\n#include \"modelbox/base/config.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nmodelbox::Status DataSourceParserFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto dev_mgr = GetBindDevice()->GetDeviceManager();\n  if (dev_mgr == nullptr) {\n    MBLOG_ERROR << \"Can not get device manger\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto drivers = dev_mgr->GetDrivers();\n  if (drivers == nullptr) {\n    MBLOG_ERROR << \"Can not get drivers\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = driverutil::GetPlugin<modelbox::DataSourceParserPlugin>(\n      DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN, drivers, factories_, plugins_);\n  if (!ret) {\n    return ret;\n  }\n\n  for (auto &item : plugins_) {\n    auto ret = item.second->Init(opts);\n    if (!ret) {\n      MBLOG_ERROR << \"Init plugin \" << item.first\n                  << \" failed, detail : \" << ret.Errormsg();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DataSourceParserFlowUnit::Close() {\n  for (auto &item : plugins_) {\n    item.second->Deinit();\n  }\n\n  plugins_.clear();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status 
DataSourceParserFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto session_ctx = data_ctx->GetSessionContext();\n  if (!session_ctx) {\n    MBLOG_ERROR << \"Session data_ctx is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto input_buffer_list = data_ctx->Input(INPUT_DATA_SOURCE_CFG);\n  std::string source_type;\n  std::vector<std::string> uri_list;\n  if (input_buffer_list->Size() != 1) {\n    MBLOG_ERROR << \"Only support one data source config\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto buffer = input_buffer_list->At(0);\n  const auto *inbuff_data = (const char *)buffer->ConstData();\n  if (inbuff_data == nullptr) {\n    return {modelbox::STATUS_INVALID, \"input buffer is invalid.\"};\n  }\n\n  buffer->Get(INPUT_META_SOURCE_TYPE, source_type);\n  MBLOG_INFO << \"Try parse input config \" << source_type << \" for \"\n             << session_ctx->GetSessionId();\n  std::string data_source_cfg(inbuff_data, buffer->GetBytes());\n  std::shared_ptr<std::string> uri;\n  auto session_config = data_ctx->GetSessionConfig();\n  std::shared_ptr<modelbox::SourceContext> source_context =\n      Parse(session_ctx, session_config, source_type, data_source_cfg, uri);\n  if (source_context) {\n    source_context->SetDataSourceCfg(data_source_cfg);\n  } else {\n    MBLOG_ERROR << \"Parse data source \" << source_type << \" failed\";\n  }\n\n  auto ret =\n      WriteData(data_ctx, uri, source_type, data_source_cfg, source_context);\n  if (!ret) {\n    return ret;\n  }\n\n  MBLOG_INFO << \"parse input config ok for \" << session_ctx->GetSessionId();\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::SourceContext> DataSourceParserFlowUnit::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &source_type, const std::string &data_source_cfg,\n    std::shared_ptr<std::string> &uri) {\n  auto plugin = 
GetPlugin(source_type);\n  if (plugin == nullptr) {\n    MBLOG_ERROR << \"Can not find data source parse plugin for : \" << source_type\n                << \", please check whether plugin loaded\";\n    return nullptr;\n  }\n\n  std::string uri_str;\n  modelbox::DestroyUriFunc destroy_uri_func;\n  std::string stream_type;\n  auto ret = plugin->Parse(session_context, session_config, data_source_cfg,\n                           uri_str, destroy_uri_func);\n  if (!ret) {\n    MBLOG_ERROR << \"Parse config failed, source uri is empty\";\n  }\n\n  std::shared_ptr<modelbox::SourceContext> source_context =\n      std::make_shared<modelbox::SourceContext>(plugin, source_type);\n  source_context->SetRetryParam(plugin->GetRetryEnabled(),\n                                plugin->GetRetryInterval(),\n                                plugin->GetRetryTimes());\n  plugin->GetStreamType(data_source_cfg, stream_type);\n  source_context->SetStreamType(stream_type);\n  source_context->SetSessionContext(session_context);\n  source_context->SetSessionConfig(session_config);\n\n  uri = std::shared_ptr<std::string>(new std::string(uri_str),\n                                     [destroy_uri_func](std::string *ptr) {\n                                       if (destroy_uri_func) {\n                                         destroy_uri_func(*ptr);\n                                       }\n                                       delete ptr;\n                                     });\n  return source_context;\n}\n\nstd::shared_ptr<modelbox::DataSourceParserPlugin>\nDataSourceParserFlowUnit::GetPlugin(const std::string &source_type) {\n  auto item = plugins_.find(source_type);\n  if (item == plugins_.end()) {\n    return nullptr;\n  }\n\n  return item->second;\n}\n\nmodelbox::Status DataSourceParserFlowUnit::WriteData(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::shared_ptr<std::string> &uri, const std::string &source_type,\n    const std::string &data_source_cfg,\n    
std::shared_ptr<modelbox::SourceContext> &source_context) {\n  auto input_buffer_list = data_ctx->Input(INPUT_DATA_SOURCE_CFG);\n  auto buffer = input_buffer_list->At(0);\n  auto data_meta = std::make_shared<modelbox::DataMeta>();\n  data_meta->SetMeta(STREAM_META_SOURCE_URL, uri);\n  data_meta->SetMeta(PARSER_RETRY_CONTEXT, source_context);\n  data_ctx->SetOutputMeta(OUTPUT_STREAM_META, data_meta);\n\n  auto buffer_list = data_ctx->Output(OUTPUT_STREAM_META);\n  buffer_list->Build({1});\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(DataSourceParserFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Input\");\n  desc.AddFlowUnitInput({INPUT_DATA_SOURCE_CFG});\n  desc.AddFlowUnitOutput({OUTPUT_STREAM_META});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetStreamSameCount(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"retry_enable\", \"bool\", false, \"false\", \"enable source parser retry\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"retry_interval_ms\", \"int\", false, \"1000\",\n                               \"the source parser retry interval in ms\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"retry_count_limit\", \"int\", false, \"-1\",\n                               \"the source parser retry count limit\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/data_source_parser_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <map>\n#include <string>\n#include <vector>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/data_source_parser_plugin.h\"\n#include \"modelbox/flowunit.h\"\n#include \"source_context.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"data_source_parser\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: this flowunit can obtain the video stream address or download \"\n    \"the video file to the local according to the input configuration data, \"\n    \"and output the url. Currently supported types have obs, vcn, vis, \"\n    \"resetful, url. \\n\"\n    \"\\t@Port parameter: The input buffer data type is char *, and contain the \"\n    \"following meta fields:\\n\"\n    \"\\t\\tField Name: source_type,   Type: string\\n\"\n    \"\\t  the output buffer data type is char *. \\n\"\n    \"\\t@Constraint: the field value range of this flowunit \"\n    \"support: 'source_type': \"\n    \"[obs, vcn, vis, restful, url]. 
This flowunit is usually followed by \"\n    \"'video_demuxer'.\";\nconstexpr const char *INPUT_DATA_SOURCE_CFG = \"in_data\";\nconstexpr const char *INPUT_META_SOURCE_TYPE = \"source_type\";\nconstexpr const char *OUTPUT_STREAM_META = \"out_video_url\";\nconstexpr const char *STREAM_META_SOURCE_URL = \"source_url\";\nconstexpr const char *SOURCE_PARSER_FLOWUNIT = \"source_parser_flowunit\";\nconstexpr const char *PARSER_RETRY_CONTEXT = \"source_context\";\n\nclass DataSourceParserFlowUnit : public modelbox::FlowUnit {\n public:\n  DataSourceParserFlowUnit() = default;\n  ~DataSourceParserFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::shared_ptr<modelbox::SourceContext> Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &source_type, const std::string &data_source_cfg,\n      std::shared_ptr<std::string> &uri);\n\n  std::shared_ptr<modelbox::DataSourceParserPlugin> GetPlugin(\n      const std::string &source_type);\n\n  modelbox::Status WriteData(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::shared_ptr<std::string> &uri, const std::string &source_type,\n      const std::string &data_source_cfg,\n      std::shared_ptr<modelbox::SourceContext> &source_context);\n\n  std::vector<std::shared_ptr<modelbox::DriverFactory>> factories_;\n  std::map<std::string, std::shared_ptr<modelbox::DataSourceParserPlugin>>\n      plugins_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/data_source_parser_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"data_source_parser_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <thread>\n\n#include \"common/mock_cert.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/iam_auth.h\"\n#define _TURN_OFF_PLATFORM_STRING\n#include \"cpprest/http_listener.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\n#define CHECK_SOURCE_OUTPUT_URL \"check_data_source_url_parser_output\"\n#define CHECK_SOURCE_OUTPUT_VIS \"check_data_source_vis_parser_output\"\n#define CHECK_SOURCE_OUTPUT_RESTFUL \"check_data_source_restful_parser_output\"\n\n#define RESTFUL_URL \"https://localhost:54321\"\n\nnamespace modelbox {\n\nclass DataSourceParserFlowUnitTest : public testing::Test {\n public:\n  DataSourceParserFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n  void PreparationToGetCert();\n  modelbox::Status HandleFunc(const web::http::http_request &request);\n  void MockRestfulServer(std::shared_ptr<MockFlow> &driver_flow);\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    cert_ = std::string(TEST_DATA_DIR) + \"/certificate.pem\";\n    key_ 
= std::string(TEST_DATA_DIR) + \"/private_key_nopass.pem\";\n\n    ASSERT_EQ(GenerateCert(key_, cert_), STATUS_OK);\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override {\n    driver_flow_ = nullptr;\n    remove(key_.c_str());\n    remove(cert_.c_str());\n  };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n  std::shared_ptr<MockFlow> RunDriverFlow(\n      const std::string &mock_flowunit_name);\n  modelbox::Status SendDataSourceCfg(std::shared_ptr<MockFlow> &driver_flow,\n                                     const std::string &data_source_cfg,\n                                     const std::string &source_type);\n\n  void GetMockKey(std::string &key, std::string &cert) {\n    key = key_;\n    cert = cert_;\n  }\n\n private:\n  Status AddMockFlowUnit();\n  Status AddMockUrl();\n  Status AddMockVis();\n  Status AddMockRestful();\n  std::string key_;\n  std::string cert_;\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> DataSourceParserFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<MockFlow> DataSourceParserFlowUnitTest::RunDriverFlow(\n    const std::string &mock_flowunit_name) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input, device=cpu, deviceid=0]\n          data_source_parser[type=flowunit, flowunit=data_source_parser, device=cpu, deviceid=0, label=\"\", plugin_dir=\")\" +\n                             test_lib_dir + R\"(\"]\n          )\" + mock_flowunit_name +\n                             R\"([type=flowunit, flowunit=)\" +\n                             mock_flowunit_name +\n                             R\"(, device=cpu, deviceid=0, label=\"\"]\n          input -> data_source_parser:in_data\n          data_source_parser:out_video_url -> )\" +\n               
              mock_flowunit_name + R\"(:stream_meta\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(mock_flowunit_name, toml_content, -1);\n\n  return driver_flow;\n}\n\nmodelbox::Status DataSourceParserFlowUnitTest::SendDataSourceCfg(\n    std::shared_ptr<MockFlow> &driver_flow, const std::string &data_source_cfg,\n    const std::string &source_type) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({data_source_cfg.size()});\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), data_source_cfg.data(),\n           data_source_cfg.size());\n  buffer->Set(\"source_type\", source_type);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Shutdown();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceParserFlowUnitTest::AddMockFlowUnit() {\n  AddMockUrl();\n  AddMockVis();\n  AddMockRestful();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceParserFlowUnitTest::AddMockUrl() {\n  auto mock_desc =\n      GenerateFlowunitDesc(CHECK_SOURCE_OUTPUT_URL, {\"stream_meta\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n    EXPECT_NE(stream_meta, nullptr);\n    if (!stream_meta) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    auto source_url = std::static_pointer_cast<std::string>(\n        stream_meta->GetMeta(\"source_url\"));\n    EXPECT_NE(source_url, nullptr);\n    if (source_url != nullptr) {\n      EXPECT_FALSE((*source_url).empty());\n      EXPECT_EQ(\n          (*source_url)\n              .substr((*source_url).rfind(':'),\n                      (*source_url).rfind('.') - (*source_url).rfind(':') + 1),\n          
\"://ip/path/test.\");\n    }\n    return modelbox::STATUS_SUCCESS;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n  return STATUS_OK;\n}\n\nStatus DataSourceParserFlowUnitTest::AddMockVis() {\n  auto mock_desc =\n      GenerateFlowunitDesc(CHECK_SOURCE_OUTPUT_VIS, {\"stream_meta\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n    EXPECT_NE(stream_meta, nullptr);\n    if (!stream_meta) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    auto source_url = std::static_pointer_cast<std::string>(\n        stream_meta->GetMeta(\"source_url\"));\n\n    EXPECT_EQ(*source_url, \"https://test.com\");\n\n    return modelbox::STATUS_SUCCESS;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n  return STATUS_OK;\n}\n\nStatus DataSourceParserFlowUnitTest::AddMockRestful() {\n  auto mock_desc =\n      GenerateFlowunitDesc(CHECK_SOURCE_OUTPUT_RESTFUL, {\"stream_meta\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n    EXPECT_NE(stream_meta, nullptr);\n    if (!stream_meta) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    auto source_url = std::static_pointer_cast<std::string>(\n        
stream_meta->GetMeta(\"source_url\"));\n\n    EXPECT_EQ(*source_url, \"rtsp://admin:password@127.0.0.0:808/2\");\n\n    return modelbox::STATUS_SUCCESS;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n  return STATUS_OK;\n}\n\nTEST_F(DataSourceParserFlowUnitTest, UrlInputTest) {\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_URL);\n\n  std::string source_type = \"url\";\n  std::string data_source_cfg_file = R\"({\n        \"url\": \"https://ip/path/test.avi\",\n        \"url_type\": \"file\"\n  })\";\n  auto ret = SendDataSourceCfg(driver_flow, data_source_cfg_file, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  std::string data_source_cfg_rtsp = R\"({\n        \"url\": \"rtsp://ip/path/test.sdp\",\n        \"url_type\": \"stream\"\n  })\";\n  ret = SendDataSourceCfg(driver_flow, data_source_cfg_rtsp, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(DataSourceParserFlowUnitTest, VisInputTest) {\n  // This test would be skipped, if no auth info is provided.\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_VIS);\n\n  std::string source_type = \"vis\";\n  std::string visEndPoint(config_file->GetString(\"data_source.visEndPoint\"));\n  std::string projectId(config_file->GetString(\"base.project_id\"));\n  std::string streamName(config_file->GetString(\"data_source.streamName\"));\n  std::string domainName(config_file->GetString(\"data_source.domainName\"));\n  std::string 
xroleName(config_file->GetString(\"data_source.xroleName\"));\n\n  std::string data_source_cfg = R\"({\n        \"visEndPoint\":\")\" + visEndPoint +\n                                R\"(\\\",\n        \"projectId\":\")\" + projectId +\n                                R\"(\\\", \n        \"streamName\":\")\" + streamName +\n                                R\"(\\\",\n        \"domainName\":\")\" + domainName +\n                                R\"(\\\",\n        \"xroleName\":\")\" + xroleName +\n                                R\"(\\\",\n        \"certificate\": true\n  })\";\n\n  PreparationToGetCert();\n\n  auto ret = SendDataSourceCfg(driver_flow, data_source_cfg, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nvoid DataSourceParserFlowUnitTest::PreparationToGetCert() {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n\n  std::string ak(config_file->GetString(\"base.ak\"));\n  std::string sk(config_file->GetString(\"base.sk\"));\n  std::string domain_id(config_file->GetString(\"base.domain_id\"));\n  std::string project_id(config_file->GetString(\"base.project_id\"));\n  std::string iam_host(config_file->GetString(\"base.iam_host\"));\n\n  modelbox::IAMAuth::GetInstance()->SetIAMHostAddress(iam_host);\n  if (modelbox::STATUS_OK != modelbox::IAMAuth::GetInstance()->SetConsigneeInfo(\n                                 ak, sk, domain_id, project_id)) {\n    MBLOG_ERROR << \"set Consignee failed\";\n    return;\n  }\n}\n\nTEST_F(DataSourceParserFlowUnitTest, CredentialTest) {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  
}\n  modelbox::AgencyInfo agency_info;\n  agency_info.xrole_name = \"admin\";\n  agency_info.user_domain_name = \"user\";\n  modelbox::UserAgencyCredential user_credential;\n\n  modelbox::Status code =\n      modelbox::IAMAuth::GetInstance()->GetUserAgencyProjectCredential(\n          user_credential, agency_info);\n  if (modelbox::STATUS_OK != code) {\n    MBLOG_ERROR << \"failed get user project credential\";\n  }\n  EXPECT_EQ(code, modelbox::STATUS_OK);\n}\n\nTEST_F(DataSourceParserFlowUnitTest, TokenTest) {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n  modelbox::AgencyInfo agency_info;\n  agency_info.xrole_name = \"admin\";\n  agency_info.user_domain_name = \"user\";\n  modelbox::UserAgencyToken user_token;\n  ProjectInfo project_info;\n  project_info.project_name = \"cn\";\n\n  modelbox::Status code =\n      modelbox::IAMAuth::GetInstance()->GetUserAgencyProjectToken(\n          user_token, agency_info, project_info);\n  if (modelbox::STATUS_OK != code) {\n    MBLOG_ERROR << \"failed get user project token\";\n  }\n  EXPECT_EQ(code, modelbox::STATUS_OK);\n}\n\nmodelbox::Status DataSourceParserFlowUnitTest::HandleFunc(\n    const web::http::http_request &request) {\n  utility::string_t uri = request.request_uri().to_string();\n  utility::string_t decode_uri = web::uri::decode(uri);\n  std::vector<std::string> uri_vec;\n  uri_vec = modelbox::StringSplit(uri, '?');\n  std::string file_path = uri_vec[0];\n  if (file_path != \"/test/get!@*&=Rtsp\") {\n    utility::string_t resp_body = \"Data Not Found\";\n    request.reply(web::http::status_codes::NotFound, resp_body);\n  }\n  std::string params = uri_vec[1];\n  std::vector<std::string> params_vec;\n  params_vec = modelbox::StringSplit(params, '&');\n  
std::vector<std::string> param_vec;\n  std::vector<std::string> param_value;\n  for (const auto &i : params_vec) {\n    param_vec = modelbox::StringSplit(i, '=');\n    param_value.push_back(web::uri::decode(param_vec[1]));\n  }\n\n  if (param_value[0] == \"1!@#$%^&*()_2\" && param_value[1] == \"two\") {\n    utility::string_t resp_body =\n        R\"({\"data\" : {\"url1\" : {\"url2\" : \"rtsp://admin:password@127.0.0.0:808/2\"}}})\";\n    request.reply(web::http::status_codes::OK, resp_body);\n  } else {\n    utility::string_t resp_body = \"Data Not Found\";\n    request.reply(web::http::status_codes::NotFound, resp_body);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid DataSourceParserFlowUnitTest::MockRestfulServer(\n    std::shared_ptr<MockFlow> &driver_flow) {\n  std::string request_url = RESTFUL_URL;\n  std::shared_ptr<web::http::experimental::listener::http_listener> listener;\n\n  web::http::experimental::listener::http_listener_config server_config;\n  server_config.set_timeout(std::chrono::seconds(60));\n\n  std::string cert;\n  std::string key;\n\n  GetMockKey(key, cert);\n\n  if (cert.length() > 0 && key.length() > 0) {\n    server_config.set_ssl_context_callback(\n        [cert, key](boost::asio::ssl::context &ctx) {\n          ctx.set_options(boost::asio::ssl::context::default_workarounds);\n          modelbox::HardeningSSL(ctx.native_handle());\n          ctx.use_certificate_file(\n              cert, boost::asio::ssl::context_base::file_format::pem);\n          ctx.use_private_key_file(key, boost::asio::ssl::context::pem);\n        });\n  }\n\n  listener = std::make_shared<web::http::experimental::listener::http_listener>(\n      request_url, server_config);\n  listener->support(web::http::methods::GET,\n                    [this](const web::http::http_request &request) {\n                      this->HandleFunc(request);\n                    });\n\n  try {\n    listener->open().wait();\n    MBLOG_INFO << \"start to listen \";\n  } catch 
(std::exception const &e) {\n    MBLOG_ERROR << e.what();\n  }\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(DataSourceParserFlowUnitTest, RestfulInputTest) {\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_RESTFUL);\n  std::string source_type = \"restful\";\n  std::string data_source_cfg_with_params = R\"({\n        \"request_url\":\")\" + std::string(RESTFUL_URL) +\n                                            \"/test/get!@*&=Rtsp\" + R\"(\",\n        \"params\":[{\"param_key\":\"id\",\"param_value\":\"1!@#$%^&*()_2\"},{\"param_key\":\"name\",\"param_value\":\"two\"}],\n        \"response_url_position\":\"data/url1/url2\",\n        \"headers\":{\"header1\":\"test1\",\"header2\":\"test2\"}\n  })\";\n  auto ret =\n      SendDataSourceCfg(driver_flow, data_source_cfg_with_params, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  std::string data_source_cfg_no_params =\n      R\"({\n        \"request_url\":\")\" +\n      std::string(RESTFUL_URL) +\n      \"/test/get!@*&=Rtsp?id=1%21%40%23%24%25%5E%26%2A%28%29_2&name=two\" +\n      R\"(\",\n        \"response_url_position\":\"data/url1/url2\"\n  })\";\n  ret = SendDataSourceCfg(driver_flow, data_source_cfg_no_params, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  MockRestfulServer(driver_flow);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(data_source_parser_plugin)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif(OBS_FOUND)\n    add_subdirectory(obs_source_parser)\nelse()\n    message(STATUS \"Not found obs library, disable obs source parser plugin\")\nendif()\n\nif(VCN_FOUND)\n    add_subdirectory(vcn_source_parser)\nelse()\n    message(STATUS \"Not found vcn library, disable vcn source parser plugin\")\nendif()\n\nif(CPPREST_FOUND)\n    add_subdirectory(url_source_parser)\nelse()\n    message(STATUS \"Not found cpprest library, disable url output broker plugin\")\nendif()\n\nif(CPPREST_FOUND)\n    add_subdirectory(vis_source_parser)\nelse()\n    message(STATUS \"Not found cpprest library, disable vis output broker plugin\")\nendif()\n\nif(CPPREST_FOUND)\n    add_subdirectory(restful_source_parser)\nelse()\n    message(STATUS \"Not found cpprest library, disable restful output broker plugin\")\nendif()\n\nadd_subdirectory(vcn_restful_source_parser)"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"obs\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_DRIVER_COMMON_LIB_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_OBS_CLIENT_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_OBS_CLIENT_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${OBS_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DRIVER_COMMON_LIB_FILE_REQUESTER})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"obs_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<ObsSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/obs_file_handler.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"obs_file_handler.h\"\n\nnamespace modelbox {\n\nStatus OBSFileHandler::Get(unsigned char *buff, size_t size, off_t off) {\n  return ObsClient::GetInstance()->GetBuffer(opt_, buff, size, off);\n}\n\nuint64_t OBSFileHandler::GetFileSize() {\n  if (file_size_ == 0) {\n    file_size_ = ObsClient::GetInstance()->GetObjectSize(opt_);\n  }\n  return file_size_;\n}\n\nvoid OBSFileHandler::SetOBSOption(const ObsOptions &opt) { opt_ = opt; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/obs_file_handler.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_OBS_FILE_HANDLER_H_\n#define MODELBOX_OBS_FILE_HANDLER_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/drivers/common/file_requester.h>\n#include <modelbox/obs_client.h>\n\nnamespace modelbox {\n\nclass OBSFileHandler : public FileGetHandler {\n public:\n  /**\n   * @brief get data from obs.\n   * @param buff read buffer.\n   * @param size buffer size.\n   * @param off current read offset.\n   * @param path obs file path in the bucket.\n   * @return read result.\n   */\n  modelbox::Status Get(unsigned char *buff, size_t size, off_t off) override;\n\n  /**\n   * @brief get file size from obs.\n   * @param path obs file path in the bucket.\n   * @return file size.\n   */\n  uint64_t GetFileSize() override;\n\n  void SetOBSOption(const ObsOptions &opt);\n\n private:\n  ObsOptions opt_;\n  uint64_t file_size_ = 0;\n};\n};  // namespace modelbox\n\n#endif  // MODELBOX_OBS_FILE_HANDLER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/obs_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"obs_source_parser.h\"\n\n#include <dirent.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/base/uuid.h>\n#include <modelbox/device/cpu/device_cpu.h>\n#include <modelbox/iam_auth.h>\n#include <modelbox/obs_client.h>\n#include <securec.h>\n#include <stdio.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <ctime>\n#include <nlohmann/json.hpp>\n#include <string>\n\n#include \"obs_file_handler.h\"\n\n#define OBS_STREAM_READ_SIZE_LOW 1\n#define OBS_STREAM_READ_SIZE_NORMAL 5\n#define OBS_STREAM_READ_SIZE_HIGH 20\n\nvoid RemoveFileCallback(const std::string &uri);\n\nObsSourceParser::ObsSourceParser() = default;\nObsSourceParser::~ObsSourceParser() = default;\n\nmodelbox::Status ObsSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\"retry_count_limit\",\n                                    DATASOURCE_PARSER_FILE_DEFAULT_RETRY_TIMES);\n\n  retry_enabled_ = opts->GetBool(\"obs_retry_enable\", retry_enabled_);\n  retry_interval_ = opts->GetInt32(\"obs_retry_interval_ms\", 
retry_interval_);\n  retry_max_times_ = opts->GetInt32(\"obs_retry_count_limit\", retry_max_times_);\n  read_type_ = opts->GetString(\"obs_download_method\", \"file\");\n\n  MBLOG_INFO << \"obs source parser config retry_enabled:\" << retry_enabled_\n             << \" retry_interval:\" << retry_interval_\n             << \" retry_max_times:\" << retry_max_times_;\n\n  if (read_type_ != \"stream\") {\n    return modelbox::STATUS_OK;\n  }\n  stream_memory_mode_ = opts->GetString(\"obs_stream_memory_mode\", \"low\");\n  max_read_size_ = OBS_STREAM_READ_SIZE_LOW;\n  if (stream_memory_mode_ == \"normal\") {\n    max_read_size_ = OBS_STREAM_READ_SIZE_NORMAL;\n  }\n  if (stream_memory_mode_ == \"high\") {\n    max_read_size_ = OBS_STREAM_READ_SIZE_HIGH;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsSourceParser::Deinit() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ObsSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  OBSDownloadInfo download_info;\n  uri = \"\";\n\n  // read info from cfg\n  auto ret = GetObsInfo(download_info, config);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << \"failed to get obs info\";\n    return ret;\n  }\n\n  modelbox::ObsOptions obs_opt;\n  obs_opt.end_point = download_info.end_point;\n  obs_opt.bucket = download_info.bucket;\n  obs_opt.path = download_info.file_key;\n  obs_opt.domain_name = download_info.domain_name;\n  obs_opt.xrole_name = download_info.xrole_name;\n  obs_opt.ak = download_info.ak;\n  obs_opt.sk = download_info.sk;\n  obs_opt.token = download_info.token;\n  obs_opt.user_id = download_info.user_id;\n\n  std::string uuid;\n  if (modelbox::STATUS_OK != modelbox::GetUUID(&uuid)) {\n    MBLOG_WARN << \"Failed to generate a uuid for the OBS output broker! 
Use \"\n                  \"default id: yyyymmddhhmmss\";\n    time_t now = time(nullptr);\n    uuid = GetTimeString(&now);\n  }\n\n  if (\"stream\" == read_type_) {\n    std::shared_ptr<modelbox::OBSFileHandler> obs_handler =\n        std::make_shared<modelbox::OBSFileHandler>();\n    obs_handler->SetOBSOption(obs_opt);\n    std::string obs_uri =\n        std::string(\"/obs/\") + uuid + std::string(\"/\") + download_info.file_key;\n    uri = DEFAULT_FILE_REQUEST_URI + obs_uri;\n    modelbox::FileRequester::GetInstance()->RegisterUrlHandler(obs_uri,\n                                                               obs_handler);\n    modelbox::FileRequester::GetInstance()->SetMaxFileReadSize(max_read_size_);\n    destroy_uri_func = [obs_uri](const std::string &uri) {\n      modelbox::FileRequester::GetInstance()->DeregisterUrl(obs_uri);\n    };\n    return modelbox::STATUS_OK;\n  }\n\n  download_info.file_local_path =\n      OBS_TEMP_PATH + uuid + \"_\" +\n      download_info.file_key.substr(download_info.file_key.rfind('/') + 1);\n\n  auto obs_client = modelbox::ObsClient::GetInstance();\n  ret = obs_client->GetObject(obs_opt, download_info.file_local_path);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << ret.Errormsg();\n    return ret;\n  }\n\n  uri = download_info.file_local_path;\n  destroy_uri_func = RemoveFileCallback;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsSourceParser::GetStreamType(const std::string &config,\n                                                std::string &stream_type) {\n  stream_type = \"file\";\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsSourceParser::GetObsInfo(OBSDownloadInfo &download_info,\n                                             const std::string &config) {\n  nlohmann::json config_json;\n  try {\n    config_json = nlohmann::json::parse(config);\n\n    auto value = config_json[\"obsEndPoint\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"obsEndPoint is empty!\";\n      return 
{modelbox::STATUS_BADCONF};\n    }\n    std::string http_header = \"http://\";\n    std::string https_header = \"https://\";\n    std::string end_point = value;\n\n    if (end_point.find(http_header) == 0) {\n      end_point = end_point.substr(http_header.length());\n    } else if (end_point.find(https_header) == 0) {\n      end_point = end_point.substr(https_header.length());\n    }\n    download_info.end_point = end_point;\n\n    value = config_json[\"bucket\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"bucket is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    download_info.bucket = value;\n\n    value = config_json[\"path\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"path is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    download_info.file_key = value;\n\n    if (config_json.contains(\"userId\")) {\n      value = config_json[\"userId\"];\n      download_info.user_id = value;\n    }\n    if (download_info.user_id.empty()) {\n      MBLOG_DEBUG << \"userId is empty!\";\n    }\n    auto domainName = config_json[\"domainName\"];\n    auto xroleName = config_json[\"xroleName\"];\n    auto ak = config_json[\"ak\"];\n    auto sk = config_json[\"sk\"];\n    auto token = config_json[\"token\"];\n    if (!domainName.empty() && !xroleName.empty()) {\n      download_info.domain_name = domainName;\n      download_info.xrole_name = xroleName;\n    }\n    if (!ak.empty() && !sk.empty()) {\n      download_info.ak = ak;\n      download_info.sk = sk;\n      if (!token.empty()) {\n        download_info.token = token;\n      }\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string ObsSourceParser::GetTimeString(time_t *time) {\n  if (nullptr == time) {\n    return \"\";\n  }\n\n  tm gmtm;\n  gmtime_r(time, &gmtm);\n  return 
std::to_string(gmtm.tm_year + 1900) + std::to_string(gmtm.tm_mon + 1) +\n         std::to_string(gmtm.tm_mday) + std::to_string(gmtm.tm_hour) +\n         std::to_string(gmtm.tm_min) + std::to_string(gmtm.tm_sec);\n}\n\n// TODO: When multiple streams share the same input file, the file must be removed only after the last stream exits\nvoid RemoveFileCallback(const std::string &uri) {\n  if (uri.empty()) {\n    MBLOG_WARN << \"Empty uri to be removed.\" << uri;\n    return;\n  }\n\n  struct stat stat_buffer;\n  stat(uri.c_str(), &stat_buffer);\n  if (stat_buffer.st_mode & S_IFREG) {\n    if (0 == std::remove(uri.c_str())) {\n    } else {\n      MBLOG_WARN << \"Failed to remove obs downloaded file: \" << uri;\n    }\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/obs_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_OBS_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_OBS_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\n#include \"eSDKOBS.h\"\n\n#define OBS_TEMP_PATH \"/tmp/ObsDownload/\"\n\nconstexpr const char *DRIVER_NAME = \"obs\";\nconstexpr const char *DRIVER_DESC = \"An OBS data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\ntypedef struct tag_OBSDownloadInfo {\n  std::string ak;           // temporary USER AK\n  std::string sk;           // temporary USER SK\n  std::string token;        // temporary USER Security Token\n  std::string domain_name;  // user/isv's domain name\n  std::string xrole_name;   // agency name to vas\n  std::string user_id;\n  std::string end_point;  // OBS EndPoint, for example:\n                          // obs.cn-north-7.ulanqab.huawei.com\n  std::string bucket;     // Bucket where the target file locates, for ex\n  std::string file_key;   // File Key, for example: obs-test/data/video.flv\n  std::string file_local_path;  // local path of the downloaded file\n} OBSDownloadInfo;\n\nclass ObsSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  ObsSourceParser();\n  ~ObsSourceParser() override;\n\n  modelbox::Status 
Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n  modelbox::Status GetStreamType(const std::string &config,\n                                 std::string &stream_type) override;\n\n private:\n  /**\n   * @brief Get EndPoint/Bucket/FileKey in the config.\n   * @param download_info infos needed to download an object.\n   * @param config configuration string\n   * @return Successful or not\n   */\n  modelbox::Status GetObsInfo(OBSDownloadInfo &download_info,\n                              const std::string &config);\n\n  /**\n   * @brief Generate a time-string, yyyymmddhhmmss\n   * @param time in - the time to be converted to a string\n   * @return Successful or not\n   */\n  std::string GetTimeString(time_t *time);\n\n  std::string read_type_;\n  std::string stream_memory_mode_;\n  int max_read_size_ = 0;\n};\n\nclass ObsSourceParserFactory : public modelbox::DriverFactory {\n public:\n  ObsSourceParserFactory() = default;\n  ~ObsSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<ObsSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_OBS_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/obs_source_parser/obs_source_parser_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <functional>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/iam_auth.h\"\n\n#define CHECK_SOURCE_OUTPUT_OBS \"check_data_source_obs_parser_output\"\n\nusing ::testing::_;\n\nnamespace modelbox {\n\nclass DataSourceObsParserPluginTest : public testing::Test {\n public:\n  DataSourceObsParserPluginTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n  void PreparationToGetCert();\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n  std::shared_ptr<DriverFlowTest> RunDriverFlow(\n      const std::string &mock_flowunit_name);\n  modelbox::Status SendDataSourceCfg(std::shared_ptr<DriverFlowTest> &driver_flow,\n                                   const std::string &data_source_cfg,\n                                   const std::string &source_type);\n\n private:\n  Status AddMockFlowUnit();\n  Status AddMockObs();\n  std::shared_ptr<DriverFlowTest> 
driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> DataSourceObsParserPluginTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<DriverFlowTest> DataSourceObsParserPluginTest::RunDriverFlow(\n    const std::string &mock_flowunit_name) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input, device=cpu, deviceid=0]\n          data_source_parser[type=flowunit, flowunit=data_source_parser, device=cpu, deviceid=0, label=\"\", plugin_dir=\")\" +\n                             test_lib_dir + R\"(\"]\n          )\" + mock_flowunit_name +\n                             R\"([type=flowunit, flowunit=)\" +\n                             mock_flowunit_name +\n                             R\"(, device=cpu, deviceid=0, label=\"\"]\n          input -> data_source_parser:in_data\n          data_source_parser:stream_meta -> )\" +\n                             mock_flowunit_name + R\"(:stream_meta\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(mock_flowunit_name, toml_content, -1);\n\n  return driver_flow;\n}\n\nmodelbox::Status DataSourceObsParserPluginTest::SendDataSourceCfg(\n    std::shared_ptr<DriverFlowTest> &driver_flow,\n    const std::string &data_source_cfg, const std::string &source_type) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({data_source_cfg.size()});\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), data_source_cfg.data(),\n           data_source_cfg.size());\n  buffer->Set(\"source_type\", source_type);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Shutdown();\n  return 
modelbox::STATUS_OK;\n}\n\nStatus DataSourceObsParserPluginTest::AddMockFlowUnit() {\n  AddMockObs();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceObsParserPluginTest::AddMockObs() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(CHECK_SOURCE_OUTPUT_OBS);\n    desc_flowunit.SetDescription(CHECK_SOURCE_OUTPUT_OBS);\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit = std::string(TEST_DRIVER_DIR) +\n                                     \"/libmodelbox-unit-cpu-\" +\n                                     CHECK_SOURCE_OUTPUT_OBS + \".so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(CHECK_SOURCE_OUTPUT_OBS);\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"stream_meta\"));\n    mock_flowunit_desc->SetFlowType(modelbox::FlowType::STREAM);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n              EXPECT_NE(stream_meta, nullptr);\n              if (!stream_meta) {\n                return modelbox::STATUS_SUCCESS;\n              }\n\n              auto source_url = std::static_pointer_cast<std::string>(\n                  
stream_meta->GetMeta(\"source_url\"));\n              EXPECT_NE(source_url, nullptr);\n              if (source_url != nullptr) {\n                EXPECT_FALSE((*source_url).empty());\n                EXPECT_EQ((*source_url).substr((*source_url).rfind('_') + 1),\n                          \"nv-codec-headers.tar.gz\");\n              }\n\n              return modelbox::STATUS_SUCCESS;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"Process\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(CHECK_SOURCE_OUTPUT_OBS, \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(DataSourceObsParserPluginTest, ObsInputTest) {\n  // This test would be skipped, if no auth info is provided.\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_OBS);\n  std::string source_type = \"obs\";\n\n  // construct DATA SOURCE CFG\n  
std::string obsEndPoint(config_file->GetString(\"data_source.obsEndPoint\"));\n  std::string bucket(config_file->GetString(\"data_source.bucket\"));\n  std::string path(config_file->GetString(\"data_source.path\"));\n  std::string domainName(config_file->GetString(\"data_source.domainName\"));\n  std::string xroleName(config_file->GetString(\"data_source.xroleName\"));\n\n  std::string data_source_cfg = R\"({\n        \"obsEndPoint\":\")\" + obsEndPoint +\n                                R\"(\",\n        \"bucket\":\")\" + bucket + R\"(\",\n        \"path\":\")\" + path + R\"(\",\n        \"taskid\":\"test_task_id\",\n        \"domainName\":\")\" + domainName +\n                                R\"(\",\n        \"xroleName\":\")\" + xroleName +\n                                R\"(\"\n  })\";\n\n  PreparationToGetCert();\n  auto ret = SendDataSourceCfg(driver_flow, data_source_cfg, source_type);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nvoid DataSourceObsParserPluginTest::PreparationToGetCert() {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n\n  std::string ak(config_file->GetString(\"base.ak\"));\n  std::string sk(config_file->GetString(\"base.sk\"));\n  std::string domain_id(config_file->GetString(\"base.domain_id\"));\n  std::string project_id(config_file->GetString(\"base.project_id\"));\n  std::string iam_host(config_file->GetString(\"base.iam_host\"));\n\n  modelbox::IAMAuth::GetInstance()->SetIAMHostAddress(iam_host);\n  if (modelbox::STATUS_OK != modelbox::IAMAuth::GetInstance()->SetConsigneeInfo(\n                               ak, sk, domain_id, project_id)) {\n    MBLOG_ERROR << \"set Consignee failed\";\n    return;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/restful_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n \nset(PLUGIN_NAME \"restful\")\n \nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n \nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n \nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n \nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${CPPREST_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n \ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \n \ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n \nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n \n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/restful_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"restful_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<RestfulSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/restful_source_parser/restful_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"restful_source_parser.h\"\n\n#include <iomanip>\n#include <nlohmann/json.hpp>\n\nRestfulSourceParser::RestfulSourceParser() = default;\nRestfulSourceParser::~RestfulSourceParser() = default;\n\nmodelbox::Status RestfulSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\n      \"retry_count_limit\", DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES);\n\n  retry_enabled_ = opts->GetBool(\"restful_retry_enable\", retry_enabled_);\n  retry_interval_ =\n      opts->GetInt32(\"restful_retry_interval_ms\", retry_interval_);\n  retry_max_times_ =\n      opts->GetInt32(\"restful_retry_count_limit\", retry_max_times_);\n\n  MBLOG_INFO << \"restful source parser config retry_enabled:\" << retry_enabled_\n             << \" retry_interval:\" << retry_interval_\n             << \" retry_max_times:\" << retry_max_times_;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RestfulSourceParser::Deinit() { return modelbox::STATUS_OK; }\n\nmodelbox::Status RestfulSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> 
&session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  RestfulInputInfo input_info;\n\n  if (GetRestfulInfo(input_info, config) != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to get Restful input info\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  web::http::http_response resp;\n  auto ret = SendRestfulRequest(input_info, resp);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Send Restful Request failed, detail: \"\n                << ret.WrapErrormsgs();\n    return modelbox::STATUS_FAULT;\n  }\n\n  ret = ProcessRestfulResponse(input_info, resp, uri);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Process Restful Response failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n  session_context->SetPrivate(\n      \"data_source_parser.restful_source_parser.response\",\n      std::make_shared<std::string>(input_info.response_body));\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RestfulSourceParser::GetRestfulInfo(\n    RestfulInputInfo &input_info, const std::string &config) {\n  nlohmann::json config_json;\n  try {\n    config_json = nlohmann::json::parse(config);\n    std::string request_url = config_json[\"request_url\"].get<std::string>();\n    if (request_url.empty()) {\n      MBLOG_ERROR\n          << \"Invalid request url, value of key <request_url> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    if (config_json.contains(\"params\")) {\n      auto brokers_json = config_json[\"params\"];\n      std::string encode_params;\n      for (auto &broker_json : brokers_json) {\n        auto param_key = broker_json[\"param_key\"].get<std::string>();\n        auto param_value = broker_json[\"param_value\"].get<std::string>();\n        MBLOG_DEBUG << \"param_key \" << param_key << \", param_value \"\n                    << param_value;\n        encode_params += 
web::uri::encode_data_string(param_key) + \"=\" +\n                         web::uri::encode_data_string(param_value) + \"&\";\n      }\n      input_info.encode_full_url =\n          request_url + \"?\" +\n          encode_params.substr(0, encode_params.length() - 1);\n\n    } else {\n      input_info.encode_full_url = request_url;\n    }\n\n    input_info.response_url_position =\n        config_json[\"response_url_position\"].get<std::string>();\n    if (input_info.response_url_position.empty()) {\n      MBLOG_ERROR << \"Invalid response url position, value of key \"\n                     \"<response_url_position> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n    MBLOG_DEBUG << \"response url position: \"\n                << input_info.response_url_position;\n\n    if (config_json.contains(\"headers\")) {\n      auto value = config_json[\"headers\"].get<nlohmann::json>();\n      for (const auto &header : value.items()) {\n        if (header.key().empty()) {\n          MBLOG_ERROR << \"headers key is empty!\";\n          return modelbox::STATUS_BADCONF;\n        }\n        if (!header.value().is_string()) {\n          MBLOG_ERROR << \"Key <\" << header.key() << \"> must have string value.\";\n          return modelbox::STATUS_BADCONF;\n        }\n        input_info.headers.add(_XPLATSTR(header.key()),\n                               _XPLATSTR(header.value()));\n      }\n    }\n\n    return modelbox::STATUS_OK;\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n}\n\nmodelbox::Status RestfulSourceParser::SendRestfulRequest(\n    RestfulInputInfo &input_info, web::http::http_response &resp) {\n  utility::string_t address = _XPLATSTR(input_info.encode_full_url);\n  web::http::uri request_uri = web::http::uri(address);\n\n  web::http::client::http_client_config client_config;\n  
client_config.set_timeout(utility::seconds(30));\n  client_config.set_validate_certificates(false);\n\n  std::shared_ptr<web::http::client::http_client> client;\n  client = std::make_shared<web::http::client::http_client>(\n      web::http::uri_builder(request_uri).to_uri(), client_config);\n\n  input_info.headers.add(_XPLATSTR(\"Content-Type\"),\n                         _XPLATSTR(\"application/json\"));\n  web::http::http_request msg;\n  msg.set_method(web::http::methods::GET);\n  msg.headers() = input_info.headers;\n  try {\n    resp = client->request(msg).get();\n  } catch (std::exception const &e) {\n    return {modelbox::STATUS_FAULT, e.what()};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RestfulSourceParser::ProcessRestfulResponse(\n    RestfulInputInfo &input_info, web::http::http_response &resp,\n    std::string &uri) {\n  if (resp.status_code() == 200) {\n    std::string resp_info = resp.extract_string().get();\n    MBLOG_DEBUG << \"Get response from restful server success.\";\n\n    nlohmann::json resp_json;\n    try {\n      resp_json = nlohmann::json::parse(resp_info);\n\n      std::vector<std::string> rtsp_url_path;\n      rtsp_url_path =\n          modelbox::StringSplit(input_info.response_url_position, '/');\n      if (rtsp_url_path.empty()) {\n        MBLOG_ERROR << \"rtsp_url_path is empty!\";\n      }\n      for (const auto &url_path : rtsp_url_path) {\n        resp_json = resp_json[url_path];\n      }\n      uri = resp_json;\n      if (uri.empty()) {\n        MBLOG_ERROR << \"Restful rtsp address is empty!\";\n        return modelbox::STATUS_FAULT;\n      }\n\n      input_info.response_body = resp_info;\n      MBLOG_DEBUG << \"Get restful input info success.\";\n      return modelbox::STATUS_OK;\n    } catch (const std::exception &e) {\n      MBLOG_ERROR << \"Parse response body failed, detail: \" << e.what();\n      return modelbox::STATUS_INVALID;\n    }\n  } else {\n    MBLOG_ERROR << \"Get input from Restful failed.  
Http response code: \"\n                << resp.status_code()\n                << \". Http response body: \" << resp.extract_string().get();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status RestfulSourceParser::GetStreamType(const std::string &config,\n                                                    std::string &stream_type) {\n  stream_type = \"stream\";\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/restful_source_parser/restful_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_RESTFUL_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_RESTFUL_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#define _TURN_OFF_PLATFORM_STRING\n#include <modelbox/data_source_parser_plugin.h>\n\n#include \"cpprest/http_client.h\"\n\nconstexpr const char *DRIVER_NAME = \"restful\";\nconstexpr const char *DRIVER_DESC =\n    \"An restful data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\ntypedef struct tag_RestfulInputInfo {\n  std::string encode_full_url;\n  web::http::http_headers headers;\n  std::string response_url_position;\n  std::string response_body;\n} RestfulInputInfo;\n\nclass RestfulSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  RestfulSourceParser();\n  ~RestfulSourceParser() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n\n  modelbox::Status GetStreamType(const std::string 
&config,\n                                 std::string &stream_type) override;\n\n private:\n  modelbox::Status GetRestfulInfo(RestfulInputInfo &input_info,\n                                  const std::string &config);\n  modelbox::Status SendRestfulRequest(RestfulInputInfo &input_info,\n                                      web::http::http_response &resp);\n  modelbox::Status ProcessRestfulResponse(RestfulInputInfo &input_info,\n                                          web::http::http_response &resp,\n                                          std::string &uri);\n};\n\nclass RestfulSourceParserFactory : public modelbox::DriverFactory {\n public:\n  RestfulSourceParserFactory() = default;\n  ~RestfulSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<RestfulSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_RESTFUL_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/url_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n \nset(PLUGIN_NAME \"url\")\n \nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n \nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n \nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n \nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n \ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n \ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \n \ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n \nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n \n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/url_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"url_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<UrlSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/url_source_parser/url_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"url_source_parser.h\"\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n#define RETRY_PARAMS_NOT_SET (-2)\n\nUrlSourceParser::UrlSourceParser() = default;\nUrlSourceParser::~UrlSourceParser() = default;\n\nmodelbox::Status UrlSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\"retry_count_limit\", RETRY_PARAMS_NOT_SET);\n\n  retry_enabled_ = opts->GetBool(\"url_retry_enable\", retry_enabled_);\n  retry_interval_ = opts->GetInt32(\"url_retry_interval_ms\", retry_interval_);\n  file_retry_interval_ =\n      opts->GetInt32(\"url_file_retry_interval_ms\", retry_interval_);\n  stream_retry_interval_ =\n      opts->GetInt32(\"url_stream_retry_interval_ms\", retry_interval_);\n  retry_max_times_ = opts->GetInt32(\"url_retry_count_limit\", retry_max_times_);\n  file_retry_times_ =\n      opts->GetInt32(\"url_file_retry_count_limit\",\n                     retry_max_times_ == RETRY_PARAMS_NOT_SET\n                         ? 
DATASOURCE_PARSER_FILE_DEFAULT_RETRY_TIMES\n                         : retry_max_times_);\n  stream_retry_times_ =\n      opts->GetInt32(\"url_stream_retry_count_limit\",\n                     retry_max_times_ == RETRY_PARAMS_NOT_SET\n                         ? DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES\n                         : retry_max_times_);\n\n  MBLOG_INFO << \"url source parser config retry_enabled:\" << retry_enabled_\n             << \" stream_retry_interval:\" << stream_retry_interval_\n             << \" file_retry_interval:\" << file_retry_interval_\n             << \" keep_alive_interval:\" << file_retry_times_\n             << \" keep_alive_interval:\" << stream_retry_times_;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status UrlSourceParser::Deinit() { return modelbox::STATUS_OK; }\n\nmodelbox::Status UrlSourceParser::GetStreamType(const std::string &config,\n                                                std::string &stream_type) {\n  nlohmann::json json;\n  try {\n    json = nlohmann::json::parse(config);\n\n    std::string url_type = json[\"url_type\"].get<std::string>();\n    if (url_type.empty()) {\n      return {modelbox::STATUS_BADCONF, \"url_type is empty\"};\n    }\n\n    if (url_type == \"file\") {\n      stream_type = \"file\";\n    } else {\n      stream_type = \"stream\";\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status UrlSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  nlohmann::json json;\n  std::string url_type;\n  if (GetStreamType(config, url_type) != modelbox::STATUS_OK) {\n    return {modelbox::STATUS_BADCONF, 
\"url_type is empty\"};\n  }\n  if (url_type == \"file\") {\n    retry_interval_ = file_retry_interval_;\n    retry_max_times_ = file_retry_times_;\n  } else if (url_type == \"stream\") {\n    retry_interval_ = stream_retry_interval_;\n    retry_max_times_ = stream_retry_times_;\n  } else {\n    MBLOG_ERROR << \"url input type: \" << url_type << \" is not supported\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  try {\n    json = nlohmann::json::parse(config);\n    uri = json[\"url\"].get<std::string>();\n    if (uri.empty()) {\n      return {modelbox::STATUS_BADCONF, \"uri is empty\"};\n    }\n    MBLOG_DEBUG << \"Get url address success.\";\n    return modelbox::STATUS_OK;\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/url_source_parser/url_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_URL_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_URL_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\nconstexpr const char *DRIVER_NAME = \"url\";\nconstexpr const char *DRIVER_DESC = \"A url data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nclass UrlSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  UrlSourceParser();\n  ~UrlSourceParser() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n  modelbox::Status GetStreamType(const std::string &config,\n                                 std::string &stream_type) override;\n\n protected:\n  int32_t file_retry_interval_ = 1;\n  int32_t file_retry_times_ = 0;\n  int32_t stream_retry_interval_ = 1;\n  int32_t stream_retry_times_ = 0;\n};\n\nclass UrlSourceParserFactory : public modelbox::DriverFactory {\n 
public:\n  UrlSourceParserFactory() = default;\n  ~UrlSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<UrlSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_URL_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_common/vcn_info.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_info.h\"\n\n#include <modelbox/base/log.h>\n\n#include <nlohmann/json.hpp>\n\nnamespace modelbox {\nbool IsVcnInfoValid(const VcnInfo &info) {\n  return (!(info.user_name.empty() || info.password.empty() ||\n            info.ip.empty() || info.port.empty()));\n}\n\nmodelbox::Status GetVcnInfo(modelbox::VcnInfo &vcn_info,\n                            const std::string &config) {\n  nlohmann::json config_json;\n  try {\n    config_json = nlohmann::json::parse(config);\n    auto value = config_json[\"userName\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"vcn userName is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.user_name = value;\n\n    value = config_json[\"password\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"vcn password is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.password = value;\n\n    value = config_json[\"ip\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"vcn ip is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.ip = value;\n\n    value = config_json[\"port\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"vcn port is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.port = value;\n\n    value = config_json[\"cameraCode\"];\n    if (value.empty()) {\n      
MBLOG_ERROR << \"vcn camera code is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.camera_code = value;\n\n    value = config_json[\"streamType\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"vcn stream type is empty!\";\n      return {modelbox::STATUS_BADCONF};\n    }\n    vcn_info.stream_type = value;\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid ReadConfVcnCommon(const std::shared_ptr<modelbox::Configuration> &opts,\n                       int32_t &retry_enabled, int32_t &retry_interval,\n                       int32_t &retry_max_times) {\n  retry_enabled = opts->GetBool(\"vcn_retry_enable\", retry_enabled);\n  retry_interval = opts->GetInt32(\"vcn_retry_interval_ms\", retry_interval);\n  retry_max_times = opts->GetInt32(\"vcn_retry_count_limit\", retry_max_times);\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_common/vcn_info.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VCN_INFO_H_\n#define MODELBOX_FLOWUNIT_VCN_INFO_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n\n#include <string>\n#include <utility>\n\nnamespace modelbox {\n\ntypedef struct tag_VcnInfo {\n  std::string ip;\n  std::string port;\n  std::string user_name;\n  std::string password;\n  std::string camera_code;\n  uint32_t stream_type;\n} VcnInfo;\n\n/**\n * @brief   Check whether the vcn info contains valid user name/password/ip\n * and port.\n * @param   info - in, vcn info.\n * @return  true for valid, vice versa.\n */\nbool IsVcnInfoValid(const VcnInfo &info);\n\nclass VcnAccountBase {\n public:\n  VcnAccountBase(const VcnInfo &info)\n      : ip_(info.ip),\n        port_(info.port),\n        user_name_(info.user_name),\n        password_(info.password) {\n    streams_count_ = 0;\n  };\n\n  virtual ~VcnAccountBase() = default;\n\n  /**\n   * @brief   get vcn user name\n   * @return  user name\n   */\n  std::string GetUserName() const { return user_name_; };\n\n  /**\n   * @brief   get vcn user password\n   * @return  user password\n   */\n  std::string GetPassword() const { return password_; };\n\n  /**\n   * @brief   get vcn ip\n   * @return  vcn ip\n   */\n  std::string GetIp() const { return ip_; };\n\n  /**\n   * @brief   get vcn port\n   * 
@return  vcn port\n   */\n  std::string GetPort() const { return port_; };\n\n  /**\n   * @brief   get vcn stream count\n   * @return  stream count\n   */\n  uint32_t GetStreamsCount() const { return streams_count_; };\n\n  void AddStream() { ++streams_count_; };\n  void RemoveStream() {\n    if (streams_count_ > 0) {\n      --streams_count_;\n    }\n  };\n\n  std::string ip_;\n  std::string port_;\n  std::string user_name_;\n  std::string password_;\n  uint32_t streams_count_;\n};\n\nmodelbox::Status GetVcnInfo(modelbox::VcnInfo &vcn_info,\n                            const std::string &config);\n\nclass VcnStreamBase {\n public:\n  VcnStreamBase(std::string url, std::string camera_code)\n      : url_(std::move(url)),\n        camera_code_(std::move(camera_code)){};\n\n  virtual ~VcnStreamBase() = default;\n\n  /**\n   * @brief   get stream url\n   * @return  stream url\n   */\n  std::string GetUrl() { return url_; };\n\n protected:\n  std::string url_;\n  std::string camera_code_;\n};\n\nvoid ReadConfVcnCommon(const std::shared_ptr<modelbox::Configuration> &opts,\n                       int32_t &retry_enabled, int32_t &retry_interval,\n                       int32_t &retry_max_times);\n\n}  // namespace modelbox\n#endif  // MODELBOX_FLOWUNIT_VCN_INFO_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"vcn_restful\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${CPP_HTTPLIB_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/../vcn_common)\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/../vcn_common)\nset(MODELBOX_PLUGIN_VCN_COMMON_LIST ${CMAKE_CURRENT_LIST_DIR}/../vcn_common/vcn_info.cc)\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE} ${MODELBOX_PLUGIN_VCN_COMMON_LIST})\n\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${CPP_HTTPLIB_STATIC_LIBRARIES})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"vcn_restful_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<VcnRestfulSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_client.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_restful_client.h\"\n\n#include <modelbox/base/log.h>\n\nnamespace modelbox {\nstd::mutex VcnRestfulClient::vcn_client_lock_;\n\nstd::shared_ptr<VcnRestfulClient> VcnRestfulClient::GetInstance(\n    int32_t keep_alive_interval) {\n  static std::shared_ptr<VcnRestfulClient> vcn_client(\n      new VcnRestfulClient(keep_alive_interval));\n  std::lock_guard<std::mutex> lock(vcn_client_lock_);\n  static bool is_initialized = false;\n  if (is_initialized) {\n    vcn_client->SetKeepAliveInterval(keep_alive_interval);\n    return vcn_client;\n  }\n\n  auto ret = vcn_client->Init();\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"failed to init vcn restful client reason: \"\n                << ret.Errormsg();\n    return nullptr;\n  }\n\n  vcn_client->SetKeepAliveInterval(keep_alive_interval);\n\n  is_initialized = true;\n  return vcn_client;\n}\n\nmodelbox::Status VcnRestfulClient::Init() {\n  restful_wrapper_ = std::make_shared<VcnRestfulWrapper>();\n  if (restful_wrapper_ == nullptr) {\n    return {modelbox::STATUS_INVALID, \"failed to create vcn wrapper\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::AddVcnStream(\n    VcnInfo &info, std::shared_ptr<VcnStreamRestful> &stream) {\n  std::string errmsg;\n  const std::string errmsg_prefix = \"Failed to add vcn 
stream: \";\n  if (!IsVcnInfoValid(info)) {\n    errmsg = errmsg_prefix + \"invalid info.\";\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  // check accounts record\n  std::shared_ptr<VcnAccountRestful> account = nullptr;\n  std::lock_guard<std::mutex> lock(vcn_account_lock_);\n  auto ret = GetVcnAccount(info, account);\n  if (ret != modelbox::STATUS_OK) {\n    errmsg = errmsg_prefix + ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  std::string url;\n  ret = GetVcnUrl(info, account, url);\n  if (ret != modelbox::STATUS_OK) {\n    errmsg = errmsg_prefix + ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  MBLOG_INFO << \"User name: \" << info.user_name\n             << \", successfully get url: \" << url;\n\n  stream = std::shared_ptr<VcnStreamRestful>(\n      new VcnStreamRestful(url, info.camera_code, account),\n      [this](VcnStreamRestful *stream) {\n        this->RemoveVcnStream(stream);\n        delete stream;\n      });\n\n  account->AddStream();\n\n  PullKeepAliveThread();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::RemoveVcnStream(VcnStreamRestful *stream) {\n  std::string errmsg;\n  const std::string errmsg_prefix = \"Failed to remove vcn restful stream: \";\n  if (nullptr == stream) {\n    MBLOG_ERROR << errmsg_prefix + \"stream ptr is nullptr.\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  std::lock_guard<std::mutex> lock(vcn_account_lock_);\n  auto account = stream->GetAccount();\n  account->RemoveStream();\n\n  if (account->GetStreamsCount() > 0) {\n    return modelbox::STATUS_OK;\n  }\n\n  auto ret = RemoveVcnAccount(account);\n  if (ret != modelbox::STATUS_OK) {\n    std::string errmsg = errmsg_prefix + ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::GetVcnAccount(\n    const VcnInfo &info, std::shared_ptr<VcnAccountRestful> &account) {\n  auto iter = 
std::find_if(\n      vcn_accounts_.begin(), vcn_accounts_.end(),\n      [&info](const std::shared_ptr<const VcnAccountRestful> &account_) {\n        return (account_->GetUserName() == info.user_name &&\n                account_->GetIp() == info.ip &&\n                account_->GetPort() == info.port &&\n                account_->GetPassword() == info.password);\n      });\n  if (iter != vcn_accounts_.end()) {\n    account = *iter;\n    return modelbox::STATUS_OK;\n  }\n\n  auto ret = CreateVcnAccount(info, account);\n  if (ret != modelbox::STATUS_OK) {\n    std::string errmsg =\n        std::string(\"failed to get vcn restful account reason: \") +\n        ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::CreateVcnAccount(\n    const VcnInfo &info, std::shared_ptr<VcnAccountRestful> &account) {\n  account = std::make_shared<VcnAccountRestful>(info);\n  if (account == nullptr) {\n    return {modelbox::STATUS_INVALID, \"failed to create vcn account\"};\n  }\n\n  VcnRestfulInfo restful_info(info);\n  auto ret = restful_wrapper_->Login(restful_info);\n  if (ret != modelbox::STATUS_OK) {\n    std::string errreason =\n        std::string(\"failed to login vcn restful client reason: \") +\n        ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errreason};\n  }\n\n  account->SetSessionId(restful_info.jsession_id);\n\n  vcn_accounts_.emplace_back(account);\n\n  MBLOG_INFO << \"Successfully login vcn restful, User name: \"\n             << restful_info.user_name;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::GetVcnUrl(\n    const VcnInfo &info, const std::shared_ptr<VcnAccountRestful> &account,\n    std::string &url) {\n  VcnRestfulInfo restful_info(info);\n  restful_info.jsession_id = account->GetSessionId();\n  auto ret = restful_wrapper_->GetUrl(restful_info, url);\n  if (ret != modelbox::STATUS_OK) {\n    std::string errreason =\n        
std::string(\"failed to get vcn restful url reason: \") + ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errreason};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::RemoveVcnAccount(\n    const std::shared_ptr<VcnAccountRestful> &account) {\n  VcnRestfulInfo restful_info;\n  GetRestfulInfoFromAccount(account, restful_info);\n\n  auto ret = restful_wrapper_->Logout(restful_info);\n  if (ret != modelbox::STATUS_OK) {\n    std::string errreason =\n        std::string(\"failed to logout vcn restful client reason: \") +\n        ret.Errormsg();\n    return {modelbox::STATUS_INVALID, errreason};\n  }\n\n  auto iter = std::find_if(\n      vcn_accounts_.begin(), vcn_accounts_.end(),\n      [&account](const std::shared_ptr<const VcnAccountRestful> &account_) {\n        return (account_->GetUserName() == account->GetUserName() &&\n                account_->GetIp() == account->GetIp() &&\n                account_->GetPort() == account->GetPort() &&\n                account_->GetPassword() == account->GetPassword());\n      });\n  if (iter == vcn_accounts_.end()) {\n    std::string errreason =\n        \"failed to logout vcn restful client reason: The account to be deleted \"\n        \"is NOT FOUND\";\n    return {modelbox::STATUS_NOTFOUND, errreason};\n  }\n\n  vcn_accounts_.erase(iter);\n\n  MBLOG_INFO << \"remove vcn restful success ip:\" << restful_info.ip\n             << \" user name:\" << restful_info.user_name;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulClient::KeepAliveProcess() {\n  std::lock_guard<std::mutex> lock(vcn_account_lock_);\n  if (vcn_accounts_.size() <= 0) {\n    // No error when vcn_accounts_ is empty, this is normal business\n    return {modelbox::STATUS_INVALID, \"\"};\n  }\n\n  if (restful_wrapper_ == nullptr) {\n    return {modelbox::STATUS_INVALID, \"wrapper is nullptr\"};\n  }\n\n  for (auto &account : vcn_accounts_) {\n    time_t now = time(nullptr);\n    if (now - 
account->GetKeepAliveTime() < keep_alive_interval_) {\n      continue;\n    }\n\n    VcnRestfulInfo info;\n    GetRestfulInfoFromAccount(account, info);\n    auto ret = restful_wrapper_->KeepAlive(info);\n    if (modelbox::STATUS_OK != ret) {\n      std::string msg = \"Failed to vcn restful keep alive \" + ret.Errormsg();\n      MBLOG_ERROR << msg;\n      continue;\n    }\n\n    account->SetKeepAliveTime(now);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid VcnRestfulClient::GetRestfulInfoFromAccount(\n    const std::shared_ptr<const VcnAccountRestful> &account,\n    VcnRestfulInfo &info) {\n  info.ip = account->GetIp();\n  info.port = account->GetPort();\n  info.user_name = account->GetUserName();\n  info.password = account->GetPassword();\n  info.jsession_id = account->GetSessionId();\n}\n\nmodelbox::Status VcnRestfulClient::SetRestfulWrapper(\n    const std::shared_ptr<VcnRestfulWrapper> &_restful_wrapper) {\n  if (nullptr == _restful_wrapper) {\n    return {modelbox::STATUS_INVALID, \"wrapper pointer is nullptr.\"};\n  }\n\n  restful_wrapper_ = _restful_wrapper;\n  return modelbox::STATUS_OK;\n}\n\nvoid VcnRestfulClient::PullKeepAliveThread() {\n  if (keep_alive_timer_task_ != nullptr) {\n    return;\n  }\n\n  timer_.Start();\n\n  keep_alive_timer_task_ = std::make_shared<modelbox::TimerTask>([this]() {\n    auto ret = KeepAliveProcess();\n    if (ret != modelbox::STATUS_OK && !ret.Errormsg().empty()) {\n      MBLOG_ERROR << \"failed to KeepAliveProcess reason: \" << ret.Errormsg();\n    }\n  });\n\n  if (keep_alive_timer_task_ == nullptr) {\n    MBLOG_ERROR << \"failed to create vcn restful keep alive timer task\";\n    return;\n  }\n\n  timer_.Schedule(keep_alive_timer_task_, 0, keep_alive_interval_ * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_client.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VCN_RESTFUL_CLIENT_H_\n#define MODELBOX_FLOWUNIT_VCN_RESTFUL_CLIENT_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <mutex>\n#include <utility>\n#include <vector>\n\n#include \"vcn_info.h\"\n#include \"vcn_restful_wrapper.h\"\n\n#define KEEP_ALIVE_INTERVAL_DEFAULT_SEC 600\n\nnamespace modelbox {\n\nclass VcnStreamRestful;\nclass VcnAccountRestful;\n\n/**\n * @brief This is a singleton class, in charge of all about the VCN restful.\n */\nclass VcnRestfulClient {\n public:\n  /**\n   * @brief   Get a VcnRestfulClient object.\n   * @return  Pointer to a VcnRestfulClient object.\n   *          Notes: returns nullptr if it fails to initialize VCN restful.\n   */\n  static std::shared_ptr<VcnRestfulClient> GetInstance(\n      int32_t keep_alive_interval);\n\n  virtual ~VcnRestfulClient() = default;\n\n  modelbox::Status Init();\n\n  /**\n   * @brief   Add a vcn stream and get its url.\n   * @param   info - in, Vcn info.\n   * @param   stream - out, pointer to a VcnStreamRestful object. 
This object\n   * holds a vcn url;\n   * @return  Successful or not\n   */\n  modelbox::Status AddVcnStream(VcnInfo &info,\n                                std::shared_ptr<VcnStreamRestful> &stream);\n\n  /**\n   * @brief   Remove the vcn stream from VcnRestfulClient, and logout the\n   * corresponding account if necessary.\n   * @param   stream - in, pointer to a VcnStreamRestful object to be removed;\n   * @return  Successful or not\n   */\n  modelbox::Status RemoveVcnStream(VcnStreamRestful *stream);\n\n  /**\n   * @brief   Set a mock VCN restful wrapper for the Unit Test.\n   * @param   _restful_wrapper - pointer to an object which is derived from\n   * class 'VcnRestfulWrapper';\n   * @return  Successful or not\n   */\n  modelbox::Status SetRestfulWrapper(\n      const std::shared_ptr<VcnRestfulWrapper> &_restful_wrapper);\n\n  void SetKeepAliveInterval(int32_t keep_alive_interval) {\n    keep_alive_interval_ = keep_alive_interval;\n  }\n\n private:\n  VcnRestfulClient(int32_t keep_alive_interval)\n      : restful_wrapper_(nullptr),\n        keep_alive_interval_(keep_alive_interval),\n        keep_alive_timer_task_(nullptr) {}\n\n  modelbox::Status GetVcnAccount(const VcnInfo &info,\n                                 std::shared_ptr<VcnAccountRestful> &account);\n  modelbox::Status CreateVcnAccount(\n      const VcnInfo &info, std::shared_ptr<VcnAccountRestful> &account);\n\n  modelbox::Status GetVcnUrl(const VcnInfo &info,\n                             const std::shared_ptr<VcnAccountRestful> &account,\n                             std::string &url);\n\n  modelbox::Status RemoveVcnAccount(\n      const std::shared_ptr<VcnAccountRestful> &account);\n\n  modelbox::Status KeepAliveProcess();\n  void GetRestfulInfoFromAccount(\n      const std::shared_ptr<const VcnAccountRestful> &account,\n      VcnRestfulInfo &info);\n  void PullKeepAliveThread();\n\n  static std::mutex vcn_client_lock_;\n  std::mutex vcn_account_lock_;\n  
std::vector<std::shared_ptr<VcnAccountRestful>> vcn_accounts_;\n  std::shared_ptr<VcnRestfulWrapper> restful_wrapper_;\n  int32_t keep_alive_interval_;\n  std::shared_ptr<modelbox::TimerTask> keep_alive_timer_task_;\n  modelbox::Timer timer_;\n};\n\nclass VcnAccountRestful : public VcnAccountBase {\n public:\n  VcnAccountRestful(const VcnInfo &info)\n      : VcnAccountBase(info), keep_alive_time_(0) {}\n\n  std::string GetSessionId() const { return session_id_; }\n  void SetSessionId(const std::string &session_id) { session_id_ = session_id; }\n\n  /**\n   * @brief   get keep alive use restful\n   * @return  keep alive\n   */\n  time_t GetKeepAliveTime() const { return keep_alive_time_; }\n  void SetKeepAliveTime(time_t keep_alive_time) {\n    keep_alive_time_ = keep_alive_time;\n  }\n\n private:\n  std::string session_id_;\n  time_t keep_alive_time_;\n};\n\nclass VcnStreamRestful : public VcnStreamBase {\n public:\n  VcnStreamRestful(std::string url, std::string camera_code,\n                   std::shared_ptr<VcnAccountRestful> account)\n      : VcnStreamBase(std::move(url), std::move(camera_code)),\n        account_(std::move(account)) {}\n\n  std::shared_ptr<VcnAccountRestful> GetAccount() { return account_; }\n\n private:\n  std::shared_ptr<VcnAccountRestful> account_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_VCN_RESTFUL_CLIENT_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_restful_source_parser.h\"\n\nmodelbox::Status VcnRestfulSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\n      \"retry_count_limit\", DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES);\n\n  ReadConfVcnCommon(opts, retry_enabled_, retry_interval_, retry_max_times_);\n\n  keep_alive_interval_ =\n      opts->GetInt32(\"vcn_keep_alive_interval_sec\", keep_alive_interval_);\n\n  MBLOG_INFO << \"vcn restful source parser config retry_enabled:\"\n             << retry_enabled_ << \" retry_interval:\" << retry_interval_\n             << \" retry_max_times:\" << retry_max_times_\n             << \" keep_alive_interval:\" << keep_alive_interval_;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulSourceParser::Deinit() {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    
modelbox::DestroyUriFunc &destroy_uri_func) {\n  modelbox::VcnRestfulInfo vcn_info;\n  uri = \"\";\n\n  // read info from cfg\n  auto ret = GetVcnInfo(vcn_info, config);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << \"failed to get vcn info.\";\n    return ret;\n  }\n\n  auto vcn_client =\n      modelbox::VcnRestfulClient::GetInstance(keep_alive_interval_);\n  if (nullptr == vcn_client) {\n    MBLOG_ERROR << \"failed to get vcn restful client instance.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::shared_ptr<modelbox::VcnStreamRestful> stream;\n  ret = vcn_client->AddVcnStream(vcn_info, stream);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << ret.Errormsg();\n    return ret;\n  }\n\n  uri = stream->GetUrl();\n  destroy_uri_func = [stream](const std::string &uri) {\n    MBLOG_DEBUG << \"destory \" << uri;\n  };\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulSourceParser::GetStreamType(\n    const std::string &config, std::string &stream_type) {\n  stream_type = \"stream\";\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_RESTFUL_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_RESTFUL_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\n#include \"vcn_restful_client.h\"\n\nconstexpr const char *DRIVER_NAME = \"vcn_restful\";\nconstexpr const char *DRIVER_DESC =\n    \"A VCN restful data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nclass VcnRestfulSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  VcnRestfulSourceParser() = default;\n  ~VcnRestfulSourceParser() override = default;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n\n  modelbox::Status GetStreamType(const std::string &config,\n                                 std::string &stream_type) override;\n\n private:\n  int32_t keep_alive_interval_{KEEP_ALIVE_INTERVAL_DEFAULT_SEC};\n};\n\nclass 
VcnRestfulSourceParserFactory : public modelbox::DriverFactory {\n public:\n  VcnRestfulSourceParserFactory() = default;\n  ~VcnRestfulSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<VcnRestfulSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_RESTFUL_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_source_parser_test.cc",
    "content": "#include <securec.h>\n\n#include <functional>\n#include <thread>\n\n#include \"data_source_parser_flowunit.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"vcn_restful_client.h\"\n#include \"vcn_restful_wrapper_mock_test.h\"\n\n#define CHECK_SOURCE_OUTPUT_VCN_RESTFUL \\\n  \"check_data_source_vcn_restful_parser_output\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass DataSourceVcnRestfulParserPluginTest : public testing::Test {\n public:\n  DataSourceVcnRestfulParserPluginTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n  void MockRestfulServer(std::shared_ptr<DriverFlowTest> &driver_flow);\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow() { return driver_flow_; }\n  std::shared_ptr<DriverFlowTest> RunDriverFlow(\n      const std::string &mock_flowunit_name);\n  modelbox::Status SendDataSourceCfg(\n      std::shared_ptr<DriverFlowTest> &driver_flow,\n      const std::string &data_source_cfg, const std::string &source_type);\n\n private:\n  Status AddMockFlowUnit();\n  Status AddMockVcn();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest>\nDataSourceVcnRestfulParserPluginTest::RunDriverFlow(\n    const std::string &mock_flowunit_name) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input, device=cpu, deviceid=0]\n          data_source_parser[type=flowunit, 
flowunit=data_source_parser, device=cpu, deviceid=0, vcn_keep_alive_interval_sec=2, label=\"\", plugin_dir=\")\" +\n                             test_lib_dir + R\"(\"]\n          )\" + mock_flowunit_name +\n                             R\"([type=flowunit, flowunit=)\" +\n                             mock_flowunit_name +\n                             R\"(, device=cpu, deviceid=0, label=\"\"]\n          input -> data_source_parser:in_data\n          data_source_parser:out_video_url -> )\" +\n                             mock_flowunit_name + R\"(:stream_meta\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(mock_flowunit_name, toml_content, -1);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg = \"Failed to build and run, reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n  }\n\n  return driver_flow_;\n}\n\nmodelbox::Status DataSourceVcnRestfulParserPluginTest::SendDataSourceCfg(\n    std::shared_ptr<DriverFlowTest> &driver_flow,\n    const std::string &data_source_cfg, const std::string &source_type) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({data_source_cfg.size()});\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), data_source_cfg.data(),\n           data_source_cfg.size());\n  buffer->Set(\"source_type\", source_type);\n  ext_data->Send(\"input\", buffer_list);\n  std::this_thread::sleep_for(std::chrono::seconds(5));\n  ext_data->Shutdown();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceVcnRestfulParserPluginTest::AddMockFlowUnit() {\n  AddMockVcn();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceVcnRestfulParserPluginTest::AddMockVcn() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    
desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(CHECK_SOURCE_OUTPUT_VCN_RESTFUL);\n    desc_flowunit.SetDescription(CHECK_SOURCE_OUTPUT_VCN_RESTFUL);\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit = std::string(TEST_DRIVER_DIR) +\n                                     \"/libmodelbox-unit-cpu-\" +\n                                     CHECK_SOURCE_OUTPUT_VCN_RESTFUL + \".so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(CHECK_SOURCE_OUTPUT_VCN_RESTFUL);\n    mock_flowunit_desc->AddFlowUnitInput(\n        modelbox::FlowUnitInput(\"stream_meta\"));\n    mock_flowunit_desc->SetFlowType(modelbox::FlowType::STREAM);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n              EXPECT_NE(stream_meta, nullptr);\n              if (!stream_meta) {\n                return modelbox::STATUS_SUCCESS;\n              }\n\n              auto source_url = std::static_pointer_cast<std::string>(\n                  stream_meta->GetMeta(\"source_url\"));\n              EXPECT_NE(source_url, nullptr);\n              if (source_url != nullptr) {\n                EXPECT_FALSE((*source_url).empty());\n                EXPECT_EQ(*source_url, \"https://www.Hello_World.com\");\n              }\n\n              return 
modelbox::STATUS_SUCCESS;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"Process\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(CHECK_SOURCE_OUTPUT_VCN_RESTFUL, \"cpu\",\n                                desc_flowunit, std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(DataSourceVcnRestfulParserPluginTest, VcnInputTest) {\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_VCN_RESTFUL);\n  auto vcn_client = modelbox::VcnRestfulClient::GetInstance(600);\n  auto wrapper_mock =\n      std::shared_ptr<VcnRestfulWrapper>(new VcnRestfulWrapperMock());\n  vcn_client->SetRestfulWrapper(wrapper_mock);\n\n  std::string source_type = \"vcn_restful\";\n  std::string data_source_cfg_1 = R\"({\n        \"userName\": \"user\",\n        \"password\":\"password\",\n        \"ip\":\"192.168.1.1\",\n        \"port\":\"666\",\n        \"cameraCode\":\"01234567890123456789#01234567890123456789012345678901\",\n        \"streamType\":1\n  })\";\n\n  std::string data_source_cfg_2 = R\"({\n        \"userName\": \"user\",\n        \"password\":\"password\",\n        \"ip\":\"192.168.1.1\",\n        \"port\":\"666\",\n        
\"cameraCode\":\"01234567890123456789#01234567890123456789012345678901\",\n        \"streamType\":1\n  })\";\n\n  auto ret_1 = SendDataSourceCfg(driver_flow, data_source_cfg_1, source_type);\n  auto ret_2 = SendDataSourceCfg(driver_flow, data_source_cfg_2, source_type);\n\n  EXPECT_EQ(ret_1, modelbox::STATUS_OK);\n  EXPECT_EQ(ret_2, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_wrapper.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_restful_wrapper.h\"\n\n#include <functional>\n\n#include \"modelbox/base/log.h\"\n#include \"nlohmann/json.hpp\"\n\nconstexpr const char *RESTFUL_LOGIN_URL = \"/loginInfo/login/v1.0\";\nconstexpr const char *RESTFUL_LOGOUT_URL = \"/users/logout\";\nconstexpr const char *RESTFUL_KEEP_ALIVE_URL = \"/common/keepAlive\";\nconstexpr const char *RESTFUL_GET_RTSP_URL = \"/video/rtspurl/v1.0\";\n\nconstexpr const uint64_t IVS_RESULT_SUCCESS = 0;\nconstexpr const uint64_t IVS_RESULT_PWD_EXPIRED = 119101305;\nconstexpr const uint64_t IVS_RESULT_FIRST_LOGIN = 119101308;\n\nconstexpr const uint32_t RESTFUL_STREAM_TYPE_MAX = 3;\n\nconstexpr const int HTTP_STATUS_CODE_OK = 200;\n\nnamespace modelbox {\n\nVcnRestfulWrapper::VcnRestfulWrapper() {\n  httpcli_func_map_[REQ_GET] = [](httplib::Client &cli, const std::string &path,\n                                  const httplib::Headers &headers,\n                                  const std::string &body) {\n    return cli.Get(path, headers);\n  };\n\n  httpcli_func_map_[REQ_POST] =\n      [](httplib::Client &cli, const std::string &path,\n         const httplib::Headers &headers, const std::string &body) {\n        return cli.Post(path, headers, body, \"\");\n      };\n}\n\nmodelbox::Status VcnRestfulWrapper::SendRequest(const std::string &uri,\n                   
                             const std::string &path,\n                                                const std::string &body,\n                                                const httplib::Headers &headers,\n                                                REQ_METHOD method,\n                                                httplib::Response &resp) {\n  httplib::Client cli(uri);\n  cli.enable_server_certificate_verification(false);\n  cli.set_write_timeout(std::chrono::seconds(30));\n\n  auto func_item = httpcli_func_map_.find(method);\n  if (func_item == httpcli_func_map_.end()) {\n    return {modelbox::STATUS_NOTSUPPORT, \"Not support http method\"};\n  }\n\n  try {\n    auto result = func_item->second(cli, path, headers, body);\n    if (result == nullptr) {\n      return {modelbox::STATUS_FAULT,\n              \"Failed to send request \" + httplib::to_string(result.error())};\n    }\n\n    resp = result.value();\n  } catch (const std::exception &e) {\n    auto msg =\n        std::string(\"Failed to send request, exception reason: \") + e.what();\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::Login(VcnRestfulInfo &restful_info) {\n  if (!IsRestfulInfoValid(restful_info, true)) {\n    std::string msg = \"Failed to restful login, restful info is invalid\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_INVALID, msg};\n  }\n\n  std::string uri = \"https://\" + restful_info.ip + \":\" + restful_info.port;\n\n  httplib::Headers headers;\n  headers.insert({\"Content-Type\", \"application/json\"});\n  headers.insert({\"Cache-Control\", \"no-cache\"});\n\n  nlohmann::json body;\n  body[\"userName\"] = restful_info.user_name;\n  body[\"password\"] = restful_info.password;\n\n  httplib::Response resp;\n  auto ret =\n      SendRequest(uri, RESTFUL_LOGIN_URL, body.dump(), headers, REQ_POST, resp);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful login, 
send request fail reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (resp.status != HTTP_STATUS_CODE_OK) {\n    std::string msg =\n        \"Failed to restful login, result code:\" + std::to_string(resp.status);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  ret = ParseRestfulLoginResult(resp, restful_info);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful login, parse result reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::ParseRestfulLoginResult(\n    const httplib::Response &resp, VcnRestfulInfo &restful_info) {\n  try {\n    auto result_json = nlohmann::json::parse(resp.body);\n    if (!result_json.contains(\"resultCode\") ||\n        !result_json[\"resultCode\"].is_number_unsigned()) {\n      std::string msg =\n          \"Failed to parse login result, result code isn't exist or invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    auto result_code = result_json[\"resultCode\"].get<uint64_t>();\n    if (result_code != IVS_RESULT_SUCCESS &&\n        result_code != IVS_RESULT_PWD_EXPIRED &&\n        result_code != IVS_RESULT_FIRST_LOGIN) {\n      std::string msg = \"Failed to parse login result, result code:\" +\n                        std::to_string(result_code);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    if (result_code == IVS_RESULT_PWD_EXPIRED) {\n      MBLOG_WARN << \"restful login password has expired\";\n    }\n\n    if (result_code == IVS_RESULT_FIRST_LOGIN) {\n      MBLOG_WARN << \"restful is first login\";\n    }\n\n    if (!resp.has_header(\"Set-Cookie\")) {\n      std::string msg = \"Failed to parse login result, cookie isn't exist\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    
}\n\n    auto cookie = resp.get_header_value(\"Set-Cookie\");\n    auto cookies = modelbox::StringSplit(cookie, ';');\n    const std::string COOKIE_PREFIX = \"JSESSIONID=\";\n    for (auto &cookie_value : cookies) {\n      auto pos = cookie_value.find(COOKIE_PREFIX);\n      if (pos != std::string::npos) {\n        restful_info.jsession_id = cookie_value.substr(COOKIE_PREFIX.size());\n        break;\n      }\n    }\n\n    if (restful_info.jsession_id.empty()) {\n      std::string msg = \"Failed to parse login result, jsession id is empty\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  } catch (std::exception const &e) {\n    std::string msg = std::string(\"catch exception:\") + e.what();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::Logout(const VcnRestfulInfo &restful_info) {\n  if (!IsRestfulInfoValid(restful_info)) {\n    std::string msg = \"Failed to restful logout, restful info is invalid\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_INVALID, msg};\n  }\n\n  std::string uri = \"https://\" + restful_info.ip + \":\" + restful_info.port;\n  const std::string COOKIE_PREFIX = \"JSESSIONID=\";\n\n  httplib::Headers headers;\n  headers.insert({\"Content-Type\", \"application/json\"});\n  headers.insert({\"Cache-Control\", \"no-cache\"});\n  headers.insert({\"Cookie\", COOKIE_PREFIX + restful_info.jsession_id});\n\n  httplib::Response resp;\n  auto ret = SendRequest(uri, RESTFUL_LOGOUT_URL, \"\", headers, REQ_GET, resp);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful logout, send request fail reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (resp.status != HTTP_STATUS_CODE_OK) {\n    std::string msg =\n        \"Failed to restful logout, result code:\" + std::to_string(resp.status);\n    MBLOG_ERROR << msg;\n    return 
{modelbox::STATUS_FAULT, msg};\n  }\n\n  try {\n    auto result_json = nlohmann::json::parse(resp.body);\n    if (!result_json.contains(\"resultCode\") ||\n        !result_json[\"resultCode\"].is_number_unsigned()) {\n      std::string msg =\n          \"Failed to parse logout, result code isn't exist or invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    auto result_code = result_json[\"resultCode\"].get<uint64_t>();\n    if (result_code != IVS_RESULT_SUCCESS) {\n      std::string msg =\n          \"Failed to parse logout, result code:\" + std::to_string(result_code);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::GetUrl(const VcnRestfulInfo &restful_info,\n                                           std::string &rtsp_url) {\n  if (!IsRestfulInfoValid(restful_info)) {\n    std::string msg = \"Failed to restful get url, restful info is invalid\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_INVALID, msg};\n  }\n\n  if (restful_info.camera_code.empty() ||\n      restful_info.stream_type > RESTFUL_STREAM_TYPE_MAX ||\n      restful_info.stream_type < 0) {\n    std::string msg = \"Failed to restful get url, parameters is invalid\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_INVALID, msg};\n  }\n\n  std::string uri = \"https://\" + restful_info.ip + \":\" + restful_info.port;\n  const std::string COOKIE_PREFIX = \"JSESSIONID=\";\n\n  httplib::Headers headers;\n  headers.insert({\"Content-Type\", \"application/json\"});\n  headers.insert({\"Cache-Control\", \"no-cache\"});\n  headers.insert({\"Cookie\", COOKIE_PREFIX + restful_info.jsession_id});\n\n  auto body = nlohmann::json::parse(R\"({\n    \"mediaURLParam\":{\n        \"broadCastType\":0,\n        \"packProtocolType\":1,\n        
\"protocolType\":2,\n        \"serviceType\":1,\n        \"streamType\":1,\n        \"transMode\":0,\n        \"clientType\":1\n    }\n})\");\n  body[\"cameraCode\"] = restful_info.camera_code;\n  body[\"mediaURLParam\"][\"streamType\"] = restful_info.stream_type;\n\n  httplib::Response resp;\n  auto ret = SendRequest(uri, RESTFUL_GET_RTSP_URL, body.dump(), headers,\n                         REQ_POST, resp);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful get url, send request fail reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (resp.status != HTTP_STATUS_CODE_OK) {\n    std::string msg =\n        \"Failed to restful get url, result code:\" + std::to_string(resp.status);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  ret = ParseRestfulGetUrlResult(resp, rtsp_url);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful get url, parse result fail reason:\" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::ParseRestfulGetUrlResult(\n    const httplib::Response &resp, std::string &rtsp_url) {\n  try {\n    auto result_json = nlohmann::json::parse(resp.body);\n    if (!result_json.contains(\"resultCode\") ||\n        !result_json[\"resultCode\"].is_number_unsigned()) {\n      std::string msg =\n          \"Failed to parse get url result, result code isn't exist or invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    auto result_code = result_json[\"resultCode\"].get<uint64_t>();\n    if (result_code != IVS_RESULT_SUCCESS) {\n      std::string msg = \"Failed to parse get url result, result code:\" +\n                        std::to_string(result_code);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    if 
(!result_json.contains(\"rtspURL\") ||\n        !result_json[\"rtspURL\"].is_string()) {\n      std::string msg =\n          \"Failed to get url result, rtspURL isn't exist or invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    rtsp_url = result_json[\"rtspURL\"].get<std::string>();\n\n    if (rtsp_url.empty()) {\n      std::string msg = \"Failed to parse get url result, rtsp_url is empty\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapper::KeepAlive(\n    const VcnRestfulInfo &restful_info) {\n  if (!IsRestfulInfoValid(restful_info)) {\n    std::string msg = \"Failed to restful keep alive, restful info is invalid\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_INVALID, msg};\n  }\n\n  std::string uri = \"https://\" + restful_info.ip + \":\" + restful_info.port;\n  const std::string COOKIE_PREFIX = \"JSESSIONID=\";\n\n  httplib::Headers headers;\n  headers.insert({\"Content-Type\", \"application/json\"});\n  headers.insert({\"Cache-Control\", \"no-cache\"});\n  headers.insert({\"Cookie\", COOKIE_PREFIX + restful_info.jsession_id});\n\n  httplib::Response resp;\n  auto ret =\n      SendRequest(uri, RESTFUL_KEEP_ALIVE_URL, \"\", headers, REQ_GET, resp);\n  if (modelbox::STATUS_OK != ret) {\n    std::string msg =\n        \"Failed to restful keep alive, send request fail reason:\" +\n        ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (resp.status != HTTP_STATUS_CODE_OK) {\n    std::string msg = \"Failed to restful keep alive, result code:\" +\n                      std::to_string(resp.status);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  try {\n    auto result_json = nlohmann::json::parse(resp.body);\n    if 
(!result_json.contains(\"resultCode\") ||\n        !result_json[\"resultCode\"].is_number_unsigned()) {\n      std::string msg =\n          \"Failed to restful keep alive, result code isn't exist or invalid\";\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    auto result_code = result_json[\"resultCode\"].get<uint64_t>();\n    if (result_code != IVS_RESULT_SUCCESS) {\n      std::string msg = \"Failed to restful keep alive, result code:\" +\n                        std::to_string(result_code);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool VcnRestfulWrapper::IsRestfulInfoValid(const VcnRestfulInfo &restful_info,\n                                           bool is_login) {\n  if (restful_info.user_name.empty()) {\n    MBLOG_ERROR << \"user name is empty\";\n    return false;\n  }\n\n  if (restful_info.password.empty()) {\n    MBLOG_ERROR << \"user password is empty\";\n    return false;\n  }\n\n  if (restful_info.ip.empty()) {\n    MBLOG_ERROR << \"ip is empty\";\n    return false;\n  }\n\n  if (restful_info.port.empty()) {\n    MBLOG_ERROR << \"port is empty\";\n    return false;\n  }\n\n  if (is_login) {\n    return true;\n  }\n\n  if (restful_info.jsession_id.empty()) {\n    MBLOG_ERROR << \"jsession id is empty\";\n    return false;\n  }\n\n  return true;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_wrapper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_H_\n#define MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_H_\n\n#include <modelbox/base/status.h>\n\n#include <string>\n#include <unordered_map>\n\n#define CPPHTTPLIB_OPENSSL_SUPPORT\n#include <httplib.h>\n\n#include \"vcn_info.h\"\n\nnamespace modelbox {\n\ntypedef struct tag_VcnRestfulInfo : public VcnInfo {\n  tag_VcnRestfulInfo(const VcnInfo &info) : VcnInfo(info) {}\n  tag_VcnRestfulInfo() = default;\n  std::string jsession_id;\n} VcnRestfulInfo;\n\ntypedef enum REQ_METHOD { REQ_GET, REQ_POST } REQ_METHOD;\n\nusing HttpcliFunc = httplib::Result(httplib::Client &cli,\n                                    const std::string &path,\n                                    const httplib::Headers &headers,\n                                    const std::string &body);\n\nusing HttpcliFuncMap =\n    std::unordered_map<REQ_METHOD, std::function<HttpcliFunc>>;\n\n/**\n * @brief   wrap the vcn restful apis; help to be mocked.\n */\nclass VcnRestfulWrapper {\n public:\n  VcnRestfulWrapper();\n  virtual ~VcnRestfulWrapper() = default;\n\n  /**\n   * @brief   restful login a vcn account\n   * @param   restful_info - in, a VcnRestfulInfo object, containing information\n   * to login.\n   * @return  Successful or not\n   */\n  virtual modelbox::Status Login(VcnRestfulInfo 
&restful_info);\n\n  /**\n   * @brief   restful logout a vcn account\n   * @param   restful_info - in, a VcnRestfulInfo object, containing\n   * information to logout.\n   * @return  Successful or not\n   */\n  virtual modelbox::Status Logout(const VcnRestfulInfo &restful_info);\n\n  virtual modelbox::Status GetUrl(const VcnRestfulInfo &restful_info,\n                                  std::string &rtsp_url);\n\n  virtual modelbox::Status KeepAlive(const VcnRestfulInfo &restful_info);\n\n private:\n  bool IsRestfulInfoValid(const VcnRestfulInfo &restful_info,\n                          bool is_login = false);\n\n  modelbox::Status ParseRestfulLoginResult(const httplib::Response &resp,\n                                           VcnRestfulInfo &restful_info);\n\n  modelbox::Status ParseRestfulGetUrlResult(const httplib::Response &resp,\n                                            std::string &rtsp_url);\n\n  modelbox::Status SendRequest(const std::string &uri, const std::string &path,\n                               const std::string &body,\n                               const httplib::Headers &headers,\n                               REQ_METHOD method, httplib::Response &resp);\n\n  HttpcliFuncMap httpcli_func_map_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_wrapper_mock_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_restful_wrapper_mock_test.h\"\n\n#include <modelbox/base/log.h>\n\nconst std::string J_SESSION_ID_SHOULD_BE =\n    \"C3AECD84E65268D2731DA3146FDEF9B0C4B14B5C2A37EDA381D47927E993B\";\n\nnamespace modelbox {\nmodelbox::Status VcnRestfulWrapperMock::Login(VcnRestfulInfo &restful_info) {\n  std::string user_name_should_be = \"user\";\n  std::string pwd_should_be = \"password\";\n  std::string ip_should_be = \"192.168.1.1\";\n  std::string port_should_be = \"666\";\n  std::string camera_code_should_be =\n      \"01234567890123456789#01234567890123456789012345678901\";\n  uint32_t ip_type_should_be = 1;\n\n  if (user_name_should_be != restful_info.user_name) {\n    std::string msg = \"Failed to login, user name is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (pwd_should_be != restful_info.password) {\n    std::string msg = \"Failed to login, pwd is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (ip_should_be != restful_info.ip) {\n    std::string msg = \"Failed to login, ip is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (port_should_be != restful_info.port) {\n    std::string msg = \"Failed to login, port is not correct.\";\n    MBLOG_ERROR << msg;\n 
   return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (camera_code_should_be != restful_info.camera_code) {\n    std::string msg = \"Failed to login, camera code is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (ip_type_should_be != restful_info.stream_type) {\n    std::string msg = \"Failed to login, stream type is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  restful_info.jsession_id = J_SESSION_ID_SHOULD_BE;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapperMock::Logout(\n    const VcnRestfulInfo &restful_info) {\n  if (J_SESSION_ID_SHOULD_BE != restful_info.jsession_id) {\n    std::string msg = \"Failed to logout, session id is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapperMock::GetUrl(\n    const VcnRestfulInfo &restful_info, std::string &rtsp_url) {\n  if (J_SESSION_ID_SHOULD_BE != restful_info.jsession_id) {\n    std::string msg = \"Failed to get url, session id is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  rtsp_url = \"https://www.Hello_World.com\";\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnRestfulWrapperMock::KeepAlive(\n    const VcnRestfulInfo &restful_info) {\n  if (J_SESSION_ID_SHOULD_BE != restful_info.jsession_id) {\n    std::string msg = \"Failed to keep alive, session id is not correct.\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_restful_source_parser/vcn_restful_wrapper_mock_test.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_MOCK_H_\n#define MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_MOCK_H_\n\n#include <modelbox/base/status.h>\n\n#include \"vcn_restful_wrapper.h\"\n\nnamespace modelbox {\n\n/**\n * @brief   wrap the vcn sdk apis; help to be mocked.\n */\nclass VcnRestfulWrapperMock : public VcnRestfulWrapper {\n public:\n  modelbox::Status Login(VcnRestfulInfo &restful_info) override;\n  modelbox::Status Logout(const VcnRestfulInfo &restful_info) override;\n  modelbox::Status GetUrl(const VcnRestfulInfo &restful_info,\n                          std::string &rtsp_url) override;\n  modelbox::Status KeepAlive(const VcnRestfulInfo &restful_info) override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_VCN_RESTFUL_WRAPPER_MOCK_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"vcn\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\ninclude_directories(${VCN_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/../vcn_common)\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/../vcn_common)\nset(MODELBOX_PLUGIN_VCN_COMMON_LIST ${CMAKE_CURRENT_LIST_DIR}/../vcn_common/vcn_info.cc)\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE} ${MODELBOX_PLUGIN_VCN_COMMON_LIST})\n \nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${VCN_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"vcn_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<VcnSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n  return;\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_client.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_client.h\"\n\n#include <securec.h>\n\n#define STREAM_TYPE_MAX 3\n#define RTSP_CLIENT_TYPE 1\n\nnamespace modelbox {\n\nstd::mutex VcnClient::vcn_client_lock_;\n\nVcnClient::~VcnClient() { DeInit(); }\n\nstd::shared_ptr<VcnClient> VcnClient::GetInstance() {\n  static std::shared_ptr<VcnClient> vcn_client(new VcnClient());\n\n  std::lock_guard<std::mutex> lock(vcn_client_lock_);\n  static bool is_initialized = false;\n  if (true == is_initialized) {\n    return vcn_client;\n  }\n\n  auto ret = vcn_client->Init();\n  if (modelbox::STATUS_OK != ret.Code()) {\n    MBLOG_ERROR << ret.Errormsg();\n    return nullptr;\n  }\n  is_initialized = true;\n  return vcn_client;\n}\n\nmodelbox::Status VcnClient::SetSDKWrapper(\n    std::shared_ptr<VcnSdkWrapper> _sdk_wrapper) {\n  if (nullptr == _sdk_wrapper) {\n    return {modelbox::STATUS_INVALID, \"wrapper pointer is nullptr.\"};\n  }\n\n  if (nullptr != sdk_wrapper_) {\n    if (IVS_SUCCEED != sdk_wrapper_->VcnSdkCleanup()) {\n      MBLOG_WARN << \"Failed to clean up Vcn SDK resource.\";\n    }\n  }\n\n  sdk_wrapper_ = _sdk_wrapper;\n  sdk_wrapper_->VcnSdkInit();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::Init() {\n  IVS_INT32 ret_status = IVS_SUCCEED;\n  sdk_wrapper_ = std::shared_ptr<VcnSdkWrapper>(new VcnSdkWrapper);\n  ret_status = 
sdk_wrapper_->VcnSdkInit();\n  if (IVS_SUCCEED != ret_status) {\n    auto err_msg = \"Failed to initialize VCN SDK, error code: \" +\n                   std::to_string(ret_status);\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::DeInit() {\n  IVS_INT32 ret_status = IVS_SUCCEED;\n  ret_status = sdk_wrapper_->VcnSdkCleanup();\n  if (IVS_SUCCEED != ret_status) {\n    MBLOG_ERROR << \"Failed to clean up VCN SDK resources, error code: \" +\n                       std::to_string(ret_status);\n    return modelbox::STATUS_FAULT;\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::GetUrl(int32_t session_id,\n                                   const std::string &camera_code,\n                                   uint32_t stream_type, std::string &url) {\n  std::string errmsg = \"\";\n  if (session_id < 0 || camera_code.empty() || stream_type > STREAM_TYPE_MAX) {\n    return {modelbox::STATUS_INVALID, \"invalid parameters.\"};\n  }\n\n  IVS_URL_MEDIA_PARAM stUrlMediaPara;\n  memset_s(&stUrlMediaPara, sizeof(IVS_URL_MEDIA_PARAM), 0,\n           sizeof(IVS_URL_MEDIA_PARAM));\n  stUrlMediaPara.ServiceType = SERVICE_TYPE_REALVIDEO;\n  stUrlMediaPara.AudioDecType = AUDIO_DEC_G711U;\n  stUrlMediaPara.BroadCastType = BROADCAST_UNICAST;\n  stUrlMediaPara.PackProtocolType = PACK_PROTOCOL_ES;\n  stUrlMediaPara.ProtocolType = PROTOCOL_RTP_OVER_TCP;\n  stUrlMediaPara.TransMode = MEDIA_TRANS;\n  stUrlMediaPara.VideoDecType = VIDEO_DEC_H264;\n  stUrlMediaPara.iClientType = RTSP_CLIENT_TYPE;\n  if (stream_type == STREAM_TYPE_SUB1) {\n    stUrlMediaPara.StreamType = STREAM_TYPE_SUB1;\n  } else if (stream_type == STREAM_TYPE_SUB2) {\n    stUrlMediaPara.StreamType = STREAM_TYPE_SUB2;\n  } else {\n    stUrlMediaPara.StreamType = STREAM_TYPE_MAIN;\n  }\n  strncpy_s(stUrlMediaPara.stTimeSpan.cStart, IVS_TIME_LEN, \" \",\n            strnlen(\" \", IVS_TIME_LEN - 1));\n  strncpy_s(stUrlMediaPara.stTimeSpan.cEnd, 
IVS_TIME_LEN, \" \",\n            strnlen(\" \", IVS_TIME_LEN - 1));\n\n  IVS_INT32 iRet = sdk_wrapper_->VcnSdkGetUrl(session_id, camera_code.c_str(),\n                                              &stUrlMediaPara, url);\n  if (NeedToRetry(iRet)) {\n    url = \"\";\n    return modelbox::STATUS_AGAIN;\n  } else if (IVS_SUCCEED != iRet) {\n    url = \"\";\n    return {modelbox::STATUS_FAULT, std::to_string(iRet)};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool VcnClient::NeedToRetry(const IVS_INT32 error_code) {\n  switch (error_code) {\n    case IVS_SCU_ONLINE_USER_EXPIRE:\n    case IVS_SDK_RET_INVALID_SESSION_ID:\n      return true;\n    default:\n      break;\n  }\n\n  return false;\n}\n\nmodelbox::Status VcnClient::AddVcnStream(VcnInfo &info,\n                                         std::shared_ptr<VcnStream> &stream) {\n  std::string errmsg = \"\";\n  const std::string errmsg_prefix = \"Failed to add vcn stream: \";\n  if (!IsVcnInfoValid(info)) {\n    errmsg = errmsg_prefix + \"invalid info.\";\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  // check accounts record\n  std::shared_ptr<VcnAccount> account = nullptr;\n  std::lock_guard<std::mutex> lock(vcn_accounts_lock_);\n  auto itr = std::find_if(vcn_accounts_.begin(), vcn_accounts_.end(),\n                          [&info](std::shared_ptr<const VcnAccount> account_) {\n                            return (account_->GetUserName() == info.user_name &&\n                                    account_->GetIp() == info.ip &&\n                                    account_->GetPort() == info.port &&\n                                    account_->GetPassword() == info.password);\n                          });\n\n  if (vcn_accounts_.end() == itr) {\n    // account not found, try to log in.\n    account = std::shared_ptr<VcnAccount>(new VcnAccount(info));\n    int32_t session_id;\n    auto ret = LoginVcnAccount(account, session_id);\n    if (modelbox::STATUS_OK != ret || session_id < SESSION_ID_MIN ||\n        
session_id > SESSION_ID_MAX) {\n      stream = nullptr;\n      errmsg = errmsg_prefix + ret.Errormsg();\n      return {modelbox::STATUS_FAULT, errmsg};\n    }\n    account->SetSessionId(session_id);\n    vcn_accounts_.emplace_back(account);\n    itr = vcn_accounts_.end() - 1;\n  } else if ((*itr)->GetLoginState() == false) {\n    // the account has logged out, then log in again\n    account = *itr;\n    int32_t session_id;\n    auto ret = LoginVcnAccount(account, session_id);\n    if (modelbox::STATUS_OK != ret || session_id < SESSION_ID_MIN ||\n        session_id > SESSION_ID_MAX) {\n      stream = nullptr;\n      errmsg = errmsg_prefix + ret.Errormsg();\n      return {modelbox::STATUS_FAULT, errmsg};\n    }\n    account->SetSessionId(session_id);\n  } else {\n    account = *itr;\n  }\n\n  // get stream url\n  auto session_id = account->GetSessionId();\n  std::string url = \"\";\n  auto ret = GetUrl(session_id, info.camera_code, info.stream_type, url);\n\n  if (modelbox::STATUS_AGAIN == ret) {\n    // user login expired, need to log in again so set the session id to -1.\n    account->SetSessionId(-1);\n    errmsg = errmsg_prefix + \"user expired, try log in again.\";\n    stream = nullptr;\n    return {modelbox::STATUS_AGAIN, errmsg};\n  } else if (modelbox::STATUS_OK != ret) {\n    // something wrong happened and can not recover\n    errmsg = errmsg_prefix +\n             \"Failed to get stream url, user name: \" + account->GetUserName() +\n             \", session id: \" + std::to_string(session_id) +\n             \", camera code: \" + info.camera_code +\n             \", stream type: \" + std::to_string(info.stream_type) +\n             \", error code: \" + ret.Errormsg();\n    stream = nullptr;\n    return {ret.Code(), errmsg};\n  }\n  MBLOG_INFO << \"User name: \" << info.user_name\n             << \", session id: \" << session_id\n             << \", successfully get url: \" << url;\n\n  // add this stream record\n  stream = std::shared_ptr<VcnStream>(\n    
  new VcnStream(url, info.camera_code, session_id, account),\n      [this](VcnStream *stream) {\n        this->RemoveVcnStream(stream);\n        delete stream;\n      });\n  account->AddStream();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::RemoveVcnStream(VcnStream *stream) {\n  std::string errmsg = \"\";\n  const std::string errmsg_prefix = \"Failed to remove vcn stream: \";\n  if (nullptr == stream) {\n    MBLOG_ERROR << errmsg_prefix + \"stream ptr is nullptr.\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  std::lock_guard<std::mutex> lock(vcn_accounts_lock_);\n  auto account = stream->GetAccount();\n  account->RemoveStream();\n\n  if (account->GetStreamsCount() > 0) {\n    return modelbox::STATUS_OK;\n  }\n\n  auto ret = LogoutVcnAccount(account);\n  // Even though logout failed, the inactive account would automatically logout\n  // in a certain period.\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_WARN << errmsg_prefix + ret.Errormsg();\n  }\n\n  auto itr =\n      std::find_if(vcn_accounts_.begin(), vcn_accounts_.end(),\n                   [&account](std::shared_ptr<const VcnAccount> ele) {\n                     return (ele->GetUserName() == account->GetUserName() &&\n                             ele->GetIp() == account->GetIp() &&\n                             ele->GetPort() == account->GetPort() &&\n                             ele->GetPassword() == account->GetPassword());\n                   });\n  if (vcn_accounts_.end() == itr) {\n    MBLOG_WARN << \"The account to be deleted is NOT FOUND!\";\n    return modelbox::STATUS_NOTFOUND;\n  }\n  vcn_accounts_.erase(itr);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::LoginVcnAccount(\n    std::shared_ptr<const VcnAccount> account, int32_t &session_id) {\n  IVS_INT32 iRet = IVS_SUCCEED;\n  std::string errmsg = \"\";\n\n  IVS_LOGIN_INFO stLoginInfo = {0};\n  strncpy_s(stLoginInfo.cUserName, IVS_NAME_LEN, account->GetUserName().c_str(),\n            IVS_NAME_LEN - 
1);\n  strncpy_s(stLoginInfo.pPWD, IVS_PWD_LEN, account->GetPassword().c_str(),\n            IVS_PWD_LEN - 1);\n  strncpy_s(stLoginInfo.stIP.cIP, IVS_IP_LEN, account->GetIp().c_str(),\n            IVS_IP_LEN - 1);\n  int port = atoi(account->GetPort().c_str());\n  stLoginInfo.uiPort = port;\n  stLoginInfo.stIP.uiIPType = IP_V4;\n\n  if (!strncmp(stLoginInfo.pPWD, \"\", IVS_PWD_LEN - 1)) {\n    errmsg = \"Empty password.\";\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  MBLOG_INFO << \"Ready to login vcn account, user name: \"\n             << stLoginInfo.cUserName << \", ip: \" << stLoginInfo.stIP.cIP\n             << \", port: \" << stLoginInfo.uiPort;\n\n  iRet = sdk_wrapper_->VcnSdkLogin(&stLoginInfo, &session_id);\n  if (IVS_SUCCEED != iRet || session_id < SESSION_ID_MIN ||\n      session_id > SESSION_ID_MAX) {\n    errmsg = \"Failed to login, error code: \" + std::to_string(iRet);\n    session_id = -1;\n    return {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  MBLOG_INFO << \"Successfully login, session id: \" << session_id;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnClient::LogoutVcnAccount(\n    std::shared_ptr<VcnAccount> account) {\n  IVS_INT32 iRet = IVS_SUCCEED;\n  std::string errmsg = \"\";\n\n  if (account->GetLoginState() == false) {\n    return modelbox::STATUS_OK;\n  }\n  IVS_INT32 session_id = account->GetSessionId();\n  iRet = sdk_wrapper_->VcnSdkLogout(session_id);\n  if (IVS_SUCCEED != iRet) {\n    errmsg = \"Failed to logout, session id: \" + std::to_string(session_id) +\n             \", user name: \" + account->GetUserName() +\n             \", error code: \" + std::to_string(iRet);\n    return {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  MBLOG_INFO << \"Successfully logout, session id: \" << session_id\n             << \", user name: \" << account->GetUserName();\n  return modelbox::STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_client.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VCN_CLIENT_H_\n#define MODELBOX_FLOWUNIT_VCN_CLIENT_H_\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n\n#include <mutex>\n#include <vector>\n\n#include \"vcn_info.h\"\n#include \"vcn_sdk_wrapper.h\"\n\nnamespace modelbox {\n\nclass VcnAccount;\nclass VcnStream;\n\n/**\n * @brief This is a singleton class, in charge of all about the VCN SDK.\n */\nclass VcnClient {\n public:\n  /**\n   * @brief   Get an VcnClient object.\n   * @return  Pointer to an VcnClient object.\n   *          Notes: return nullptr if it's failed to initialize VCN SDK.\n   */\n  static std::shared_ptr<VcnClient> GetInstance();\n\n  /**\n   * @brief   Add a vcn stream and get its url.\n   * @param   info - in, Vcn info.\n   * @param   stream - out, pointer to a VcnStream object. 
This object hold a\n   * vcn url;\n   * @return  Successful or not\n   */\n  modelbox::Status AddVcnStream(VcnInfo &info,\n                                std::shared_ptr<VcnStream> &stream);\n\n  /**\n   * @brief   Remove the vcn stream from VcnClient, and logout the responding\n   * account if necessary.\n   * @param   stream - in, pointer to a VcnStream object to be remove;\n   * @return  Successful or not\n   */\n  modelbox::Status RemoveVcnStream(VcnStream *stream);\n\n  /**\n   * @brief   Set a mock VCN SDK wrapper for the Unit Test.\n   * @param   _sdk_wrapper - pointer to an object which is derived from class\n   * 'VcnSdkWrapper';\n   * @return  Successful or not\n   */\n  modelbox::Status SetSDKWrapper(std::shared_ptr<VcnSdkWrapper> _sdk_wrapper);\n\n  virtual ~VcnClient();\n\n private:\n  VcnClient() = default;\n\n  /**\n   * @brief   Initialize the VCN SDK.\n   * @return  Successful or not\n   */\n  modelbox::Status Init();\n\n  /**\n   * @brief   Deinitialize the VCN SDK.\n   * @return  Successful or not\n   */\n  modelbox::Status DeInit();\n\n  /**\n   * @brief   loglogin a vcn account\n   * @param   account - in, pointer to a VcnAccount object, containing\n   * information to login.\n   * @param   session_id - out, An id assigned to every vcn account that\n   * successfully login.\n   * @return  Successful or not\n   */\n  modelbox::Status LoginVcnAccount(std::shared_ptr<const VcnAccount> account,\n                                   int32_t &session_id);\n\n  /**\n   * @brief   logout a vcn account\n   * @param   account - in, pointer to a VcnAccount object, containing\n   * information to logout.\n   * @return  Successful or not\n   */\n  modelbox::Status LogoutVcnAccount(std::shared_ptr<VcnAccount> account);\n\n  /**\n   * @brief   get the vcn stream url\n   * @param   session_id - in, pointer to a VcnAccount object, containing\n   * information to logout.\n   * @param   camera_code - in, camera code from vcn config.\n   * @param   stream_type - 
in, stream type from vcn config.\n   * @param   url - out, stream url\n   * @return  Successful or not\n   */\n  modelbox::Status GetUrl(int32_t session_id, const std::string &camera_code,\n                          uint32_t stream_type, std::string &url);\n\n  /**\n   * @brief   check whether the error can be solved by a RETRY.\n   * @param   error_code - in, vcn sdk error code.\n   * @return  true or false, need to retry or not.\n   */\n  bool NeedToRetry(const IVS_INT32 error_code);\n\n  static std::mutex vcn_client_lock_;\n  std::mutex vcn_accounts_lock_;  // lock before any operations applied to the\n                                  // vcn_accounts_\n  std::vector<std::shared_ptr<VcnAccount>> vcn_accounts_;\n  std::shared_ptr<VcnSdkWrapper> sdk_wrapper_;\n};\n\nclass VcnAccount : public VcnAccountBase {\n  friend class VcnClient;\n\n public:\n  VcnAccount(const VcnInfo &info) : VcnAccountBase(info) { session_id_ = -1; };\n\n  virtual ~VcnAccount(){};\n  bool GetLoginState() {\n    return session_id_ >= SESSION_ID_MIN && session_id_ <= SESSION_ID_MAX;\n  };\n\n  /**\n   * @brief   get vcn session id\n   * @return  session id\n   */\n  int32_t GetSessionId() const { return session_id_; };\n\n private:\n  void SetSessionId(int32_t session_id) { session_id_ = session_id; };\n\n  int32_t session_id_;\n};\n\nclass VcnStream : public VcnStreamBase {\n  friend class modelbox::VcnClient;\n\n public:\n  VcnStream(std::string url, std::string camera_code,\n            int32_t session_id, std::shared_ptr<VcnAccount> account)\n      : VcnStreamBase(url, camera_code),\n        account_(account),\n        session_id_(session_id){};\n\n  virtual ~VcnStream(){};\n\n private:\n  std::shared_ptr<VcnAccount> GetAccount() { return account_; };\n  std::shared_ptr<VcnAccount> account_;\n  int32_t session_id_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_VCN_CLIENT_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_sdk_wrapper.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"vcn_sdk_wrapper.h\"\n#include \"securec.h\"\n\n\nnamespace modelbox {\n\nIVS_INT32 VcnSdkWrapper::VcnSdkInit() { return IVS_SDK_Init(); }\n\nIVS_INT32 VcnSdkWrapper::VcnSdkLogin(IVS_LOGIN_INFO *login_req_info,\n                                     int32_t *session_id) {\n  if (nullptr == login_req_info || nullptr == session_id) {\n    return IVS_PARA_INVALID;\n  }\n  IVS_INT32 ret = IVS_SDK_Login(login_req_info, session_id);\n  return ret;\n}\n\nIVS_INT32 VcnSdkWrapper::VcnSdkLogout(IVS_INT32 session_id) {\n  IVS_INT32 ret = IVS_SDK_Logout(session_id);\n  return ret;\n}\n\nIVS_INT32 VcnSdkWrapper::VcnSdkGetUrl(\n    IVS_INT32 session_id, const IVS_CHAR *camera_code,\n    const IVS_URL_MEDIA_PARAM *url_media_param, std::string &rtsp_url) {\n  if (nullptr == camera_code || nullptr == url_media_param) {\n    return IVS_PARA_INVALID;\n  }\n  IVS_CHAR url[MAX_VCN_URL_LENGTH];\n  memset_s(url, MAX_VCN_URL_LENGTH, 0, MAX_VCN_URL_LENGTH);\n\n  IVS_INT32 ret = IVS_SDK_GetRtspURL(session_id, camera_code, url_media_param,\n                                     url, MAX_VCN_URL_LENGTH);\n  rtsp_url = url;\n  return ret;\n}\n\nIVS_INT32 VcnSdkWrapper::VcnSdkCleanup() { return IVS_SDK_Cleanup(); }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_sdk_wrapper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n\n#ifndef MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_H_\n#define MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_H_\n\n#include <hwsdk.h>\n#include <IVS_SDK.h>\n#include <ivs_error.h>\n\n#include <modelbox/base/status.h>\n\n#define MAX_VCN_URL_LENGTH 2048\n#define SESSION_ID_MIN 0\n#define SESSION_ID_MAX 127\n\nnamespace modelbox {\n\n/**\n * @brief   wrap the vcn sdk apis; help to be mocked.\n */\nclass VcnSdkWrapper {\n  friend class VcnClient;\n\n private:\n  virtual IVS_INT32 VcnSdkInit();\n  virtual IVS_INT32 VcnSdkLogin(IVS_LOGIN_INFO *login_req_info,\n                                int32_t *session_id);\n  virtual IVS_INT32 VcnSdkLogout(IVS_INT32 session_id);\n  virtual IVS_INT32 VcnSdkGetUrl(IVS_INT32 session_id,\n                                 const IVS_CHAR *camera_code,\n                                 const IVS_URL_MEDIA_PARAM *url_media_param,\n                                 std::string &rtsp_url);\n  virtual IVS_INT32 VcnSdkCleanup();\n};\n\n\n}\n\n#endif  // MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_sdk_wrapper_mock_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"vcn_sdk_wrapper_mock_test.h\"\n#include <modelbox/base/log.h>\n\n#define SESSION_ID_FOR_TEST 127\n\nnamespace modelbox {\n\nIVS_INT32 VcnSdkWrapperMock::VcnSdkInit() { return IVS_SUCCEED; }\n\nIVS_INT32 VcnSdkWrapperMock::VcnSdkLogin(IVS_LOGIN_INFO *login_req_info,\n                                         int32_t *session_id) {\n  if (nullptr == login_req_info || nullptr == session_id) {\n    return IVS_PARA_INVALID;\n  }\n  std::string user_name = login_req_info->cUserName;\n  std::string ip = login_req_info->stIP.cIP;\n  int port = login_req_info->uiPort;\n  int ip_type = login_req_info->stIP.uiIPType;\n\n  std::string user_name_should_be = \"user\";\n  char pwd_should_be[] = \"password\";\n  std::string ip_should_be = \"192.168.1.1\";\n  int port_should_be = 666;\n  int ip_type_should_be = 0;\n\n  do {\n    if (user_name != user_name_should_be) {\n      MBLOG_ERROR << \"Failed to login, user name is not correct.\";\n      break;\n    }\n    if (strcmp(login_req_info->pPWD, pwd_should_be) != 0) {\n      MBLOG_ERROR << \"Failed to login, pwd is not correct.\";\n      break;\n    }\n    if (ip != ip_should_be) {\n      MBLOG_ERROR << \"Failed to login, ip is not correct.\";\n      break;\n    }\n    if (port != port_should_be) {\n      MBLOG_ERROR << \"Failed to login, port is not correct.\";\n    
  break;\n    }\n    if (ip_type != ip_type_should_be) {\n      MBLOG_ERROR << \"Failed to login, ip type is not correct.\";\n      break;\n    }\n    *session_id = SESSION_ID_FOR_TEST;\n    return IVS_SUCCEED;\n  } while (false);\n  \n  return IVS_PARA_INVALID;\n}\n\nIVS_INT32 VcnSdkWrapperMock::VcnSdkLogout(IVS_INT32 session_id) {\n  if (session_id < SESSION_ID_MIN || session_id > SESSION_ID_MAX) {\n    MBLOG_ERROR << \"Invalid session id: \" << session_id;\n    return IVS_PARA_INVALID;\n  }\n\n  return IVS_SUCCEED;\n}\n\nIVS_INT32 VcnSdkWrapperMock::VcnSdkGetUrl(\n    IVS_INT32 session_id, const IVS_CHAR *camera_code,\n    const IVS_URL_MEDIA_PARAM *url_media_param, std::string &rtsp_url) {\n  if (nullptr == camera_code || nullptr == url_media_param) {\n    MBLOG_ERROR\n        << \"Invalid parameters: camera_code or url_media_param is nullptr\";\n    rtsp_url = \"\";\n    return IVS_PARA_INVALID;\n  }\n\n  std::string camera_code_str = camera_code;\n  std::string camera_code_should_be =\n      \"01234567890123456789#01234567890123456789012345678901\";\n  if (url_media_param->StreamType != STREAM_TYPE_MAIN ||\n      camera_code_str != camera_code_should_be) {\n    MBLOG_ERROR << \"Invalid parameters: StreamType: \"\n                << url_media_param->StreamType\n                << \", camera code: \" << camera_code_str;\n    MBLOG_ERROR << \"Parameters should be: StreamType: \"\n                << STREAM_TYPE_MAIN\n                << \", camera code: \" << camera_code_should_be;\n    rtsp_url = \"\";\n    return IVS_PARA_INVALID;\n  }\n\n  rtsp_url = \"https://www.Hello_World.com\";\n  return IVS_SUCCEED;\n}\n\nIVS_INT32 VcnSdkWrapperMock::VcnSdkCleanup() { return IVS_SUCCEED; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_sdk_wrapper_mock_test.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_MOCK_H_\n#define MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_MOCK_H_\n\n#include <IVS_SDK.h>\n#include <modelbox/base/status.h>\n#include <hwsdk.h>\n#include <ivs_error.h>\n#include \"vcn_sdk_wrapper.h\"\n\nnamespace modelbox {\n\n/**\n * @brief   wrap the vcn sdk apis; help to be mocked.\n */\nclass VcnSdkWrapperMock : public VcnSdkWrapper {\n  friend class VcnClient;\n\n private:\n  IVS_INT32 VcnSdkInit() override;\n  IVS_INT32 VcnSdkLogin(IVS_LOGIN_INFO *login_req_info,\n                        int32_t *session_id) override;\n  IVS_INT32 VcnSdkLogout(IVS_INT32 session_id) override;\n  IVS_INT32 VcnSdkGetUrl(IVS_INT32 session_id, const IVS_CHAR *camera_code,\n                         const IVS_URL_MEDIA_PARAM *url_media_param,\n                         std::string &rtsp_url) override;\n  IVS_INT32 VcnSdkCleanup() override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_VCN_SDK_WRAPPER_MOCK_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vcn_source_parser.h\"\n\n#include <dirent.h>\n#include <securec.h>\n#include <stdio.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <nlohmann/json.hpp>\n#include <string>\n\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n\n#define STREAM_DEFAULT_RETRY_TIMES (-1)\n#define DEFAULT_RETRY_INTERVAL 1000\n#define RETRY_ON 1\n\nvoid RemoveFileCallback(std::string uri);\n\nVcnSourceParser::VcnSourceParser() {}\nVcnSourceParser::~VcnSourceParser() {}\n\nmodelbox::Status VcnSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\n      \"retry_count_limit\", DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES);\n\n  ReadConfVcnCommon(opts, retry_enabled_, retry_interval_, retry_max_times_);\n\n  MBLOG_INFO << \"vcn source parser config retry_enabled:\" << retry_enabled_\n             << \" retry_interval:\" << retry_interval_\n             << \" retry_max_times:\" << retry_max_times_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnSourceParser::Deinit() { 
return modelbox::STATUS_OK; }\n\nmodelbox::Status VcnSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  modelbox::VcnInfo vcn_info;\n  uri = \"\";\n\n  // read info from cfg\n  auto ret = GetVcnInfo(vcn_info, config);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << \"failed to get vcn info.\";\n    return ret;\n  }\n\n  auto vcn_client = modelbox::VcnClient::GetInstance();\n  if (nullptr == vcn_client) {\n    MBLOG_ERROR << \"failed to get vcn client instance.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::shared_ptr<modelbox::VcnStream> stream;\n  ret = vcn_client->AddVcnStream(vcn_info, stream);\n  if (modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << ret.Errormsg();\n    return ret;\n  }\n\n  uri = stream->GetUrl();\n  destroy_uri_func = [stream](const std::string &uri) {};\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VcnSourceParser::GetVcnInfo(modelbox::VcnInfo &vcn_info,\n                                             const std::string &config) {\n  return modelbox::GetVcnInfo(vcn_info, config);\n}\n\nmodelbox::Status VcnSourceParser::GetStreamType(const std::string &config,\n                                                std::string &stream_type) {\n  stream_type = \"stream\";\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\n#include \"vcn_client.h\"\n\nconstexpr const char *DRIVER_NAME = \"vcn\";\nconstexpr const char *DRIVER_DESC = \"A VCN data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nclass VcnSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  VcnSourceParser();\n  ~VcnSourceParser() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n\n  modelbox::Status GetStreamType(const std::string &config,\n                                 std::string &stream_type) override;\n\n private:\n  modelbox::Status GetVcnInfo(modelbox::VcnInfo &vcn_info,\n                              const std::string &config);\n};\n\nclass VcnSourceParserFactory : public modelbox::DriverFactory 
{\n public:\n  VcnSourceParserFactory() = default;\n  ~VcnSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<VcnSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VCN_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vcn_source_parser/vcn_source_parser_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"data_source_parser_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"vcn_client.h\"\n#include \"vcn_sdk_wrapper_mock_test.h\"\n\n#define CHECK_SOURCE_OUTPUT_VCN \"check_data_source_vcn_parser_output\"\n\nusing ::testing::_;\n\nnamespace modelbox {\n\nclass DataSourceVcnParserPluginTest : public testing::Test {\n public:\n  DataSourceVcnParserPluginTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n  void MockRestfulServer(std::shared_ptr<DriverFlowTest> &driver_flow);\n\n protected:\n  virtual void SetUp() {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  virtual void TearDown() { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n  std::shared_ptr<DriverFlowTest> RunDriverFlow(\n      const std::string mock_flowunit_name);\n  modelbox::Status SendDataSourceCfg(std::shared_ptr<DriverFlowTest> &driver_flow,\n                                   const std::string &data_source_cfg,\n                                   const std::string 
&source_type);\n\n private:\n  Status AddMockFlowUnit();\n  Status AddMockVcn();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> DataSourceVcnParserPluginTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<DriverFlowTest> DataSourceVcnParserPluginTest::RunDriverFlow(\n    const std::string mock_flowunit_name) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input, device=cpu, deviceid=0]\n          data_source_parser[type=flowunit, flowunit=data_source_parser, device=cpu, deviceid=0, label=\"\", plugin_dir=\")\" +\n                             test_lib_dir + R\"(\"]\n          )\" + mock_flowunit_name +\n                             R\"([type=flowunit, flowunit=)\" +\n                             mock_flowunit_name +\n                             R\"(, device=cpu, deviceid=0, label=\"\"]\n          input -> data_source_parser:in_data\n          data_source_parser:out_video_url -> )\" +\n                             mock_flowunit_name + R\"(:stream_meta\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(mock_flowunit_name, toml_content, -1);\n\n  return driver_flow;\n}\n\nmodelbox::Status DataSourceVcnParserPluginTest::SendDataSourceCfg(\n    std::shared_ptr<DriverFlowTest> &driver_flow,\n    const std::string &data_source_cfg, const std::string &source_type) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({data_source_cfg.size()});\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), data_source_cfg.data(),\n           data_source_cfg.size());\n  buffer->Set(\"source_type\", 
source_type);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Shutdown();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceVcnParserPluginTest::AddMockFlowUnit() {\n  AddMockVcn();\n  return modelbox::STATUS_OK;\n}\n\nStatus DataSourceVcnParserPluginTest::AddMockVcn() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(CHECK_SOURCE_OUTPUT_VCN);\n    desc_flowunit.SetDescription(CHECK_SOURCE_OUTPUT_VCN);\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit = std::string(TEST_DRIVER_DIR) +\n                                     \"/libmodelbox-unit-cpu-\" +\n                                     CHECK_SOURCE_OUTPUT_VCN + \".so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(CHECK_SOURCE_OUTPUT_VCN);\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"stream_meta\"));\n    mock_flowunit_desc->SetFlowType(modelbox::FlowType::STREAM);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](std::shared_ptr<DataContext> data_ctx) {\n              auto stream_meta = data_ctx->GetInputMeta(\"stream_meta\");\n              EXPECT_NE(stream_meta, nullptr);\n              if (!stream_meta) {\n                return modelbox::STATUS_SUCCESS;\n              }\n\n              auto 
source_url = std::static_pointer_cast<std::string>(\n                  stream_meta->GetMeta(\"source_url\"));\n              EXPECT_NE(source_url, nullptr);\n              if (source_url != nullptr) {\n                EXPECT_FALSE((*source_url).empty());\n                EXPECT_EQ(*source_url, \"https://www.Hello_World.com\");\n              }\n\n              return modelbox::STATUS_SUCCESS;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](std::shared_ptr<DataContext> data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](std::shared_ptr<DataContext> data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"Process\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(CHECK_SOURCE_OUTPUT_VCN, \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(DataSourceVcnParserPluginTest, VcnInputTest) {\n  auto driver_flow = RunDriverFlow(CHECK_SOURCE_OUTPUT_VCN);\n  auto vcn_client = modelbox::VcnClient::GetInstance();\n  std::shared_ptr<VcnSdkWrapper> wrapper_mock =\n      std::shared_ptr<VcnSdkWrapper>(new VcnSdkWrapperMock());\n  vcn_client->SetSDKWrapper(wrapper_mock);\n\n  std::string source_type = \"vcn\";\n  std::string data_source_cfg_1 = R\"({\n        \"userName\": \"user\",\n        \"password\":\"password\",\n        \"ip\":\"192.168.1.1\",\n        \"port\":\"666\",\n        
\"cameraCode\":\"01234567890123456789#01234567890123456789012345678901\",\n        \"streamType\":1\n  })\";\n\n  std::string data_source_cfg_2 = R\"({\n        \"userName\": \"user\",\n        \"password\":\"password\",\n        \"ip\":\"192.168.1.1\",\n        \"port\":\"666\",\n        \"cameraCode\":\"01234567890123456789#01234567890123456789012345678901\",\n        \"streamType\":1\n  })\";\n\n  auto ret_1 = SendDataSourceCfg(driver_flow, data_source_cfg_1, source_type);\n  auto ret_2 = SendDataSourceCfg(driver_flow, data_source_cfg_2, source_type);\n\n  EXPECT_EQ(ret_1, modelbox::STATUS_OK);\n  EXPECT_EQ(ret_2, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vis_source_parser/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"vis\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\ninclude_directories(${APIGW_CPP_INCLUDE_DIR})\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_IAM_AUTH_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${APIGW_CPP_LIBRARIES})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE 
${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vis_source_parser/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"vis_source_parser.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<VisSourceParserFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vis_source_parser/vis_source_parser.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"vis_source_parser.h\"\n\n#include <dirent.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/base/uuid.h>\n#include <modelbox/device/cpu/device_cpu.h>\n#include <modelbox/iam_auth.h>\n#include <securec.h>\n#include <stdio.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <ctime>\n#include <nlohmann/json.hpp>\n#include <string>\n\n#include \"cpprest/http_client.h\"\n#include \"signer.h\"\n\n#define STREAM_DEFAULT_RETRY_TIMES (-1)\n#define DEFAULT_RETRY_INTERVAL 1000\n#define RETRY_ON 1\n\nVisSourceParser::VisSourceParser() = default;\nVisSourceParser::~VisSourceParser() = default;\n\nmodelbox::Status VisSourceParser::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  retry_enabled_ = opts->GetBool(\"retry_enable\", DATASOURCE_PARSER_RETRY_ON);\n  retry_interval_ = opts->GetInt32(\"retry_interval_ms\",\n                                   DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL);\n  retry_max_times_ = opts->GetInt32(\n      \"retry_count_limit\", DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES);\n\n  retry_enabled_ = opts->GetBool(\"vis_retry_enable\", retry_enabled_);\n  retry_interval_ = opts->GetInt32(\"vis_retry_interval_ms\", retry_interval_);\n  retry_max_times_ = opts->GetInt32(\"vis_retry_count_limit\", retry_max_times_);\n\n  MBLOG_INFO << 
\"vis source parser config retry_enabled:\" << retry_enabled_\n             << \" retry_interval:\" << retry_interval_\n             << \" retry_max_times:\" << retry_max_times_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VisSourceParser::Deinit() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VisSourceParser::Parse(\n    const std::shared_ptr<modelbox::SessionContext> &session_context,\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config, std::string &uri,\n    modelbox::DestroyUriFunc &destroy_uri_func) {\n  VisInputInfo input_info;\n\n  if (GetVisInfo(input_info, config) != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to get vis input info\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (GetTempAKSKInfo(input_info) != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to get cert info! Invalid authorization.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string struri = \"/v1/\" + input_info.project_id + \"/streams/\" +\n                       input_info.stream_name + \"/endpoint\";\n  utility::string_t address = U(input_info.end_point + struri);\n\n  MBLOG_DEBUG << \"Vis request address: \" << address;\n  web::http::uri request_uri = web::http::uri(address);\n\n  web::http::client::http_client_config client_config;\n  client_config.set_timeout(utility::seconds(30));\n\n  if (input_info.cert_flag == false) {\n    client_config.set_validate_certificates(false);\n  } else {\n    client_config.set_ssl_context_callback([&](boost::asio::ssl::context &ctx) {\n      ctx.load_verify_file(\"/etc/pki/tls/certs/ca-bundle.crt\");\n    });\n  }\n\n  std::shared_ptr<web::http::client::http_client> client;\n  client = std::make_shared<web::http::client::http_client>(\n      web::http::uri_builder(request_uri).to_uri(), client_config);\n\n  web::http::http_headers headers;\n  size_t pos = input_info.end_point.find(\"://\", 0);\n  size_t offset = std::string(\"://\").length();\n  std::string 
endpoint = input_info.end_point.substr(pos + offset);\n  std::shared_ptr<RequestParams> request_self =\n      std::make_shared<RequestParams>(\"GET\", endpoint, struri, \"\");\n  request_self->addHeader(\"Content-Type\", \"application/json\");\n  Signer signer(input_info.ak, input_info.sk);\n  signer.createSignature(request_self.get());\n  for (auto header : *request_self->getHeaders()) {\n    headers.add(U(header.getKey()), U(header.getValue()));\n  }\n  headers.add(U(\"X-Project-Id\"), U(input_info.project_id));\n  headers.add(U(\"X-Security-Token\"), U(input_info.token));\n\n  web::http::http_request msg;\n  msg.set_method(web::http::methods::GET);\n  msg.headers() = headers;\n\n  try {\n    MBLOG_INFO << \"Send vis get stream request to \" << address;\n    web::http::http_response resp = client->request(msg).get();\n\n    std::string resp_info = resp.extract_string().get();\n    if (resp.status_code() == 200) {\n      MBLOG_INFO << \"Get input from vis success. Http response code: \"\n                 << resp.status_code() << \". 
Http response body: \" << resp_info;\n\n      nlohmann::json resp_json;\n      try {\n        resp_json = nlohmann::json::parse(resp_info);\n        if (resp_json.contains(\"pull_flow_address\")) {\n          uri = resp_json[\"pull_flow_address\"].get<std::string>();\n          if (uri.empty()) {\n            MBLOG_ERROR << \"pull_flow_address is empty!\";\n            return modelbox::STATUS_BADCONF;\n          }\n          MBLOG_DEBUG << \"pull_flow_address: \" << uri;\n          return modelbox::STATUS_OK;\n        }\n\n        if (resp_json.contains(\"hls_pull_flow_address\")) {\n          uri = resp_json[\"hls_pull_flow_address\"].get<std::string>();\n          if (uri.empty()) {\n            MBLOG_ERROR << \"hls_pull_flow_address is empty!\";\n            return modelbox::STATUS_BADCONF;\n          }\n          MBLOG_DEBUG << \"hls_pull_flow_address: \" << uri;\n          return modelbox::STATUS_OK;\n        }\n\n        MBLOG_ERROR << \"No avaliable pull flow address string in response.\";\n        return modelbox::STATUS_BADCONF;\n      } catch (const std::exception &e) {\n        MBLOG_ERROR << \"Parse response body to json failed, detail: \"\n                    << e.what();\n        return modelbox::STATUS_INVALID;\n      }\n    } else {\n      MBLOG_ERROR << \"Get input from vis failed.  Http response code: \"\n                  << resp.status_code()\n                  << \". 
Http response body: \" << resp_info;\n      return modelbox::STATUS_FAULT;\n    }\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status VisSourceParser::GetVisInfo(VisInputInfo &input_info,\n                                             const std::string &config) {\n  nlohmann::json config_json;\n  try {\n    config_json = nlohmann::json::parse(config);\n\n    std::string end_point;\n    end_point = config_json[\"visEndPoint\"].get<std::string>();\n    std::string::size_type idx;\n    std::string https_endpoint = \"https://\";\n    idx = end_point.find(https_endpoint);\n    if (idx != 0) {\n      end_point = \"https://\" + end_point;\n    }\n    input_info.end_point = end_point;\n    if (input_info.end_point.empty()) {\n      MBLOG_ERROR << \"Value of <visEndPoint> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    input_info.project_id = config_json[\"projectId\"].get<std::string>();\n    if (input_info.project_id.empty()) {\n      MBLOG_ERROR << \"Value of key <projectId> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    input_info.stream_name = config_json[\"streamName\"].get<std::string>();\n    if (input_info.stream_name.empty()) {\n      MBLOG_ERROR << \"Value of key <streamName> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    if (config_json.contains(\"domainName\")) {\n      input_info.domain_name = config_json[\"domainName\"].get<std::string>();\n      if (input_info.domain_name.empty()) {\n        MBLOG_DEBUG << \"Value of key <domainName> is empty!\";\n      }\n    }\n\n    if (config_json.contains(\"userId\")) {\n      input_info.user_id = config_json[\"userId\"].get<std::string>();\n      if (input_info.user_id.empty()) {\n        MBLOG_DEBUG << \"Value of key <userId> is empty!\";\n      }\n    }\n\n    if (config_json.contains(\"xroleName\")) {\n      input_info.xrole_name = config_json[\"xroleName\"].get<std::string>();\n 
     if (input_info.xrole_name.empty()) {\n        MBLOG_DEBUG << \"Value of key <xroleName> is empty!\";\n      }\n    }\n\n    if (config_json.contains(\"certificate\")) {\n      input_info.cert_flag = config_json[\"certificate\"].get<bool>();\n    } else {\n      input_info.cert_flag = true;\n    }\n\n    return modelbox::STATUS_OK;\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse data source config to json failed, detail: \"\n                << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n}\n\nmodelbox::Status VisSourceParser::GetTempAKSKInfo(VisInputInfo &input_info) {\n  modelbox::UserAgencyToken agency_user_token;\n  modelbox::AgencyInfo agency_info;\n  modelbox::ProjectInfo project_info;\n\n  agency_info.user_domain_name = input_info.domain_name;\n  agency_info.xrole_name = input_info.xrole_name;\n  project_info.project_id = input_info.project_id;\n\n  modelbox::UserAgencyCredential credential;\n  auto ret = modelbox::IAMAuth::GetInstance()->GetUserAgencyProjectCredential(\n      credential, agency_info, input_info.user_id);\n  if (ret != modelbox::STATUS_OK) {\n    std::string err_msg = \"Failed to get credential info!\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  input_info.ak = credential.user_ak;\n  input_info.sk = credential.user_sk;\n  input_info.token = credential.user_secure_token;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VisSourceParser::GetStreamType(const std::string &config,\n                                                std::string &stream_type) {\n  stream_type = \"stream\";\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/data_source_parser/parser_plugin/vis_source_parser/vis_source_parser.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VIS_CPU_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VIS_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_source_parser_plugin.h>\n\nconstexpr const char *DRIVER_NAME = \"vis\";\nconstexpr const char *DRIVER_DESC = \"An vis data source parser plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\ntypedef struct tag_VisInputInfo {\n  std::string ak;\n  std::string sk;\n  std::string token;\n  std::string user_id;\n  std::string domain_name;\n  std::string xrole_name;\n  std::string end_point;\n  std::string project_id;\n  std::string stream_name;\n  bool cert_flag;\n} VisInputInfo;\n\nclass VisSourceParser : public modelbox::DataSourceParserPlugin {\n public:\n  VisSourceParser();\n  ~VisSourceParser() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  modelbox::Status Parse(\n      const std::shared_ptr<modelbox::SessionContext> &session_context,\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config, std::string &uri,\n      modelbox::DestroyUriFunc &destroy_uri_func) override;\n\n  modelbox::Status GetStreamType(const std::string &config,\n       
                          std::string &stream_type) override;\n\n private:\n  modelbox::Status GetVisInfo(VisInputInfo &input_info,\n                              const std::string &config);\n\n  modelbox::Status GetTempAKSKInfo(VisInputInfo &input_info);\n};\n\nclass VisSourceParserFactory : public modelbox::DriverFactory {\n public:\n  VisSourceParserFactory() = default;\n  ~VisSourceParserFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<VisSourceParser>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_VIS_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"dlengine_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT DLENGINE_FOUND) \n    message(STATUS \"Not found dlengine, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.dlengine.cpu.inference.onnx.in ${TEST_WORKING_DATA_DIR}/dlengine_cpu/modelbox.test.dlengine.cpu.inference.onnx.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\ninclude_directories(${DLENGINE_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED 
${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${DLENGINE_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE 
${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/dlengine_cpu_inference_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_cpu_inference_flowunit.h\"\n\nconstexpr const char *BACKEND_TYPE = \"JwdLCz9nKiM=\";\n\nDLEngineCPUInferenceFlowUnit::DLEngineCPUInferenceFlowUnit()\n    : inference_(std::make_shared<DLEngineInference>()) {}\n\nDLEngineCPUInferenceFlowUnit::~DLEngineCPUInferenceFlowUnit() = default;\n\nmodelbox::Status DLEngineCPUInferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  if (!config->Contain(\"config.model_type\")) {\n    config->SetProperty(\"config.model_type\", \"onnx\");\n  }\n\n  // fix backend on cpu\n  config->SetProperty(\"config.backend_type\", BACKEND_TYPE);\n\n  return inference_->Init(config, GetFlowUnitDesc(), GetBindDevice()->GetType(),\n                          dev_id_);\n}\n\nmodelbox::Status DLEngineCPUInferenceFlowUnit::Close() {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineCPUInferenceFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return inference_->Infer(data_ctx);\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nDLEngineCPUInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  return std::make_shared<DLEngineCPUInferenceFlowUnit>();\n}\n\nstd::string 
DLEngineCPUInferenceFlowUnitFactory::GetFlowUnitFactoryType() {\n  return FLOWUNIT_TYPE;\n}\n\nstd::string DLEngineCPUInferenceFlowUnitFactory::GetVirtualType() {\n  return INFERENCE_TYPE;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/dlengine_cpu_inference_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n\n#include \"dlengine_inference_flowunit.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\n\nclass DLEngineCPUInferenceFlowUnit : public modelbox::FlowUnit {\n public:\n  DLEngineCPUInferenceFlowUnit();\n\n  ~DLEngineCPUInferenceFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &config) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::shared_ptr<DLEngineInference> inference_;\n};\n\nclass DLEngineCPUInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::string GetVirtualType() override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/dlengine_cpu_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_inference_flowunit_test.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nclass DLEngineCPUInferenceFlowUnitTest : public testing::Test {\n protected:\n  void SetUp() override {\n    test_impl_ = std::make_shared<DLEngineInferenceFlowUnitTest>(\"cpu\");\n  }\n\n  void TearDown() override { test_impl_ = nullptr; }\n\n  std::shared_ptr<DLEngineInferenceFlowUnitTest> test_impl_;\n};\n\nTEST_F(DLEngineCPUInferenceFlowUnitTest, OnnxRunUnit) {\n  auto ret = test_impl_->SetUp(\"dlengine_inference_onnx\");\n  ASSERT_EQ(ret, modelbox::STATUS_OK);\n  test_impl_->Run(\"dlengine_cpu_onnx_RunUnit\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/flowunit_desc.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_cpu_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n\nconstexpr const char *FLOWUNIT_DESC = \"A dlengine cpu inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  return std::make_shared<DLEngineCPUInferenceFlowUnitFactory>();\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n}\n\nmodelbox::Status DriverInit() { return modelbox::STATUS_OK; }\n\nvoid DriverFini() {}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/dlengine/test_toml/modelbox.test.dlengine.cpu.inference.onnx.in",
    "content": "[base]\nname = \"dlengine_inference_onnx\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"an dlengine cpu inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/test_model/test_dynamic.onnx\"\ntype = \"inference\"\nvirtual_type = \"dlengine\"\n\n[config]\nmodel_type = \"onnx\"\nprecision = \"FP16\" # FP32/FP16\n\n[input]\n[input.input1]\nname = \"in1\"\nmin_shape = \"1x3x16x16\"\nopt_shape = \"4x3x16x16\"\nmax_shape = \"8x3x16x16\"\n\n[input.input2]\nname = \"in2\"\nmin_shape = \"1x3x16x16\"\nopt_shape = \"4x3x16x16\"\nmax_shape = \"8x3x16x16\"\n\n[output]\n[output.output1]\nname = \"out\"\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/draw_bbox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"draw_bbox\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable draw_bbox flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_DRAW_BBOX_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_DRAW_BBOX_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DRAW_BBOX_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DRAW_BBOX_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_DRAW_BBOX_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/draw_bbox/draw_bbox_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"draw_bbox_flowunit.h\"\n#include <securec.h>\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nDrawBBoxFlowUnit::DrawBBoxFlowUnit() = default;\nDrawBBoxFlowUnit::~DrawBBoxFlowUnit() = default;\n\nmodelbox::Status DrawBBoxFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status DrawBBoxFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status DrawBBoxFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_INFO << \"process image draw bbox on cpu\";\n\n  auto input1_bufs = data_ctx->Input(\"in_region\");\n  if (input1_bufs->Size() <= 0) {\n    auto errMsg = \"in_region batch is \" + std::to_string(input1_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto input2_bufs = data_ctx->Input(\"in_image\");\n  if (input2_bufs->Size() <= 0) {\n    auto errMsg = \"in_image batch is \" + std::to_string(input2_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  if (input1_bufs->Size() != input2_bufs->Size()) {\n    auto errMsg = \"in_image batch is not match in_region batch. 
in_image is \" +\n                  std::to_string(input1_bufs->Size()) + \",in_region is \" +\n                  std::to_string(input2_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_image\");\n\n  std::vector<size_t> shape;\n  for (size_t i = 0; i < input2_bufs->Size(); ++i) {\n    shape.emplace_back(input2_bufs->At(i)->GetBytes());\n  }\n\n  output_bufs->Build(shape);\n  MBLOG_INFO << \"begin process batch\";\n  for (size_t i = 0; i < input1_bufs->Size(); ++i) {\n    // get bboxes\n    size_t num_bboxes = input1_bufs->At(i)->GetBytes() / sizeof(BBox);\n\n    MBLOG_INFO << \"num_bboxes: \" << num_bboxes;\n\n    std::vector<std::shared_ptr<BBox>> bboxs;\n    for (size_t j = 0; j < num_bboxes; ++j) {\n      std::shared_ptr<BBox> b = std::make_shared<BBox>();\n      memcpy_s(\n          b.get(), sizeof(BBox),\n          (const char *)(input1_bufs->ConstBufferData(i)) + (sizeof(BBox) * j),\n          sizeof(BBox));\n      bboxs.push_back(b);\n    }\n\n    // get images\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t channel = 0;\n    int32_t rate_den = 0;\n    int32_t rate_num = 0;\n    input2_bufs->At(i)->Get(\"width\", width);\n    input2_bufs->At(i)->Get(\"height\", height);\n    input2_bufs->At(i)->Get(\"channel\", channel);\n    input2_bufs->At(i)->Get(\"rate_den\", rate_den);\n    input2_bufs->At(i)->Get(\"rate_num\", rate_num);\n    std::string pix_fmt = \"rgb\";\n    input2_bufs->At(i)->Get(\"pix_fmt\", pix_fmt);\n\n    MBLOG_INFO << \"w:\" << width << \",h:\" << height << \",c:\" << channel;\n\n    cv::Mat image(height, width, CV_8UC3);\n    memcpy_s(image.data, image.total() * image.elemSize(),\n             input2_bufs->ConstBufferData(i), input2_bufs->At(i)->GetBytes());\n    MBLOG_INFO << \"end get images\";\n\n    // draw bboxes\n    for (auto &b : bboxs) {\n      MBLOG_DEBUG << \"draw bbox : has box \" << b->x << \" \" << b->y << \" \"\n           
       << b->w << \" \" << b->h << \" \" << b->score << \" \"\n                  << b->category;\n      cv::rectangle(image, cv::Point(b->x, b->y),\n                    cv::Point(b->x + b->w, b->y + b->h), cv::Scalar(255, 0, 0),\n                    5, 8, 0);\n    }\n\n    // output data\n    auto output_buffer = output_bufs->At(i);\n    auto *output_data = output_buffer->MutableData();\n    memcpy_s(output_data, output_buffer->GetBytes(), image.data,\n             image.total() * image.elemSize());\n    output_buffer->Set(\"width\", width);\n    output_buffer->Set(\"height\", height);\n    output_buffer->Set(\"width_stride\", width);\n    output_buffer->Set(\"height_stride\", height);\n    output_buffer->Set(\"channel\", channel);\n    output_buffer->Set(\"pix_fmt\", pix_fmt);\n    output_buffer->Set(\"layout\", std::string(\"hwc\"));\n    output_buffer->Set(\n        \"shape\",\n        std::vector<size_t>{(size_t)height, (size_t)width, (size_t)channel});\n    output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    output_buffer->Set(\"rate_den\", rate_den);\n    output_buffer->Set(\"rate_num\", rate_num);\n  }\n\n  MBLOG_INFO << \"draw bbox finish\";\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(DrawBBoxFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitInput({\"in_region\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/draw_bbox/draw_bbox_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_DRAWBBOXFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_DRAWBBOXFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"draw_bbox\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: draw a rectangle area on the input image. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n     \"\\t  The other input port 'in_region' buffer type is yolo boundingbox, the memory \"\n    \"arrangement is [float x,float y,float w,float h,int32_t condition,float score].\\n\"\n    \"\\t@Constraint: This flowunit can be only used follow the flowunit yolov3 postprocess'.\";\n\ntypedef struct BBox {\n  float x, y, w, h;\n  int category;\n  float score;\n} BBox;\n\nclass DrawBBoxFlowUnit : public modelbox::FlowUnit {\n public:\n  DrawBBoxFlowUnit();\n  ~DrawBBoxFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DRAWBBOXFLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/draw_bbox/draw_bbox_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"draw_bbox_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass DrawBBoxFlowUnitTest : public testing::Test {\n public:\n  DrawBBoxFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> DrawBBoxFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus DrawBBoxFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_0_1_draw_bbox\");\n    
desc_flowunit.SetDescription(\"the test in 0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-test_0_1_draw_bbox.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_0_1_draw_bbox\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_2\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              Status ret;\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n                ret = {STATUS_FAULT};\n                return ret;\n              }\n\n              auto buffer_list = ext_data->CreateBufferList();\n              buffer_list->Build({10 * sizeof(int)});\n              auto *data = (int *)buffer_list->MutableData();\n              for (size_t i = 0; i < 10; i++) {\n                data[i] = i;\n              }\n\n              auto status = ext_data->Send(buffer_list);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR 
<< \"external data close failed:\" << status;\n                return status;\n              }\n\n              MBLOG_INFO << \"send event test_0_1_draw_bbox\";\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto output_bufs = data_ctx->Output(\"Out_1\");\n\n              std::vector<size_t> shape;\n              shape.push_back(sizeof(BBox) * 2);\n              shape.push_back(sizeof(BBox) * 2);\n              shape.push_back(sizeof(BBox) * 2);\n              shape.push_back(sizeof(BBox) * 2);\n              shape.push_back(sizeof(BBox) * 2);\n\n              output_bufs->Build(shape);\n\n              for (size_t i = 0; i < 5; ++i) {\n                auto *output_data = output_bufs->MutableBufferData(i);\n                std::shared_ptr<BBox> b1 = std::make_shared<BBox>();\n                b1->w = 20;\n                b1->h = 20;\n                b1->x = 20;\n                b1->y = 20;\n                b1->category = 1;\n                b1->score = 0.95;\n\n                memcpy_s(output_data, sizeof(BBox), b1.get(), sizeof(BBox));\n\n                std::shared_ptr<BBox> b2 = std::make_shared<BBox>();\n             
   b2->w = 10;\n                b2->h = 30;\n                b2->x = 60;\n                b2->y = 60;\n                b2->score = 0.9;\n                b2->category = 0;\n\n                memcpy_s((char*)output_data + sizeof(BBox), sizeof(BBox),\n                         b2.get(), sizeof(BBox));\n              }\n\n              // get image data\n              auto output2_bufs = data_ctx->Output(\"Out_2\");\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              MBLOG_INFO << \"images path: \" << gimg_path;\n              cv::Mat img_data = cv::imread(gimg_path);\n\n              MBLOG_INFO << \"gimage col \" << img_data.cols << \"  grow \"\n                         << img_data.rows\n                         << \" gchannel:\" << img_data.channels();\n\n              int32_t gcols = img_data.cols;\n              int32_t grows = img_data.rows;\n              int32_t gchannels = img_data.channels();\n\n              std::vector<size_t> shape2;\n              shape2.push_back(img_data.total() * img_data.elemSize());\n              shape2.push_back(img_data.total() * img_data.elemSize());\n              shape2.push_back(img_data.total() * img_data.elemSize());\n              shape2.push_back(img_data.total() * img_data.elemSize());\n              shape2.push_back(img_data.total() * img_data.elemSize());\n              MBLOG_INFO << \"build\" << img_data.total() * img_data.elemSize();\n              output2_bufs->Build(shape2);\n\n              for (size_t i = 0; i < 5; ++i) {\n                MBLOG_DEBUG << \"image col \" << img_data.cols << \"  row \"\n                            << img_data.rows\n                            << \" channel:\" << img_data.channels();\n\n                output2_bufs->At(i)->Set(\"width\", gcols);\n                output2_bufs->At(i)->Set(\"height\", grows);\n                output2_bufs->At(i)->Set(\"channel\", gchannels);\n\n                auto *output2_data = 
output2_bufs->MutableBufferData(i);\n                memcpy_s(output2_data, output2_bufs->At(i)->GetBytes(),\n                         img_data.data, img_data.total() * img_data.elemSize());\n              }\n\n              MBLOG_INFO << \"finsish test_0_1_draw_bbox\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_0_1_draw_bbox\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_1_0_draw_bbox\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-test_1_0_draw_bbox.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_1_0_draw_bbox\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const 
std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"test_1_0_draw_bbox process\";\n              auto input = op_ctx->Input(\"In_1\");\n\n              for (size_t i = 0; i < input->Size(); i++) {\n                int32_t width = 0;\n                int32_t height = 0;\n                int32_t channel = 0;\n                input->At(i)->Get(\"width\", width);\n                input->At(i)->Get(\"height\", height);\n                input->At(i)->Get(\"channel\", channel);\n\n                MBLOG_DEBUG << \"w:\" << width << \",h:\" << height\n                            << \",c:\" << channel;\n                MBLOG_DEBUG << input->At(i)->GetBytes();\n\n                cv::Mat img_data(height, width, CV_8UC3);\n                memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                         input->ConstBufferData(i), input->At(i)->GetBytes());\n\n                std::string name = std::string(TEST_DATA_DIR) + \"/test\" +\n                                   std::to_string(i) + \".jpg\";\n                MBLOG_DEBUG << name;\n                cv::imwrite(name, img_data);\n              }\n              MBLOG_INFO << \"finish test_1_0_draw_bbox process\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, 
Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_1_0_draw_bbox\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n  return STATUS_OK;\n}\n\nTEST_F(DrawBBoxFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1_draw_bbox[type=flowunit, flowunit=test_0_1_draw_bbox, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\", batch_size=5]\n          draw_bbox[type=flowunit, flowunit=draw_bbox, device=cpu, deviceid=0, label=\"<in_image> | <in_region> | <out_image>\", batch_size=5]\n          test_1_0_draw_bbox[type=flowunit, flowunit=test_1_0_draw_bbox, device=cpu, deviceid=0, label=\"<In_1>\", batch_size=5]      \n\n          test_0_1_draw_bbox:Out_1 -> draw_bbox:in_region \n          test_0_1_draw_bbox:Out_2 -> draw_bbox:in_image\n          draw_bbox:out_image -> test_1_0_draw_bbox:In_1                                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"InitUnit\", toml_content, 3 * 1000);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  for (size_t i = 0; i < 5; ++i) {\n    std::string name =\n        std::string(TEST_DATA_DIR) + \"/test\" + std::to_string(i) + \".jpg\";\n    auto rmret = remove(name.c_str());\n    EXPECT_EQ(rmret, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_async/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"httpserver_async\")\n \nproject(modelbox-flowunit-${UNIT_NAME}-${UNIT_DEVICE})\n \nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT Boost_FOUND) \n    message(STATUS \"Not found boost, skip build http server async\")\n    return()\nendif()\n\nif (NOT CPPREST_FOUND) \n    message(STATUS \"Not found cpprest, skip build http server async\")\n    return()\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${CPPREST_INCLUDE_DIR})\ninclude_directories(${OPENSSL_INCLUDE_DIR})\ninclude_directories(${Boost_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_SAFE_HTTP_INCLUDE})\n \nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES 
\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n \nset(MODELBOX_UNIT_LINK_LIBRARY ${CPPREST_LIBRARIES} ${Boost_LIBRARIES} ${OPENSSL_LIBRARIES})\n \ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_SAFE_HTTP_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n \ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n \nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n \n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_async/httpserver_async.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"httpserver_async.h\"\n\n#include <securec.h>\n\n#include \"modelbox/base/crypto.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nHTTPServerAsync::HTTPServerAsync() = default;\nHTTPServerAsync::~HTTPServerAsync() = default;\n\nmodelbox::Status HTTPServerAsync::HandleFunc(web::http::http_request request) {\n  if (request.request_uri().to_string() == \"/health\") {\n    HandleHealthCheck(request);\n    return modelbox::STATUS_OK;\n  }\n\n  RequestInfo request_info;\n  request_info.method = request.method();\n  request_info.uri = request.request_uri().to_string();\n  for (auto &head : request.headers()) {\n    request_info.headers_map[head.first] = head.second;\n  }\n\n  request.extract_string().then(\n      [request, request_info,\n       this](const pplx::task<utility::string_t> &t) mutable {\n        try {\n          request_info.request_body = t.get();\n          auto handle_status = HandleTask(request, request_info);\n          if (handle_status == modelbox::STATUS_BUSY) {\n            SafeReply(request, web::http::status_codes::TooManyRequests);\n          } else if (handle_status == modelbox::STATUS_FAULT ||\n                     handle_status == modelbox::STATUS_NOMEM) {\n            SafeReply(request, web::http::status_codes::InternalError);\n          }\n        } catch (const std::exception 
&e) {\n          MBLOG_ERROR << \"get request body error\" << e.what();\n          SafeReply(request, web::http::status_codes::BadRequest);\n        }\n      });\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerAsync::HandleTask(\n    const web::http::http_request &request, const RequestInfo &request_info) {\n  auto http_limiter = HttpRequestLimiter::GetInstance();\n  if (http_limiter == nullptr) {\n    return modelbox::STATUS_BUSY;\n  }\n\n  auto size = request_info.request_body.size();\n  std::vector<std::size_t> shape = {size};\n  auto ext_data = this->CreateExternalData();\n  if (!ext_data) {\n    MBLOG_ERROR << \"can not get external data.\";\n    return modelbox::STATUS_FAULT;\n  }\n  auto session_cxt = ext_data->GetSessionContext();\n  session_cxt->SetPrivate(\"http_limiter_\" + session_cxt->GetSessionId(),\n                          http_limiter);\n  auto output_buf = ext_data->CreateBufferList();\n  output_buf->Build(shape);\n  if (size > 0) {\n    auto *outmem = output_buf->MutableBufferData(0);\n    if (outmem == nullptr) {\n      MBLOG_ERROR << \"outmem buffer is nullptr.\";\n      return modelbox::STATUS_NOMEM;\n    }\n\n    auto ret = memcpy_s(outmem, size, request_info.request_body.data(), size);\n    if (EOK != ret) {\n      MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \" << size\n                  << \", dest size \" << size;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  output_buf->At(0)->Set(\"size\", size);\n  output_buf->At(0)->Set(\"method\", (std::string)request_info.method);\n  output_buf->At(0)->Set(\"uri\", (std::string)request_info.uri);\n  output_buf->At(0)->Set(\"headers\", request_info.headers_map);\n  output_buf->At(0)->Set(\"endpoint\", request_url_);\n  output_buf->At(0)->SetGetBufferType(modelbox::BufferEnumType::STR);\n  auto status = ext_data->Send(output_buf);\n  if (!status) {\n    MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    return 
modelbox::STATUS_FAULT;\n  }\n\n  SafeReply(request, web::http::status_codes::Accepted);\n\n  status = ext_data->Close();\n  if (!status) {\n    MBLOG_ERROR << \"external data close failed:\" << status;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerAsync::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  request_url_ = opts->GetString(\"endpoint\", \"\");\n  if (request_url_.empty()) {\n    request_url_ = \"http://127.0.0.1:8080\";\n    MBLOG_WARN << \"endpoint not set, use default endpoint: \" << request_url_;\n  }\n\n  HttpRequestLimiter::max_request_ = opts->GetUint64(\"max_requests\", 1000);\n  std::atomic_init(&HttpRequestLimiter::request_count_, (size_t)0);\n  keep_alive_time_out_sec_ = opts->GetUint64(\"keepalive_timeout_sec\", 200);\n\n  std::string key;\n  std::string enpass;\n  std::string keypass;\n  const std::string cert = opts->GetString(\"cert\", \"\");\n  if (cert.length() > 0) {\n    if (access(cert.c_str(), R_OK) != 0) {\n      return {modelbox::STATUS_BADCONF, \"certificate file is invalid.\"};\n    }\n    key = opts->GetString(\"key\", \"\");\n    if (access(key.c_str(), R_OK) != 0) {\n      return {modelbox::STATUS_BADCONF, \"key file is invalid.\"};\n    }\n    enpass = opts->GetString(\"passwd\", \"\");\n    if (enpass.empty()) {\n      MBLOG_ERROR << \"password not set\";\n      return {modelbox::STATUS_BADCONF, \"password not set\"};\n    }\n    keypass = opts->GetString(\"key_pass\", \"\");\n    if (keypass.empty()) {\n      MBLOG_ERROR << \"password key not set\";\n      return {modelbox::STATUS_BADCONF, \"password key not set\"};\n    }\n  }\n\n  web::http::experimental::listener::http_listener_config server_config;\n  server_config.set_timeout(std::chrono::seconds(keep_alive_time_out_sec_));\n  if (cert.length() > 0 && key.length() > 0) {\n    server_config.set_ssl_context_callback(\n        [cert, key, enpass, keypass](boost::asio::ssl::context &ctx) {\n          
ctx.set_options(boost::asio::ssl::context::default_workarounds);\n          modelbox::HardeningSSL(ctx.native_handle());\n          if (enpass.length() > 0) {\n            ctx.set_password_callback(\n                [enpass, keypass](\n                    std::size_t max_length,\n                    boost::asio::ssl::context::password_purpose purpose)\n                    -> std::string {\n                  std::vector<char> pass;\n                  auto ret = modelbox::PassDecrypt(enpass, keypass, &pass);\n                  if (!ret) {\n                    MBLOG_ERROR << \"key password is invalid\";\n                    return \"\";\n                  }\n                  std::string res;\n                  res.insert(res.begin(), pass.begin(), pass.end());\n                  return res;\n                });\n          }\n          ctx.use_certificate_file(\n              cert, boost::asio::ssl::context_base::file_format::pem);\n          ctx.use_private_key_file(key, boost::asio::ssl::context::pem);\n        });\n  }\n\n  listener_ =\n      std::make_shared<web::http::experimental::listener::http_listener>(\n          request_url_, server_config);\n  listener_->support(web::http::methods::POST,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::PUT,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::GET,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::DEL,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n\n  listener_->support(web::http::methods::TRCE, 
HandleUnSupportMethod);\n  listener_->support(web::http::methods::OPTIONS, HandleUnSupportMethod);\n\n  try {\n    listener_->open().wait();\n    MBLOG_INFO << \"start to listen : \" << request_url_;\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return {modelbox::STATUS_FAULT, e.what()};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerAsync::Close() {\n  listener_->close().wait();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerAsync::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto output_buf = data_ctx->Output(\"out_request_info\");\n  auto input_buf = data_ctx->External();\n\n  for (auto &buf : *input_buf) {\n    output_buf->PushBack(buf);\n  }\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(HTTPServerAsync, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.AddFlowUnitOutput({\"out_request_info\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetFlowUnitGroupType(\"Input\");\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"endpoint\", \"string\", true,\n                                                  \"https://127.0.0.1:8080\",\n                                                  \"http server listen URL.\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"max_requests\", \"integer\", true, \"1000\", \"max http request.\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"keepalive_timeout_sec\", \"integer\", false, \"200\",\n                               \"keep-alive timeout time(sec)\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"cert\", \"string\", false, \"\", \"cert file path\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"key\", \"string\", false, \"\", \"key file path\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"passwd\", \"string\", false, \"\", \"encrypted key file password.\"));\n  
desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"key_pass\", \"string\", false, \"\", \"key for encrypted password.\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_async/httpserver_async.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_H_\n#define MODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_H_\n\n#include \"cpprest/http_listener.h\"\n#include \"http_util.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"httpserver_async\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: Start a http/https server, reply to the response immediately \"\n    \"when a request is received, and output request info to next flowunit. 
\\n\"\n    \"\\t@Port parameter: The output port buffer contain the following meta \"\n    \"fields:\\n\"\n    \"\\t\\tField Name: size,        Type: size_t\\n\"\n    \"\\t\\tField Name: method,      Type: string\\n\"\n    \"\\t\\tField Name: uri,         Type: string\\n\"\n    \"\\t\\tField Name: headers,     Type: map<string,string>\\n\"\n    \"\\t\\tField Name: endpoint,    Type: string\\n\"\n    \"\\t  The the output port buffer data type is char * .\\n\"\n    \"\\t@Constraint: \";\n\nstruct RequestInfo {\n  web::http::method method;\n  utility::string_t uri;\n  std::map<std::string, std::string> headers_map;\n  utility::string_t request_body;\n};\n\nclass HTTPServerAsync : public modelbox::FlowUnit {\n public:\n  HTTPServerAsync();\n  ~HTTPServerAsync() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status HandleFunc(web::http::http_request request);\n\n  modelbox::Status HandleTask(const web::http::http_request &request,\n                              const RequestInfo &request_info);\n\n  std::shared_ptr<web::http::experimental::listener::http_listener> listener_;\n  uint64_t keep_alive_time_out_sec_{200};\n  std::string request_url_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_async/httpserver_async_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include \"common/mock_cert.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#define _TURN_OFF_PLATFORM_STRING\n#include \"cpprest/http_client.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\n#define REQUEST_URL \"https://localhost:56789\"\n\nnamespace modelbox {\nclass HttpServerAsyncFlowUnitTest : public testing::Test {\n public:\n  HttpServerAsyncFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> HttpServerAsyncFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus HttpServerAsyncFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"httpserver_async_post_unit\", {\"In_1\"}, {});\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto input_buf = op_ctx->Input(\"In_1\");\n      std::string request_url;\n      
input_buf->At(0)->Get(\"endpoint\", request_url);\n      EXPECT_EQ(REQUEST_URL, request_url);\n      auto* input_data = (char*)input_buf->ConstBufferData(0);\n      std::string request_body(input_data, input_buf->At(0)->GetBytes());\n      std::string method;\n      input_buf->At(0)->Get(\"method\", method);\n      std::string uri;\n      input_buf->At(0)->Get(\"uri\", uri);\n      if (method == \"PUT\") {\n        auto putvalue = web::json::value::object();\n        putvalue[\"param\"] = web::json::value::string(\n            \"{\\\"image_id\\\":0,\\\"algorithm\\\":\\\"face_detection\\\",\\\"alg_\"\n            \"threshold\\\":12.0}\");\n        putvalue[\"image\"] =\n            web::json::value::string(\"image base 64 data string put\");\n        std::string body_put = putvalue.serialize();\n        EXPECT_EQ(body_put, request_body);\n        EXPECT_EQ(\"/restdemo_put\", uri);\n      } else if (method == \"POST\") {\n        auto postvalue = web::json::value::object();\n        postvalue[\"param\"] = web::json::value::string(\n            \"{\\\"image_id\\\":100,\\\"algorithm\\\":\\\"vehicle_detection\\\",\"\n            \"\\\"detect_threshold\\\":0.5}\");\n        postvalue[\"image\"] =\n            web::json::value::string(\"image base 64 data string post\");\n        std::string body_post = postvalue.serialize();\n        EXPECT_EQ(body_post, request_body);\n        EXPECT_EQ(\"/restdemo_post\", uri);\n      } else if (method == \"GET\") {\n        std::string body_get;\n        EXPECT_EQ(body_get, request_body);\n        EXPECT_EQ(\"/restdemo_get\", uri);\n      } else if (method == \"DELETE\") {\n        auto delvalue = web::json::value::array();\n        delvalue[0] = web::json::value::string(\"image_id\");\n        std::string body_del = delvalue.serialize();\n        EXPECT_EQ(body_del, request_body);\n        EXPECT_EQ(\"/restdemo_del\", uri);\n      } else {\n        MBLOG_ERROR << \"unsupport method\";\n      }\n      return modelbox::STATUS_OK;\n    
};\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  return STATUS_OK;\n}\n\nvoid PutRequestAsync(\n    const web::http::uri& uri,\n    const web::http::client::http_client_config& client_config) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_put;\n\n  headers_put.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_put.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_put;\n  msg_put.set_method(web::http::methods::PUT);\n  msg_put.set_request_uri(_XPLATSTR(\"/restdemo_put\"));\n  msg_put.headers() = headers_put;\n  auto putvalue = web::json::value::object();\n  putvalue[\"param\"] = web::json::value::string(\n      R\"({\"image_id\":0,\"algorithm\":\"face_detection\",\"alg_threshold\":12.0})\");\n  putvalue[\"image\"] = web::json::value::string(\"image base 64 data string put\");\n  msg_put.set_body(putvalue);\n\n  try {\n    web::http::http_response resp_put = client.request(msg_put).get();\n    MBLOG_INFO << \"put response status codes: \" << resp_put.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n    return;\n  }\n}\n\nvoid PostRequestAsync(\n    const web::http::uri& uri,\n    const web::http::client::http_client_config& client_config) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_post;\n\n  headers_post.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_post.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_post;\n  
msg_post.set_method(web::http::methods::POST);\n  msg_post.set_request_uri(_XPLATSTR(\"/restdemo_post\"));\n  msg_post.headers() = headers_post;\n  auto postvalue = web::json::value::object();\n  postvalue[\"param\"] = web::json::value::string(\n      \"{\\\"image_id\\\":100,\\\"algorithm\\\":\\\"vehicle_detection\\\",\\\"detect_\"\n      \"threshold\\\":0.5}\");\n  postvalue[\"image\"] =\n      web::json::value::string(\"image base 64 data string post\");\n  msg_post.set_body(postvalue);\n\n  try {\n    web::http::http_response resp_post = client.request(msg_post).get();\n    MBLOG_INFO << \"post response status codes: \" << resp_post.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n    return;\n  }\n}\n\nvoid GetRequestAsync(\n    const web::http::uri& uri,\n    const web::http::client::http_client_config& client_config) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_get;\n\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_get;\n  msg_get.set_method(web::http::methods::GET);\n  msg_get.set_request_uri(_XPLATSTR(\"/restdemo_get\"));\n  msg_get.headers() = headers_get;\n\n  try {\n    web::http::http_response resp_get = client.request(msg_get).get();\n    MBLOG_INFO << \"get response status codes: \" << resp_get.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n    return;\n  }\n}\n\nvoid DelRequestAsync(\n    const web::http::uri& uri,\n    const web::http::client::http_client_config& client_config) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_del;\n\n  
headers_del.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_del.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_del;\n  msg_del.set_method(web::http::methods::DEL);\n  msg_del.set_request_uri(_XPLATSTR(\"/restdemo_del\"));\n  msg_del.headers() = headers_del;\n  auto delvalue = web::json::value::array();\n  delvalue[0] = web::json::value::string(\"image_id\");\n  msg_del.set_body(delvalue);\n\n  try {\n    web::http::http_response resp_del = client.request(msg_del).get();\n    MBLOG_INFO << \"del response status codes: \" << resp_del.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n    return;\n  }\n}\n\nTEST_F(HttpServerAsyncFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n\n  std::string cert_file_path = std::string(TEST_DATA_DIR) + \"/certificate.pem\";\n  std::string key_file_path = std::string(TEST_DATA_DIR) + \"/private_key.pem\";\n  std::string encrypt_passwd;\n  std::string passwd_key;\n\n  ASSERT_EQ(\n      GenerateCert(&encrypt_passwd, &passwd_key, key_file_path, cert_file_path),\n      STATUS_OK);\n\n  Defer {\n    remove(key_file_path.c_str());\n    remove(cert_file_path.c_str());\n  };\n\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                          \n          httpserver_async[type=flowunit, flowunit=httpserver_async, device=cpu, deviceid=0, label=\"<Out_1>\", endpoint=\")\" +\n                             std::string(REQUEST_URL) + R\"(\", cert=\")\" +\n                             cert_file_path + R\"(\", key=\")\" + key_file_path +\n                             R\"(\", passwd=\")\" + encrypt_passwd +\n                             R\"(\", key_pass=\")\" + passwd_key +\n                        
     R\"(\", max_requests=10]\n          httpserver_async_post_unit[type=flowunit, flowunit=httpserver_async_post_unit, device=cpu, deviceid=0, label=\"<In_1>\"]                       \n          httpserver_async:out_request_info -> httpserver_async_post_unit:In_1                                                                     \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"InitUnit\", toml_content, -1);\n\n  web::http::uri uri = web::http::uri(_XPLATSTR(REQUEST_URL));\n  web::http::client::http_client_config client_config;\n  client_config.set_timeout(utility::seconds(60));\n  client_config.set_ssl_context_callback([&](boost::asio::ssl::context& ctx) {\n    ctx.load_verify_file(cert_file_path);\n  });\n\n  std::vector<std::thread> threads;\n  for (int i = 0; i < 5; ++i) {\n    threads.emplace_back(PutRequestAsync, uri, client_config);\n    threads.emplace_back(DelRequestAsync, uri, client_config);\n    threads.emplace_back(PostRequestAsync, uri, client_config);\n    threads.emplace_back(GetRequestAsync, uri, client_config);\n  }\n  for (auto& th : threads) {\n    th.join();\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"httpserver_sync\")\n\nif (NOT Boost_FOUND) \n    message(STATUS \"Not found boost, skip build http server sync\")\n    return()\nendif()\n\nif (NOT CPPREST_FOUND) \n    message(STATUS \"Not found cpprest, skip build http server sync\")\n    return()\nendif()\n\nproject(modelbox-flowunit-${UNIT_NAME}-${UNIT_DEVICE})\n \nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${CPPREST_INCLUDE_DIR})\ninclude_directories(${OPENSSL_INCLUDE_DIR})\ninclude_directories(${Boost_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_SAFE_HTTP_INCLUDE})\n \nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_ASYNC_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n   
 SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n \nset(MODELBOX_UNIT_LINK_LIBRARY ${CPPREST_LIBRARIES} ${Boost_LIBRARIES} ${OPENSSL_LIBRARIES})\n \ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_SAFE_HTTP_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n \ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n \nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_SYNC_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_SYNC_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_SYNC_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_HTTPSERVER_SYNC_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n \n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/httpserver_sync.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"httpserver_sync\";\nconstexpr const char *FLOWUNIT_DESC = \"httpserver_sync contain flowunit 'httpserver_sync_receive' and 'httpserver_sync_reply'\";\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/httpserver_sync_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <errno.h>\n#include <securec.h>\n\n#include \"common/mock_cert.h\"\n#include \"driver_flow_test.h\"\n#include \"modelbox/base/crypto.h\"\n#define _TURN_OFF_PLATFORM_STRING\n#include \"cpprest/http_client.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nconstexpr const char* REQUEST_URL_HTTPS = \"https://localhost:54321\";\nconstexpr const char* REQUEST_URL_HTTP = \"http://localhost:54321\";\n\nnamespace modelbox {\nclass HttpServerSyncFlowUnitTest : public testing::Test {\n public:\n  HttpServerSyncFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> HttpServerSyncFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus HttpServerSyncFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"receive_post_unit\", {\"In_1\"}, {\"Out_1\"});\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto 
input_buf = op_ctx->Input(\"In_1\");\n      auto output_buf = op_ctx->Output(\"Out_1\");\n\n      std::string request_url;\n      input_buf->At(0)->Get(\"endpoint\", request_url);\n      EXPECT_EQ(REQUEST_URL_HTTPS, request_url);\n\n      const auto* input_data =\n          static_cast<const char*>(input_buf->ConstBufferData(0));\n      std::string request_body(input_data, input_buf->At(0)->GetBytes());\n      const utf8string& response_body = \"response_body: \" + request_body;\n      auto size = response_body.size();\n      std::vector<std::size_t> shape = {size};\n      output_buf->Build(shape);\n      memcpy_s(output_buf->MutableBufferData(0), size, response_body.data(),\n               size);\n\n      std::string uri;\n      input_buf->At(0)->Get(\"uri\", uri);\n      std::string method;\n      input_buf->At(0)->Get(\"method\", method);\n\n      if (method == \"PUT\") {\n        auto putvalue = web::json::value::object();\n        putvalue[\"param\"] = web::json::value::string(\n            \"{\\\"image_id\\\":0,\\\"algorithm\\\":\\\"face_detection\\\",\\\"alg_\"\n            \"threshold\\\":12.0}\");\n        putvalue[\"image\"] =\n            web::json::value::string(\"image base 64 data string put\");\n        std::string body_put = putvalue.serialize();\n        EXPECT_EQ(body_put, request_body);\n        EXPECT_EQ(\"/restdemo_put\", uri);\n      } else if (method == \"POST\") {\n        auto postvalue = web::json::value::object();\n        postvalue[\"param\"] = web::json::value::string(\n            \"{\\\"image_id\\\":100,\\\"algorithm\\\":\\\"vehicle_detection\\\",\"\n            \"\\\"detect_threshold\\\":0.5}\");\n        postvalue[\"image\"] =\n            web::json::value::string(\"image base 64 data string post\");\n        std::string body_post = postvalue.serialize();\n        EXPECT_EQ(body_post, request_body);\n        EXPECT_EQ(\"/restdemo_post\", uri);\n      } else if (method == \"GET\") {\n        std::string body_get;\n        
EXPECT_EQ(body_get, request_body);\n        EXPECT_EQ(\"/restdemo_get\", uri);\n      } else if (method == \"DELETE\") {\n        auto delvalue = web::json::value::array();\n        delvalue[0] = web::json::value::string(\"image_id\");\n        std::string body_del = delvalue.serialize();\n        EXPECT_EQ(body_del, request_body);\n        EXPECT_EQ(\"/restdemo_del\", uri);\n      } else {\n        MBLOG_ERROR << \"unsupported method\";\n      }\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"receive_health_post_unit\", {\"In_1\"}, {\"Out_1\"});\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto input_buf = op_ctx->Input(\"In_1\");\n      auto output_buf = op_ctx->Output(\"Out_1\");\n\n      std::string request_url;\n      input_buf->At(0)->Get(\"endpoint\", request_url);\n      EXPECT_EQ(REQUEST_URL_HTTP, request_url);\n\n      const auto* input_data =\n          static_cast<const char*>(input_buf->ConstBufferData(0));\n      std::string request_body(input_data, input_buf->At(0)->GetBytes());\n      const utf8string& response_body = \"response_body: \" + request_body;\n      auto size = response_body.size();\n      std::vector<std::size_t> shape = {size};\n      output_buf->Build(shape);\n      memcpy_s(output_buf->MutableBufferData(0), size, response_body.data(),\n               size);\n\n      std::string uri;\n      input_buf->At(0)->Get(\"uri\", uri);\n      std::string method;\n      input_buf->At(0)->Get(\"method\", method);\n\n      std::string health_uri{\"/health\"};\n      std::string body;\n      if (method == \"PUT\") {\n        
EXPECT_EQ(body, request_body);\n        EXPECT_EQ(health_uri, uri);\n      } else if (method == \"POST\") {\n        EXPECT_EQ(body, request_body);\n        EXPECT_EQ(health_uri, uri);\n      } else if (method == \"GET\") {\n        EXPECT_EQ(body, request_body);\n        EXPECT_EQ(health_uri, uri);\n      } else if (method == \"DELETE\") {\n        EXPECT_EQ(body, request_body);\n        EXPECT_EQ(health_uri, uri);\n      } else {\n        MBLOG_ERROR << \"unsupported method\";\n      }\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  return STATUS_OK;\n}\n\nvoid PutRequestSync(const web::http::uri& uri,\n                    const web::http::client::http_client_config& client_config,\n                    const std::string& request_uri) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_put;\n  headers_put.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_put.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_put;\n  msg_put.set_method(web::http::methods::PUT);\n  msg_put.set_request_uri(_XPLATSTR(request_uri));\n  msg_put.headers() = headers_put;\n  auto putvalue = web::json::value::object();\n  putvalue[\"param\"] = web::json::value::string(\n      R\"({\"image_id\":0,\"algorithm\":\"face_detection\",\"alg_threshold\":12.0})\");\n  putvalue[\"image\"] = web::json::value::string(\"image base 64 data string put\");\n  msg_put.set_body(putvalue);\n  try {\n    web::http::http_response resp_put = client.request(msg_put).get();\n    if (resp_put.status_code() == web::http::status_codes::OK) {\n      EXPECT_EQ(\"response_body: \" + putvalue.serialize(),\n 
               resp_put.extract_string().get());\n    } else {\n      EXPECT_EQ(\"\", resp_put.extract_string().get());\n    }\n    MBLOG_INFO << \"put response status codes: \" << resp_put.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n  }\n}\n\nvoid PostRequestSync(const web::http::uri& uri,\n                     const web::http::client::http_client_config& client_config,\n                     const std::string& request_uri) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_post;\n  headers_post.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_post.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_post;\n  msg_post.set_method(web::http::methods::POST);\n  msg_post.set_request_uri(_XPLATSTR(request_uri));\n  msg_post.headers() = headers_post;\n  auto postvalue = web::json::value::object();\n  postvalue[\"param\"] = web::json::value::string(\n      \"{\\\"image_id\\\":100,\\\"algorithm\\\":\\\"vehicle_detection\\\",\\\"detect_\"\n      \"threshold\\\":0.5}\");\n  postvalue[\"image\"] =\n      web::json::value::string(\"image base 64 data string post\");\n  msg_post.set_body(postvalue);\n  try {\n    web::http::http_response resp_post = client.request(msg_post).get();\n    if (resp_post.status_code() == web::http::status_codes::OK) {\n      EXPECT_EQ(\"response_body: \" + postvalue.serialize(),\n                resp_post.extract_string().get());\n    } else {\n      EXPECT_EQ(\"\", resp_post.extract_string().get());\n    }\n    MBLOG_INFO << \"post response status codes: \" << resp_post.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n  }\n}\n\nvoid GetRequestSync(const web::http::uri& uri,\n                    const web::http::client::http_client_config& 
client_config,\n                    const std::string& request_uri) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_get;\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_get;\n  msg_get.set_method(web::http::methods::GET);\n  msg_get.set_request_uri(_XPLATSTR(request_uri));\n  msg_get.headers() = headers_get;\n  try {\n    web::http::http_response resp_get = client.request(msg_get).get();\n    if (resp_get.status_code() == web::http::status_codes::OK) {\n      EXPECT_EQ(\"response_body: \", resp_get.extract_string().get());\n    } else {\n      EXPECT_EQ(\"\", resp_get.extract_string().get());\n    }\n    MBLOG_INFO << \"get response status codes: \" << resp_get.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n  }\n}\n\nvoid HealthCheckRequesSync(\n    const web::http::uri& uri,\n    const web::http::client::http_client_config& client_config,\n    const std::string& request_uri, const web::http::method& method) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_get;\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_get.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg;\n  msg.set_method(method);\n  msg.set_request_uri(_XPLATSTR(request_uri));\n  msg.headers() = headers_get;\n  auto value = web::json::value::object();\n  value[\"status\"] = web::json::value(200);\n  value[\"message\"] =\n      web::json::value::string(\"success\");\n  msg.set_body(value);\n  try {\n    web::http::http_response resp_get = client.request(msg).get();\n    
EXPECT_EQ(resp_get.status_code(), web::http::status_codes::OK);\n    EXPECT_EQ(value.serialize(), resp_get.extract_string().get());\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n  }\n}\n\nvoid DelRequestSync(const web::http::uri& uri,\n                    const web::http::client::http_client_config& client_config,\n                    const std::string& request_uri) {\n  web::http::client::http_client client(web::http::uri_builder(uri).to_uri(),\n                                        client_config);\n  web::http::http_headers headers_del;\n  headers_del.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"application/json\"));\n  headers_del.add(_XPLATSTR(\"Accept\"), _XPLATSTR(\"text/plain\"));\n  web::http::http_request msg_del;\n  msg_del.set_method(web::http::methods::DEL);\n  msg_del.set_request_uri(_XPLATSTR(request_uri));\n  msg_del.headers() = headers_del;\n  auto delvalue = web::json::value::array();\n  delvalue[0] = web::json::value::string(\"image_id\");\n  msg_del.set_body(delvalue);\n  try {\n    web::http::http_response resp_del = client.request(msg_del).get();\n    if (resp_del.status_code() == web::http::status_codes::OK) {\n      EXPECT_EQ(\"response_body: \" + delvalue.serialize(),\n                resp_del.extract_string().get());\n    } else {\n      EXPECT_EQ(\"\", resp_del.extract_string().get());\n    }\n    MBLOG_INFO << \"del response status codes: \" << resp_del.status_code();\n  } catch (std::exception const& e) {\n    MBLOG_ERROR << e.what();\n    ASSERT_TRUE(false);\n  }\n}\n\nTEST_F(HttpServerSyncFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string cert_file_path = std::string(TEST_DATA_DIR) + \"/certificate.pem\";\n  std::string key_file_path = std::string(TEST_DATA_DIR) + \"/private_key.pem\";\n  std::string encrypt_passwd;\n  std::string passwd_key;\n\n  ASSERT_EQ(\n      GenerateCert(&encrypt_passwd, &passwd_key, key_file_path, cert_file_path),\n      
STATUS_OK);\n\n  Defer {\n    remove(key_file_path.c_str());\n    remove(cert_file_path.c_str());\n  };\n\n  std::string toml_content =\n      R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" +\n      test_lib_dir + \"\\\"]\\n    \" +\n      R\"([graph]\n    graphconf = '''digraph demo {                                                                          \n          httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, deviceid=0, label=\"<out_request_info>\", endpoint=\")\" +\n      std::string(REQUEST_URL_HTTPS) + R\"(\", cert=\")\" + cert_file_path +\n      R\"(\", key=\")\" + key_file_path + R\"(\", passwd=\")\" + encrypt_passwd +\n      R\"(\", key_pass=\")\" + passwd_key +\n      R\"(\", max_requests=1000, time_out_ms=5000, keepalive_timeout_sec=10]\n          receive_post_unit[type=flowunit, flowunit=receive_post_unit, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"] \n          httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu, deviceid=0, label=\"<In_1>\"]        \n          httpserver_sync_receive:out_request_info -> receive_post_unit:In_1   \n          receive_post_unit:Out_1 -> httpserver_sync_reply:in_reply_info                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"InitUnit\", toml_content, -1);\n\n  web::http::uri uri = web::http::uri(_XPLATSTR(REQUEST_URL_HTTPS));\n  web::http::client::http_client_config client_config;\n  client_config.set_timeout(utility::seconds(60));\n  client_config.set_ssl_context_callback([&](boost::asio::ssl::context& ctx) {\n    ctx.load_verify_file(cert_file_path);\n  });\n\n  std::vector<std::thread> threads;\n  for (int i = 0; i < 5; ++i) {\n    threads.emplace_back(PutRequestSync, uri, client_config, \"/restdemo_put\");\n    threads.emplace_back(DelRequestSync, uri, client_config, 
\"/restdemo_del\");\n    threads.emplace_back(PostRequestSync, uri, client_config, \"/restdemo_post\");\n    threads.emplace_back(GetRequestSync, uri, client_config, \"/restdemo_get\");\n  }\n  for (auto& th : threads) {\n    th.join();\n  }\n}\n\nTEST_F(HttpServerSyncFlowUnitTest, HealthCheck) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content =\n      R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" +\n      test_lib_dir + \"\\\"]\\n    \" +\n      R\"([graph]\n    graphconf = '''digraph demo {                                                                          \n          httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, deviceid=0, label=\"<out_request_info>\", endpoint=\")\" +\n      std::string(REQUEST_URL_HTTP) +\n      R\"(\", max_requests=10, time_out_ms=5000, keepalive_timeout_sec=10]\n          receive_post_unit[type=flowunit, flowunit=receive_health_post_unit, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"] \n          httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu, deviceid=0, label=\"<In_1>\"]        \n          httpserver_sync_receive:out_request_info -> receive_post_unit:In_1   \n          receive_post_unit:Out_1 -> httpserver_sync_reply:in_reply_info                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"InitUnit\", toml_content, -1);\n\n  web::http::uri uri = web::http::uri(_XPLATSTR(REQUEST_URL_HTTP));\n  web::http::client::http_client_config client_config;\n  client_config.set_timeout(utility::seconds(60));\n\n  std::vector<std::thread> threads;\n  std::string health_uri = \"/health\";\n  for (int i = 0; i < 5; ++i) {\n    threads.emplace_back(HealthCheckRequesSync, uri, client_config, health_uri,\n                         web::http::methods::GET);\n    
threads.emplace_back(HealthCheckRequesSync, uri, client_config, health_uri,\n                         web::http::methods::PUT);\n    threads.emplace_back(HealthCheckRequesSync, uri, client_config, health_uri,\n                         web::http::methods::POST);\n    threads.emplace_back(HealthCheckRequesSync, uri, client_config, health_uri,\n                         web::http::methods::DEL);\n  }\n  for (auto& th : threads) {\n    th.join();\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/receive/httpserver_sync_receive.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"httpserver_sync_receive.h\"\n\n#include <securec.h>\n\n#include \"modelbox/base/crypto.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nHTTPServerReceiveSync::HTTPServerReceiveSync() = default;\nHTTPServerReceiveSync::~HTTPServerReceiveSync() = default;\n\nmodelbox::Status HTTPServerReceiveSync::HandleFunc(\n    web::http::http_request request) {\n  if (request.request_uri().to_string() == \"/health\") {\n    HandleHealthCheck(request);\n    return modelbox::STATUS_OK;\n  }\n\n  {\n    std::lock_guard<std::mutex> lock(request_mutex_);\n    if (*sum_cnt_ > max_requests_) {\n      SafeReply(request, web::http::status_codes::TooManyRequests);\n      return modelbox::STATUS_BUSY;\n    }\n\n    ++*sum_cnt_;\n  }\n\n  RequestInfo request_info;\n  request_info.method = request.method();\n  request_info.uri = request.request_uri().to_string();\n  for (auto &head : request.headers()) {\n    request_info.headers_map[head.first] = head.second;\n  }\n  request.extract_string().then(\n      [this, request_info,\n       request](const pplx::task<utility::string_t> &t) mutable {\n        try {\n          request_info.request_body = t.get();\n          HandleTask(request, request_info);\n        } catch (const std::exception &e) {\n          MBLOG_ERROR << \"get 
request body error\" << e.what();\n          SafeReply(request, web::http::status_codes::BadRequest);\n          --*sum_cnt_;\n        }\n      });\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerReceiveSync::HandleTask(\n    web::http::http_request request, const RequestInfo &request_info) {\n  auto return_ret = modelbox::STATUS_OK;\n  Defer {\n    if (return_ret != modelbox::STATUS_OK) {\n      SafeReply(request, web::http::status_codes::InternalError);\n      --*sum_cnt_;\n    }\n  };\n  auto ext_data = this->CreateExternalData();\n  if (!ext_data) {\n    MBLOG_ERROR << \"can not get external data.\";\n    return_ret = modelbox::STATUS_FAULT;\n    return return_ret;\n  }\n\n  auto output_buf = ext_data->CreateBufferList();\n  if (output_buf == nullptr) {\n    MBLOG_ERROR << \"Create buffer list failed.\";\n    return_ret = modelbox::STATUS_NOMEM;\n    return return_ret;\n  }\n\n  auto size = request_info.request_body.size();\n  std::vector<std::size_t> shape = {size};\n  output_buf->Build(shape);\n  if (size > 0) {\n    auto *outmem = output_buf->MutableBufferData(0);\n    if (outmem == nullptr) {\n      MBLOG_ERROR << \"outmem buffer is nullptr.\";\n      return_ret = modelbox::STATUS_NOMEM;\n      return return_ret;\n    }\n\n    auto ret = memcpy_s(outmem, size, request_info.request_body.data(), size);\n    if (EOK != ret) {\n      MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", src size \" << size\n                  << \", dest size \" << size;\n      return_ret = modelbox::STATUS_FAULT;\n      return return_ret;\n    }\n  }\n\n  output_buf->At(0)->Set(\"size\", size);\n  output_buf->At(0)->Set(\"method\", (std::string)request_info.method);\n  output_buf->At(0)->Set(\"uri\", (std::string)request_info.uri);\n  output_buf->At(0)->Set(\"headers\", request_info.headers_map);\n  output_buf->At(0)->Set(\"endpoint\", request_url_);\n  output_buf->At(0)->SetGetBufferType(modelbox::BufferEnumType::STR);\n\n  auto replied = 
std::make_shared<std::atomic_bool>(false);\n  auto timeout_task = std::make_shared<modelbox::TimerTask>(\n      [](const web::http::http_request &request,\n         const std::shared_ptr<std::atomic_bool> &replied,\n         const std::shared_ptr<std::atomic<uint64_t>> &sum_cnt_) {\n        auto replied_before = replied->exchange(true);\n        if (!replied_before) {\n          SafeReply(request, web::http::status_codes::RequestTimeout);\n          --*sum_cnt_;\n        }\n      },\n      request, replied, this->sum_cnt_);\n\n  auto reply = std::make_shared<ReplyHandle>(\n      [request, replied, timeout_task, this](\n          uint16_t status, const concurrency::streams::istream &body_data,\n          const utility::string_t &content_type) mutable {\n        auto replied_before = replied->exchange(true);\n        if (replied_before) {\n          return;\n        }\n\n        SafeReply(request, status, body_data, content_type);\n        timeout_task->Stop();\n        --*(this->sum_cnt_);\n      });\n  auto session_ctx = ext_data->GetSessionContext();\n  session_ctx->SetPrivate(\"reply\", reply);\n\n  auto status = ext_data->Send(output_buf);\n  if (!status) {\n    MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    return_ret = modelbox::STATUS_FAULT;\n    return return_ret;\n  }\n\n  timer_.Schedule(timeout_task, time_out_ms_, 0, false);\n  status = ext_data->Close();\n  if (!status) {\n    MBLOG_ERROR << \"external data close failed:\" << status;\n  }\n\n  return return_ret;\n}\n\nmodelbox::Status HTTPServerReceiveSync::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  timer_.Start();\n  request_url_ = opts->GetString(\"endpoint\", \"\");\n  if (request_url_.empty()) {\n    request_url_ = \"http://127.0.0.1:8080\";\n    MBLOG_WARN << \"endpoint not set, use default endpoint: \" << request_url_;\n  }\n  max_requests_ = opts->GetUint64(\"max_requests\", 1000);\n  time_out_ms_ = opts->GetUint64(\"time_out_ms\", 5000);\n  
keep_alive_time_out_sec_ = opts->GetUint64(\"keepalive_timeout_sec\", 200);\n  std::string key;\n  std::string enpass;\n  std::string keypass;\n  const std::string cert = opts->GetString(\"cert\", \"\");\n  if (cert.length() > 0) {\n    if (access(cert.c_str(), R_OK) != 0) {\n      return {modelbox::STATUS_BADCONF, \"certificate file is invalid.\"};\n    }\n    key = opts->GetString(\"key\", \"\");\n    if (access(key.c_str(), R_OK) != 0) {\n      return {modelbox::STATUS_BADCONF, \"key file is invalid.\"};\n    }\n    enpass = opts->GetString(\"passwd\", \"\");\n    if (enpass.empty()) {\n      MBLOG_ERROR << \"password not set\";\n      return {modelbox::STATUS_BADCONF, \"password not set\"};\n    }\n    keypass = opts->GetString(\"key_pass\", \"\");\n    if (keypass.empty()) {\n      MBLOG_ERROR << \"password key not set\";\n      return {modelbox::STATUS_BADCONF, \"password key not set\"};\n    }\n  }\n\n  web::http::experimental::listener::http_listener_config server_config;\n  server_config.set_timeout(std::chrono::seconds(keep_alive_time_out_sec_));\n  if (cert.length() > 0 && key.length() > 0) {\n    server_config.set_ssl_context_callback(\n        [cert, key, enpass, keypass](boost::asio::ssl::context &ctx) {\n          ctx.set_options(boost::asio::ssl::context::default_workarounds);\n          modelbox::HardeningSSL(ctx.native_handle());\n          ctx.native_handle();\n          if (enpass.length() > 0) {\n            ctx.set_password_callback(\n                [enpass, keypass](\n                    std::size_t max_length,\n                    boost::asio::ssl::context::password_purpose purpose)\n                    -> std::string {\n                  std::vector<char> pass;\n                  auto ret = modelbox::PassDecrypt(enpass, keypass, &pass);\n                  if (!ret) {\n                    MBLOG_ERROR << \"key password is invalid\";\n                    return \"\";\n                  }\n                  std::string res;\n                  
res.insert(res.begin(), pass.begin(), pass.end());\n                  return res;\n                });\n          }\n          ctx.use_certificate_file(\n              cert, boost::asio::ssl::context_base::file_format::pem);\n          ctx.use_private_key_file(key, boost::asio::ssl::context::pem);\n        });\n  }\n  listener_ =\n      std::make_shared<web::http::experimental::listener::http_listener>(\n          request_url_, server_config);\n  listener_->support(web::http::methods::POST,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::PUT,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::GET,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n  listener_->support(web::http::methods::DEL,\n                     [this](const web::http::http_request &request) {\n                       this->HandleFunc(request);\n                     });\n\n  listener_->support(web::http::methods::TRCE, HandleUnSupportMethod);\n  listener_->support(web::http::methods::OPTIONS, HandleUnSupportMethod);\n  try {\n    listener_->open().wait();\n    MBLOG_INFO << \"start to listen : \" << request_url_;\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return {modelbox::STATUS_FAULT, e.what()};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerReceiveSync::Close() {\n  timer_.Stop();\n  listener_->close().wait();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerReceiveSync::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto output_buf = data_ctx->Output(\"out_request_info\");\n  auto input_buf = data_ctx->External();\n\n  for (auto 
&buf : *input_buf) {\n    output_buf->PushBack(buf);\n  }\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(HTTPServerReceiveSync, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME_RECEIVE);\n  desc.AddFlowUnitOutput({\"out_request_info\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetFlowUnitGroupType(\"Input\");\n  desc.SetDescription(FLOWUNIT_DESC_RECEIVE);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"endpoint\", \"string\", true,\n                                                  \"https://127.0.0.1:8080\",\n                                                  \"http server listen URL.\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"max_requests\", \"integer\", false, \"1000\", \"max http request.\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"keepalive_timeout_sec\", \"integer\", false, \"200\",\n                               \"keep-alive timeout time(sec)\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"time_out_ms\", \"integer\", false, \"5000\",\n                               \"max http request timeout. \"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"cert\", \"string\", false, \"\", \"cert file path\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"key\", \"string\", false, \"\", \"key file path\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"passwd\", \"string\", false, \"\", \"encrypted key file password.\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"key_pass\", \"string\", false, \"\", \"key for encrypted password.\"));\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/receive/httpserver_sync_receive.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_RECEIVE_CPU_H_\n#define MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_RECEIVE_CPU_H_\n\n#include \"cpprest/http_listener.h\"\n#include \"http_util.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME_RECEIVE = \"httpserver_sync_receive\";\nconstexpr const char *FLOWUNIT_DESC_RECEIVE =\n    \"\\n\\t@Brief: Start a http/https server, output request info to next \"\n    \"flowunit. 
\\n\"\n    \"\\t@Port parameter: The output port buffer contain the following meta \"\n    \"fields:\\n\"\n    \"\\t\\tField Name: size,        Type: size_t\\n\"\n    \"\\t\\tField Name: method,      Type: string\\n\"\n    \"\\t\\tField Name: uri,         Type: string\\n\"\n    \"\\t\\tField Name: headers,     Type: map<string,string>\\n\"\n    \"\\t\\tField Name: endpoint,    Type: string\\n\"\n    \"\\t  The the output port buffer data type is char * .\\n\"\n    \"\\t@Constraint: The flowuint 'httpserver_sync_receive' must be used pair \"\n    \"with 'httpserver_sync_reply'.\";\n\nstruct RequestInfo {\n  web::http::method method;\n  utility::string_t uri;\n  std::map<std::string, std::string> headers_map;\n  utility::string_t request_body;\n};\n\nclass ReplyHandle {\n public:\n  ReplyHandle(\n      const std::function<\n          void(uint16_t status, const concurrency::streams::istream &body_data,\n               const utility::string_t &content_type)> &reply_func) {\n    reply_func_ = reply_func;\n  }\n\n  virtual ~ReplyHandle() = default;\n  void Reply(uint16_t status, const concurrency::streams::istream &body_data,\n             const utility::string_t &content_type) {\n    reply_func_(status, body_data, content_type);\n  }\n\n private:\n  std::function<void(uint16_t status,\n                     const concurrency::streams::istream &body_data,\n                     const utility::string_t &content_type)>\n      reply_func_;\n};\n\nclass HTTPServerReceiveSync : public modelbox::FlowUnit {\n public:\n  HTTPServerReceiveSync();\n  ~HTTPServerReceiveSync() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status HandleFunc(web::http::http_request request);\n\n  modelbox::Status HandleTask(web::http::http_request request,\n                        
      const RequestInfo &request_info);\n\n  std::shared_ptr<std::atomic<uint64_t>> sum_cnt_ =\n      std::make_shared<std::atomic<uint64_t>>(0);\n  std::shared_ptr<web::http::experimental::listener::http_listener> listener_;\n  std::string request_url_;\n  uint64_t max_requests_{1000};\n  uint64_t time_out_ms_{5000};\n  uint64_t keep_alive_time_out_sec_{200};\n  std::mutex request_mutex_;\n  modelbox::Timer timer_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_RECEIVE_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/reply/httpserver_sync_reply.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"httpserver_sync_reply.h\"\n\n#include <cpprest/containerstream.h>\n#include <cpprest/rawptrstream.h>\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"receive/httpserver_sync_receive.h\"\n\nHTTPServerReplySync::HTTPServerReplySync() = default;\nHTTPServerReplySync::~HTTPServerReplySync() = default;\n\nconst static std::map<std::string, std::string> content_type_map_ = {\n    {\"htm\", U(\"text/html\")},\n    {\"html\", U(\"text/html\")},\n    {\"js\", U(\"text/javascript\")},\n    {\"css\", U(\"text/css\")},\n    {\"json\", U(\"application/json\")},\n    {\"png\", U(\"image/png\")},\n    {\"gif\", U(\"image/gif\")},\n    {\"jpeg\", U(\"image/jpeg\")},\n    {\"svg\", U(\"image/svg+xml\")},\n    {\"tar\", U(\"application/x-tar\")},\n    {\"txt\", U(\"text/plain;charset=utf-8\")},\n    {\"ico\", U(\"application/octet-stream\")},\n    {\"xml\", U(\"text/xml\")},\n    {\"mpeg\", U(\"video/mpeg\")},\n    {\"mp3\", U(\"audio/mpeg\")},\n};\n\nmodelbox::Status HTTPServerReplySync::Open(\n    const std::shared_ptr<modelbox::Configuration>& opts) {\n  auto content_type = opts->GetString(\"content_type\", \"txt\");\n  auto iter = content_type_map_.find(content_type);\n  if (iter == content_type_map_.end()) {\n    auto err_msg = \"unsupport content type \" + 
content_type;\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  content_type_ = iter->second;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status HTTPServerReplySync::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status HTTPServerReplySync::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto session_ctx = data_ctx->GetSessionContext();\n  auto reply =\n      std::static_pointer_cast<ReplyHandle>(session_ctx->GetPrivate(\"reply\"));\n  if (reply == nullptr) {\n    const auto *err_msg = \"http reply handler is nullptr.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  auto input_data = data_ctx->Input(\"in_reply_info\")->At(0);\n  if (input_data == nullptr) {\n    const auto *err_msg = \"http reply flowunit get input data failed.\";\n    MBLOG_ERROR << err_msg;\n    reply->Reply(\n        web::http::status_codes::InternalError,\n        concurrency::streams::bytestream::open_istream<std::string>(err_msg),\n        \"text/plain;charset=utf-8\");\n    return {modelbox::STATUS_NOMEM, err_msg};\n  }\n\n  auto bytes = input_data->GetBytes();\n  const auto *data = input_data->ConstData();\n  std::string ss((char*)data, bytes);\n  auto resp_body =\n      concurrency::streams::bytestream::open_istream<std::string>(ss);\n  reply->Reply(web::http::status_codes::OK, resp_body, content_type_);\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(HTTPServerReplySync, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME_REPLY);\n  desc.AddFlowUnitInput({\"in_reply_info\"});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetFlowUnitGroupType(\"Output\");\n  desc.SetDescription(FLOWUNIT_DESC_REPLY);\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/httpserver_sync/reply/httpserver_sync_reply.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_REPLY_CPU_H_\n#define MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_REPLY_CPU_H_\n\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME_REPLY = \"httpserver_sync_reply\";\nconstexpr const char *FLOWUNIT_DESC_REPLY =\n    \"\\n\\t@Brief: Send reply when receive a response info.\"\n    \"flowunit.\\n\"\n    \"\\t@Port parameter: The input port buffer contain the following meta \"\n    \"fields:\\n\"\n    \"\\t\\tField Name: status,        Type: int32_t\\n\"\n    \"\\t\\tField Name: headers,       Type: map<string,string>\\n\"\n    \"\\t  The the input port buffer data type is char * .\\n\"\n    \"\\t@Constraint: The flowuint 'httpserver_sync_reply' must be used pair \"\n    \"with 'httpserver_sync_receive'.\";\n\nclass HTTPServerReplySync : public modelbox::FlowUnit {\n public:\n  HTTPServerReplySync();\n  ~HTTPServerReplySync() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::string content_type_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_HTTPSERVER_SYNC_REPLY_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"image_decoder\")\n\nproject(modelbox-unit-${UNIT_NAME}-${UNIT_DEVICE})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable resize flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_decoder/image_decoder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_decoder.h\"\n\n#include \"modelbox/flowunit_api_helper.h\"\n\n#include <securec.h>\n\nImageDecoderFlowUnit::ImageDecoderFlowUnit() = default;\nImageDecoderFlowUnit::~ImageDecoderFlowUnit() = default;\n\nstd::vector<std::string> CvImgPixelFormat{\"bgr\", \"rgb\", \"nv12\"};\n\nmodelbox::Status ImageDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  pixel_format_ = opts->GetString(\"pix_fmt\", \"bgr\");\n  if (find(CvImgPixelFormat.begin(), CvImgPixelFormat.end(), pixel_format_) ==\n      CvImgPixelFormat.end()) {\n    auto errMsg = \"pixel_format is invalid, configure is :\" + pixel_format_;\n    MBLOG_ERROR << errMsg;\n    std::string valid_format;\n    for (const auto &iter : CvImgPixelFormat) {\n      if (valid_format.length() > 0) {\n        valid_format += \", \";\n      }\n      valid_format += iter;\n    }\n    MBLOG_ERROR << \"Valid pixel_format is: \" << valid_format;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n  MBLOG_DEBUG << \"pixel_format \" << pixel_format_;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageDecoderFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status ImageDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process image decode\";\n\n  // get 
input\n  auto input_bufs = data_ctx->Input(\"in_encoded_image\");\n  auto output_bufs = data_ctx->Output(\"out_image\");\n  if (input_bufs->Size() <= 0) {\n    auto errMsg = \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  // decode\n  std::vector<cv::Mat> output_img_list;\n  std::vector<size_t> output_shape;\n  for (auto &buffer : *input_bufs) {\n    const auto *input_data = static_cast<const u_char *>(buffer->ConstData());\n    std::vector<u_char> input_data2(\n        input_data, input_data + buffer->GetBytes() / sizeof(u_char));\n\n    cv::Mat img_bgr = cv::imdecode(input_data2, cv::IMREAD_COLOR);\n    if (img_bgr.data == nullptr || img_bgr.size == nullptr) {\n      std::string error_msg = \"input image buffer is invalid, imdecode failed.\";\n      MBLOG_ERROR << error_msg;\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n    cv::Mat img_dest;\n    if (pixel_format_ == \"bgr\") {\n      img_dest = img_bgr;\n    } else if (pixel_format_ == \"rgb\") {\n      cv::cvtColor(img_bgr, img_dest, cv::COLOR_BGR2RGB);\n    } else if (pixel_format_ == \"nv12\") {\n      img_dest = BGR2YUV_NV12(img_bgr);\n    }\n\n    if (!modelbox::StatusError) {\n      std::string error_msg = \"input image decode success, but transform nv12 format failed.\";\n      MBLOG_ERROR << error_msg;\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    MBLOG_DEBUG << \"decode image clos : \" << img_bgr.cols\n                << \", rows : \" << img_bgr.rows\n                << \"channles : \" << img_bgr.channels();\n\n    // build output_buffer\n    output_bufs->EmplaceBack(\n        img_dest.data, img_dest.total() * 
img_dest.elemSize(),\n        [img_dest](void *unused) { /* hold img dest*/ });\n    auto output_buffer = output_bufs->Back();\n    output_buffer->Set(\"width\", (int32_t)img_bgr.cols);\n    output_buffer->Set(\"height\", (int32_t)img_bgr.rows);\n    auto width_stride = (int32_t)img_bgr.cols;\n    if (pixel_format_ == \"rgb\" || pixel_format_ == \"bgr\") {\n      width_stride *= 3;\n    }\n    \n    output_buffer->Set(\"width_stride\", width_stride);\n    output_buffer->Set(\"height_stride\", (int32_t)img_bgr.rows);\n    output_buffer->Set(\"channel\", (int32_t)img_dest.channels());\n    output_buffer->Set(\"pix_fmt\", pixel_format_);\n    output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    output_buffer->Set(\n        \"shape\",\n        std::vector<size_t>{(size_t)img_dest.rows, (size_t)img_dest.cols,\n                            (size_t)img_dest.channels()});\n    output_buffer->Set(\"layout\", std::string(\"hwc\"));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\ncv::Mat ImageDecoderFlowUnit::BGR2YUV_NV12(const cv::Mat &src_bgr) {\n  modelbox::StatusError = modelbox::STATUS_OK;\n  cv::Mat dst_nv12(src_bgr.rows * 1.5, src_bgr.cols, CV_8UC1, cv::Scalar(0));\n  cv::Mat src_yuv_i420;\n  cv::cvtColor(src_bgr, src_yuv_i420, cv::COLOR_BGR2YUV_I420);\n\n  size_t len_y = src_bgr.rows * src_bgr.cols;\n  size_t len_u = len_y / 4;\n  auto ret = memcpy_s(dst_nv12.data, len_y, src_yuv_i420.data, len_y);\n  if (ret != EOK) {\n    MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", size \" << len_y;\n    dst_nv12.release();\n    modelbox::StatusError = {modelbox::STATUS_FAULT};\n    return dst_nv12;\n  }\n  for (size_t i = 0; i < len_u; ++i) {\n    dst_nv12.data[len_y + 2 * i] = src_yuv_i420.data[len_y + i];\n    dst_nv12.data[len_y + 2 * i + 1] = src_yuv_i420.data[len_y + len_u + i];\n  }\n\n  return dst_nv12;\n}\n\nMODELBOX_FLOWUNIT(ImageDecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  
desc.AddFlowUnitInput({\"in_encoded_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"string\", true, \"bgr\", \"the output pixel format\"));\n\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_decoder/image_decoder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_HTTPSERVER_CPU_H_\n#define MODELBOX_FLOWUNIT_HTTPSERVER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include <opencv2/opencv.hpp>\n\nconstexpr const char *FLOWUNIT_NAME = \"image_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: An OpenCV crop flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nclass ImageDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  ImageDecoderFlowUnit();\n  ~ImageDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  cv::Mat BGR2YUV_NV12(const cv::Mat &src_bgr);\n\n  std::string pixel_format_{\"bgr\"};\n};\n\n#endif  // MODELBOX_FLOWUNIT_HTTPSERVER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_decoder/image_decoder_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass ImageDecoderFlowUnitTest : public testing::Test {\n public:\n  ImageDecoderFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> ImageDecoderFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus ImageDecoderFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_0_1_decode\");\n    
desc_flowunit.SetDescription(\"the test in 0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1_decode.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_0_1_decode\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                const auto* err_msg = \"can not get external data.\";\n                modelbox::Status ret = {modelbox::STATUS_NODATA, err_msg};\n                MBLOG_ERROR << err_msg;\n                return ret;\n              }\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              auto output_buf = ext_data->CreateBufferList();\n              modelbox::TensorList output_tensor_list(output_buf);\n              output_tensor_list.BuildFromHost<uchar>(\n                  {1, {gimg_path.size() + 1}}, (void*)gimg_path.data(),\n                  gimg_path.size() + 1);\n\n              auto status = ext_data->Send(output_buf);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = 
ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n                return status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(testing::Invoke([=](const std::shared_ptr<DataContext>&\n                                                data_ctx) {\n          MBLOG_INFO << \"test_0_1_decode process\";\n\n          auto external = data_ctx->External();\n          std::string gimg_path =\n              std::string((char*)(*external)[0]->ConstData());\n\n          cv::Mat gimg_data = cv::imread(gimg_path);\n\n          MBLOG_INFO << \"gimage col \" << gimg_data.cols << \"  grow \"\n                     << gimg_data.rows << \" gchannel:\" << gimg_data.channels();\n\n          // read img and encode\n          uint32_t batch_size = 3;\n          std::vector<std::string> encode_fmt{\".jpg\", \".png\", \".bmp\"};\n          std::vector<std::vector<u_char>> img_data_list;\n          std::vector<size_t> output_bufs_shape;\n          for (size_t i = 0; i < batch_size; ++i) {\n            const std::string& img_path = gimg_path;\n            cv::Mat ori_img = cv::imread(img_path);\n            MBLOG_INFO << \"input image col \" << ori_img.cols << \"  row \"\n                       << ori_img.rows << \" 
channel:\" << ori_img.channels()\n                       << \" encode fmt \" << encode_fmt[i];\n\n            std::vector<u_char> img_data;\n            std::vector<int> img_quality_param{cv::IMWRITE_JPEG_QUALITY, 100};\n            cv::imencode(encode_fmt[i], ori_img, img_data, img_quality_param);\n            img_data_list.push_back(img_data);\n            output_bufs_shape.push_back(img_data.size());\n\n            cv::Mat ori_decode = cv::imdecode(img_data, cv::IMREAD_COLOR);\n            std::string ori_decode_name = std::string(TEST_DATA_DIR) +\n                                          \"/decode_ori_\" + std::to_string(i) +\n                                          \".jpg\";\n            cv::imwrite(ori_decode_name, ori_decode);\n          }\n\n          // build output bufs\n          auto output_bufs = data_ctx->Output(\"Out_1\");\n          output_bufs->Build(output_bufs_shape);\n          for (size_t i = 0; i < batch_size; ++i) {\n            auto* output_data =\n                static_cast<u_char*>(output_bufs->MutableBufferData(i));\n            memcpy_s(output_data, output_bufs->At(i)->GetBytes(),\n                     img_data_list[i].data(), img_data_list[i].size());\n          }\n\n          return modelbox::STATUS_OK;\n        }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_0_1_decode\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_1_0_decode\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + 
\"/libmodelbox-unit-cpu-test_1_0_decode.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_1_0_decode\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"test_1_0_decode process\";\n              auto input_buf = op_ctx->Input(\"In_1\");\n              int32_t cols = 0;\n              int32_t rows = 0;\n              int32_t channels = 0;\n\n              for (size_t i = 0; i < input_buf->Size(); i++) {\n                input_buf->At(i)->Get(\"width\", cols);\n                input_buf->At(i)->Get(\"height\", rows);\n       
         input_buf->At(i)->Get(\"channel\", channels);\n                const auto* input_data =\n                    static_cast<const uchar*>(input_buf->ConstBufferData(i));\n\n                cv::Mat img_data(cv::Size(cols, rows), CV_8UC3);\n                memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                         input_data, input_buf->At(i)->GetBytes());\n\n                MBLOG_INFO << \"output image col \" << img_data.cols << \"  row \"\n                           << img_data.rows\n                           << \" channel:\" << img_data.channels();\n\n                std::string name = std::string(TEST_DATA_DIR) +\n                                   \"/decode_result_\" + std::to_string(i) +\n                                   \".jpg\";\n\n                cv::imwrite(name, img_data);\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_1_0_decode\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n  return STATUS_OK;\n}\n\nTEST_F(ImageDecoderFlowUnitTest, DecodeTest) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_0_1_decode[type=flowunit, flowunit=test_0_1_decode, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          image_decoder[type=flowunit, flowunit=image_decoder, device=cpu, deviceid=0, label=\"<in_encoded_image> | <out_image>\", batch_size=3]\n          test_1_0_decode[type=flowunit, flowunit=test_1_0_decode, device=cpu, deviceid=0, label=\"<In_1>\",batch_size=3]                                \n          
test_0_1_decode:Out_1 -> image_decoder:in_encoded_image \n          image_decoder:out_image -> test_1_0_decode:In_1                                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"DecodeTest\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto& elem : filePath) {\n    MBLOG_DEBUG << \"filePath: \" << elem;\n  }\n\n  for (size_t i = 0; i < 3; ++i) {\n    std::string expected_file_path = std::string(TEST_DATA_DIR) +\n                                     \"/decode_ori_\" + std::to_string(i) +\n                                     \".jpg\";\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string decode_result_file_path = std::string(TEST_DATA_DIR) +\n                                          \"/decode_result_\" +\n                                          std::to_string(i) + \".jpg\";\n    cv::Mat decode_result_img = cv::imread(decode_result_file_path);\n\n    int result_data_size =\n        decode_result_img.total() * decode_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    EXPECT_EQ(result_data_size, expected_data_size);\n\n    int ret =\n        memcmp(decode_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(ret, 0);\n\n    auto rmret = remove(expected_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n\n    auto rmret2 = remove(decode_result_file_path.c_str());\n    EXPECT_EQ(rmret2, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_rotate/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"image_rotate\")\n\nproject(modelbox-unit-${UNIT_NAME}-${UNIT_DEVICE})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable image_rotate flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_IMAGE_ROTATE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_ROTATE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_rotate/image_rotate.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_rotate.h\"\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nmodelbox::Status ImageRotateCpuFlowUnit::RotateOneImage(\n    std::shared_ptr<modelbox::Buffer> input_buffer,\n    std::shared_ptr<modelbox::Buffer> output_buffer, int32_t rotate_angle,\n    int32_t width, int32_t height) {\n  cv::Mat input_img(cv::Size(width, height), CV_8UC3,\n                    const_cast<void *>(input_buffer->ConstData()));\n  auto output_img = std::make_shared<cv::Mat>();\n  cv::rotate(input_img, *output_img, rotate_code_[rotate_angle]);\n\n  // build output buffer\n  auto ret = output_buffer->BuildFromHost(\n      output_img->data, output_img->total() * output_img->elemSize(),\n      [output_img](void *ptr) {\n        /* Only capture image */\n      });\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"BuildFromHost failed, ret \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ImageRotateCpuFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"rotate_angle\", \"int\", false, \"0\", \"the image rotate image\"));\n\n  desc.SetFlowType(modelbox::NORMAL);\n  
desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_rotate/image_rotate.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_ROTATE_CPU_H_\n#define MODELBOX_FLOWUNIT_IMAGE_ROTATE_CPU_H_\n\n#include \"image_rotate_base.h\"\n#include <opencv2/opencv.hpp>\n\nconstexpr const char *FLOWUNIT_NAME = \"image_rotate\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: An OpenCV rotate flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: rotate_angle,  Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nclass ImageRotateCpuFlowUnit : public ImageRotateFlowUnitBase {\n public:\n  modelbox::Status RotateOneImage(\n      std::shared_ptr<modelbox::Buffer> input_buffer,\n      std::shared_ptr<modelbox::Buffer> output_buffer, int32_t rotate_angle,\n      int32_t width, int32_t height) override;\n\n  std::map<int32_t, cv::RotateFlags> rotate_code_{\n      {90, cv::ROTATE_90_CLOCKWISE},\n      {180, cv::ROTATE_180},\n      {270, cv::ROTATE_90_COUNTERCLOCKWISE}};\n};\n\n#endif  // MODELBOX_FLOWUNIT_IMAGE_ROTATE_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/image_rotate/image_rotate_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <opencv2/opencv.hpp>\n\n#include \"image_rotate_test_base.h\"\n\nnamespace modelbox {\n\nTEST_F(ImageRotateFlowUnitTest, CpuRotateTest) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_0_1_rotate[type=flowunit, flowunit=test_0_1_rotate, device=cpu, deviceid=0, label=\"<out_1>\"]\n          image_rotate[type=flowunit, flowunit=image_rotate, device=cpu, deviceid=0, label=\"<in_encoded_image> | <out_image>\", batch_size=3]\n          test_1_0_rotate[type=flowunit, flowunit=test_1_0_rotate, device=cpu, deviceid=0, label=\"<in_1>\",batch_size=3]                                \n          test_0_1_rotate:out_1 -> image_rotate:in_image \n          test_0_1_rotate:out_1 -> test_1_0_rotate:in_origin                                                                      \n          image_rotate:out_image -> test_1_0_rotate:in_rotate\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"CpuRotateTest\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  for (auto rotate_angle : test_rotate_angle_) {\n    std::string 
expected_file_path = std::string(TEST_ASSETS) + \"/rotate_\" +\n                                     std::to_string(rotate_angle) + \".jpg\";\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string rotate_result_file_path = std::string(TEST_DATA_DIR) +\n                                          \"/rotate_result_\" +\n                                          std::to_string(rotate_angle) + \".jpg\";\n    cv::Mat rotate_result_img = cv::imread(rotate_result_file_path);\n\n    int result_data_size =\n        rotate_result_img.total() * rotate_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    EXPECT_EQ(result_data_size, expected_data_size);\n\n    auto cmp_ret =\n        memcmp(rotate_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(cmp_ret, 0);\n\n    auto rmret = remove(rotate_result_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"java\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT WITH_JAVA) \n    message(STATUS \"java support is disabled\")\n    return()\nendif()\n\nif(NOT ${JNI_FOUND})\n    message(STATUS \"Not found java, disable java flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${JNI_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_MODELBOX_API_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_JAVA_INCLUDE})\n\nset(EMPTY_SOURCE_FILE ${CMAKE_BINARY_DIR}/empty.cc)\nif (NOT EXISTS ${EMPTY_SOURCE_FILE})\n    file(WRITE ${EMPTY_SOURCE_FILE})\nendif()\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_STATIC modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-static)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_STATIC} STATIC 
${MODELBOX_UNIT_SOURCE})\ntarget_link_libraries(${MODELBOX_UNIT_STATIC} ${JNI_LIBRARIES})\nset_property(TARGET ${MODELBOX_UNIT_STATIC} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${EMPTY_SOURCE_FILE})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_STATIC} )\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} -Wl,--whole-archive ${MODELBOX_UNIT_STATIC} -Wl,--no-whole-archive)\nset(LIBMODELBOX_FLOWUNIT_JAVA_SHARED ${MODELBOX_UNIT_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY \n    ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_JAVA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_JAVA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_JAVA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_JAVA_SO_PATH 
${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${JAVA_INCLUDE_DIRS})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES pybind11::module pybind11::embed)\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_JAVA_SO_PATH ${LIBMODELBOX_FLOWUNIT_JAVA_SO_PATH} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n#include <mutex>\n\n#include \"java_flowunit.h\"\n#include \"java_module.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"java\";\nconstexpr const char *FLOWUNIT_DESC = \"A java flowunit\";\n\nstd::mutex kJavaInitLock;\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<JavaFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetNodelete(true);\n  desc->SetGlobal(true);\n}\n\nmodelbox::Status DriverInit() {\n  std::lock_guard<std::mutex> lock(kJavaInitLock);\n  // Driver Init.\n  if (kJavaJVM != nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  kJavaJVM = std::make_shared<JavaJVM>();\n  auto ret = kJavaJVM->InitJVM();\n  if (!ret) {\n    kJavaJVM = nullptr;\n  }\n\n  return ret;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n  std::lock_guard<std::mutex> lock(kJavaInitLock);\n  if (kJavaJVM) {\n    kJavaJVM->ExitJVM();\n 
   kJavaJVM = nullptr;\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/java_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"java_flowunit.h\"\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n\nJavaFlowUnit::JavaFlowUnit() = default;\nJavaFlowUnit::~JavaFlowUnit() = default;\n\nmodelbox::Status JavaFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration>& opts) {\n  java_desc_ = std::dynamic_pointer_cast<VirtualJavaFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n\n  auto java_entry = java_desc_->GetJarEntry();\n  auto config = java_desc_->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  if (config != nullptr) {\n    merge_config->Add(*config);\n  }\n  merge_config->Add(*opts);\n\n  constexpr const char DELIM_CHAR = '@';\n  constexpr size_t ENTRY_FILENAME_AND_CLASS_COUNT = 2;\n  const auto& entry_list = modelbox::StringSplit(java_entry, DELIM_CHAR);\n  if (entry_list.size() != ENTRY_FILENAME_AND_CLASS_COUNT) {\n    return {modelbox::STATUS_INVALID, \"invalid entry string: \" + java_entry};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_FAULT;\n}\n\nmodelbox::Status JavaFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_FAULT;\n}\n\nmodelbox::Status 
JavaFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_FAULT;\n}\n\nmodelbox::Status JavaFlowUnit::DataGroupPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_FAULT;\n}\n\nmodelbox::Status JavaFlowUnit::DataGroupPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_FAULT;\n}\n\nmodelbox::Status JavaFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nvoid JavaFlowUnit::SetFlowUnitDesc(\n    std::shared_ptr<modelbox::FlowUnitDesc> desc) {}\n\nstd::shared_ptr<modelbox::FlowUnitDesc> JavaFlowUnit::GetFlowUnitDesc() {\n  return nullptr;\n}\n\nJavaFlowUnitDesc::JavaFlowUnitDesc() = default;\n\nJavaFlowUnitDesc::~JavaFlowUnitDesc() = default;\n\nvoid JavaFlowUnitDesc::SetJavaEntry(const std::string& java_entry) {\n  java_entry_ = java_entry;\n}\n\nstd::string JavaFlowUnitDesc::GetJavaEntry() { return java_entry_; }\n\nJavaFlowUnitFactory::JavaFlowUnitFactory() = default;\n\nJavaFlowUnitFactory::~JavaFlowUnitFactory() = default;\n\nstd::shared_ptr<modelbox::FlowUnit> JavaFlowUnitFactory::CreateFlowUnit(\n    const std::string& unit_name, const std::string& unit_type) {\n  auto java_flowunit = std::make_shared<JavaFlowUnit>();\n  return java_flowunit;\n}\n\nstd::string JavaFlowUnitFactory::GetFlowUnitFactoryType() {\n  return FLOWUNIT_TYPE;\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nJavaFlowUnitFactory::FlowUnitProbe() {\n  return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/java_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_JAVA_H_\n#define MODELBOX_FLOWUNIT_JAVA_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include \"virtualdriver_java.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\n\nclass JavaFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  JavaFlowUnitDesc();\n  ~JavaFlowUnitDesc() override;\n\n  void SetJavaEntry(const std::string &java_entry);\n  std::string GetJavaEntry();\n\n  std::string java_entry_;\n};\n\nclass JavaFlowUnit : public modelbox::FlowUnit {\n public:\n  JavaFlowUnit();\n  ~JavaFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  void 
SetFlowUnitDesc(std::shared_ptr<modelbox::FlowUnitDesc> desc) override;\n  std::shared_ptr<modelbox::FlowUnitDesc> GetFlowUnitDesc() override;\n\n private:\n  std::shared_ptr<VirtualJavaFlowUnitDesc> java_desc_;\n};\n\nclass JavaFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  JavaFlowUnitFactory();\n  ~JavaFlowUnitFactory() override;\n\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_JAVA_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/java_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <pybind11/embed.h>\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\nclass JavaFlowUnitTest : public testing::Test {\n public:\n  JavaFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n protected:\n  void SetUp() override {}\n\n  void TearDown() override { driver_flow_->Clear(); };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> JavaFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(JavaFlowUnitTest, DISABLED_Init) {\n  auto op_dir = test_data_dir + \"/java_op\";\n  std::string toml_content = R\"(\n    [driver]\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             op_dir + \"\\\"]\\n    \" +\n                             R\"(\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                              
                                                                                                                      \n}}'''\nformat = \"graphviz\"\n\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"JavaFlowUnit\", toml_content, 0);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/java_module.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"java_module.h\"\n\n#include <jni.h>\n#include <modelbox/base/log.h>\n\n#include <chrono>\n#include <functional>\n\nstd::shared_ptr<JavaJVM> kJavaJVM = nullptr;\n\nJavaJVM::JavaJVM() = default;\n\nJavaJVM::~JavaJVM() = default;\n\nJNIEnv *JavaJVM::GetEnv() { return env_; }\n\nmodelbox::Status JavaJVM::InitJNI() {\n  jsize vms_num = 0;\n  auto ret = JNI_GetCreatedJavaVMs(nullptr, 0, &vms_num);\n  if (vms_num <= 0 || ret != JNI_OK) {\n    JavaVMInitArgs vm_args;\n    ret = JNI_CreateJavaVM(&jvm_, (void **)&env_, &vm_args);\n    if (ret != JNI_OK) {\n      return modelbox::STATUS_FAULT;\n    }\n    is_jvm_create_ = true;\n  } else {\n    JavaVM *jvms[vms_num];\n    ret = JNI_GetCreatedJavaVMs(jvms, vms_num, &vms_num);\n    if (ret != JNI_OK) {\n      return modelbox::STATUS_FAULT;\n    }\n    jvm_ = jvms[0];\n    jvm_->GetEnv((void **)&env_, JNI_VERSION_1_8);\n  }\n\n  is_initialized_ = true;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaJVM::InitJVM() {\n  auto ret = InitJNI();\n  if (!ret) {\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaJVM::ExitJNI() {\n  if (is_initialized_ == false) {\n    return modelbox::STATUS_OK;\n  }\n\n  if (is_jvm_create_ == false) {\n    return modelbox::STATUS_OK;\n  }\n\n  jvm_->DestroyJavaVM();\n  is_jvm_create_ = false;\n  
is_initialized_ = false;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaJVM::ExitJVM() {\n  ExitJNI();\n\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/java/java_module.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JAVA_FLOWUNIT_MODULE_H_\n#define MODELBOX_JAVA_FLOWUNIT_MODULE_H_\n\n#include <modelbox/base/status.h>\n#include <jni.h>\n\nclass JavaJVM {\n public:\n  JavaJVM();\n  virtual ~JavaJVM();\n  modelbox::Status InitJVM();\n  modelbox::Status ExitJVM();\n\n  JNIEnv *GetEnv();\n private:\n  modelbox::Status InitJNI();\n  modelbox::Status ExitJNI();\n  bool is_initialized_ = false;\n  bool is_jvm_create_ = false;\n  JavaVM *jvm_ = nullptr;\n  JNIEnv *env_;\n};\n\nextern std::shared_ptr<JavaJVM> kJavaJVM;\n\n#endif  // MODELBOX_JAVA_FLOWUNIT_MODULE_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mean/CMakeLists.txt",
#
# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Builds the CPU "mean" flowunit shared library and registers its sources
# with the driver unit-test targets.

cmake_minimum_required(VERSION 3.10)

set(UNIT_DEVICE "cpu")
set(UNIT_NAME "mean")

project(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})

# Collect all sources, then split them into library sources and *_test.c*
# test sources.
file(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)
group_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE "_test.c*" ${UNIT_SOURCE})

include_directories(${CMAKE_CURRENT_LIST_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${LIBMODELBOX_INCLUDE})
include_directories(${LIBMODELBOX_BASE_INCLUDE})
include_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})
include_directories(${MODELBOX_COMMON_MEAN_INCLUDE})

set(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)
set(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})

add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})

set(LIBMODELBOX_FLOWUNIT_MEAN_CPU_SHARED ${MODELBOX_UNIT_SHARED})

set_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES 
    SOVERSION ${MODELBOX_VERSION_MAJOR}
    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}
    DEFINE_SYMBOL ""
)

target_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})
target_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})
target_link_libraries(${MODELBOX_UNIT_SHARED} pthread)
target_link_libraries(${MODELBOX_UNIT_SHARED} rt)
target_link_libraries(${MODELBOX_UNIT_SHARED} dl)
target_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MEAN_LIBRARY})
# The shared mean helper library must be built before this flowunit.
add_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MEAN_LIBRARY})

set_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME "modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}")

install(TARGETS ${MODELBOX_UNIT_SHARED} 
    COMPONENT cpu-device-flowunit
    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
    OPTIONAL
    )


install(DIRECTORY ${HEADER} 
    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} 
    COMPONENT cpu-device-flowunit-devel
    )

# Export locations to the parent project via the CMake cache.
set(LIBMODELBOX_FLOWUNIT_MEAN_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_MEAN_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_MEAN_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_MEAN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL "")


# driver test
list(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})
list(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})
set(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL "")
set(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL "")
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mean/mean_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mean_flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"modelbox/type.h\"\n\nMeanFlowUnit::MeanFlowUnit() = default;\nMeanFlowUnit::~MeanFlowUnit() = default;\n\nmodelbox::Status MeanFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  const auto input_bufs = data_ctx->Input(\"in_data\");\n  if (!CheckBufferListValid(input_bufs)) {\n    MBLOG_ERROR << \"mean flowunit in_image invalied\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_data\");\n  if (!BuildOutputBufferList(input_bufs, output_bufs)) {\n    MBLOG_ERROR << \"build out_image BufferList failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    auto input_buf = input_bufs->At(i);\n    std::vector<size_t> shape;\n    if (!input_buf->Get(\"shape\", shape)) {\n      MBLOG_ERROR << \"mean flowunit can not get shape from meta\";\n      continue;\n    }\n\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    if (!input_bufs->At(i)->Get(\"type\", type)) {\n      MBLOG_ERROR << \"mean flowunit can not get input type from meta\";\n      continue;\n    }\n\n    float *in_data_f32 = nullptr;\n    uint8_t *in_data_uint8 = nullptr;\n    if (type == modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n 
     Process(in_data_f32, input_buf, output_bufs->At(i));\n    } else {\n      Process(in_data_uint8, input_buf, output_bufs->At(i));\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\ntemplate <typename T>\nvoid MeanFlowUnit::Process(const T *input_data,\n                           const std::shared_ptr<modelbox::Buffer> &input_buf,\n                           const std::shared_ptr<modelbox::Buffer> &out_buff) {\n  input_data = static_cast<T *>(const_cast<void *>(input_buf->ConstData()));\n  if (input_data == nullptr) {\n    MBLOG_ERROR << \"mean flowunit data is nullptr\";\n    return;\n  }\n\n  size_t size = (input_buf->GetBytes() / sizeof(T)) / CHANNEL_NUM;\n  out_buff->CopyMeta(input_buf);\n  out_buff->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n  auto *out_data = static_cast<float *>(out_buff->MutableData());\n  if (out_data == nullptr) {\n    MBLOG_ERROR << \"output is null\";\n    return;\n  }\n\n  for (size_t c = 0; c < CHANNEL_NUM; c++) {\n    for (size_t j = size * c; j < size * (c + 1); j++) {\n      out_data[j] = input_data[j] - params_.means_[c];\n    }\n  }\n}\n\nMODELBOX_FLOWUNIT(MeanFlowUnit, desc) {\n  desc.SetFlowUnitName(\"mean\");\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_data\"});\n  desc.AddFlowUnitOutput({\"out_data\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"mean\", \"string\", true, \"\", \"the mean param\"));\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mean/mean_flowunit.h",
/*
 * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MODELBOX_FLOWUNIT_MEAN_H_
#define MODELBOX_FLOWUNIT_MEAN_H_

#include <mean_flowunit_base.h>
#include <modelbox/base/device.h>
#include <modelbox/base/status.h>
#include <modelbox/flow.h>
#include <modelbox/flowunit.h>

// Registration constants consumed by MODELBOX_FLOWUNIT /
// MODELBOX_DRIVER_FLOWUNIT in mean_flowunit.cc.
constexpr const char *FLOWUNIT_TYPE = "cpu";
constexpr const char *FLOWUNIT_NAME = "mean";
constexpr const char *FLOWUNIT_DESC =
    "\n\t@Brief: The operator is used to subtract the mean for tensor data, "
    "for example the image(RGB/BGR), shape(W, H, C), subtract the "
    "corresponding value for different channels. \n"
    "\t@Port parameter: The input port and the output buffer type are tensor. \n"
    "\t  The tensor type buffer contain the following meta fields:\n"
    "\t\tField Name: shape,         Type: vector<size_t>\n"
    "\t\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\n"
    "\t@Constraint: ";

// CPU flowunit that subtracts a per-channel mean from tensor buffers;
// common parameter handling lives in MeanFlowUnitBase (mean_flowunit_base.h).
class MeanFlowUnit : public MeanFlowUnitBase {
 public:
  MeanFlowUnit();
  ~MeanFlowUnit() override;

  // Process every buffer on "in_data" and emit float results on "out_data".
  modelbox::Status Process(
      std::shared_ptr<modelbox::DataContext> data_ctx) override;

 private:
  // Typed worker; the pointer parameter only selects the instantiation —
  // the data pointer is re-read from input_buf in the implementation.
  template <typename T>
  void Process(const T *input_data,
               const std::shared_ptr<modelbox::Buffer> &input_buf,
               const std::shared_ptr<modelbox::Buffer> &out_buff);
};

#endif  // MODELBOX_FLOWUNIT_MEAN_H_
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mean/mean_flowunit_test.cc",
/*
 * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include <functional>
#include <future>
#include <random>
#include <thread>

#include "modelbox/base/log.h"
#include "modelbox/base/utils.h"
#include "modelbox/buffer.h"
#include "driver_flow_test.h"
#include "flowunit_mockflowunit/flowunit_mockflowunit.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;

namespace modelbox {
// Driver-level test for the CPU "mean" flowunit: builds a three-node graph
// (mock source -> mean -> mock sink) and checks the subtracted output.
class MeanCpuFlowUnitTest : public testing::Test {
 public:
  MeanCpuFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}

 protected:
  void SetUp() override {
    auto ret = AddMockFlowUnit();
    EXPECT_EQ(ret, STATUS_OK);
  }

  void TearDown() override { driver_flow_->Clear(); };

  std::shared_ptr<DriverFlowTest> GetDriverFlow();

  const std::string test_lib_dir = TEST_DRIVER_DIR,
                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;

 private:
  // Registers the two mock flowunits used by the graph under test.
  Status AddMockFlowUnit();
  std::shared_ptr<DriverFlowTest> driver_flow_;
};

Status MeanCpuFlowUnitTest::AddMockFlowUnit() {
  auto ctl_ = driver_flow_->GetMockFlowCtl();
  // test_mean_0: mock source (0 inputs, 1 output "Out_1") producing a
  // 4x5x3 uint8 tensor filled with 100.
  {
    MockFlowUnitDriverDesc desc_flowunit;
    desc_flowunit.SetClass("DRIVER-FLOWUNIT");
    desc_flowunit.SetType("cpu");
    desc_flowunit.SetName("test_mean_0");
    desc_flowunit.SetDescription("The test input data, 0 inputs 1 output");
    desc_flowunit.SetVersion("1.0.0");
    std::string file_path_flowunit =
        std::string(TEST_DRIVER_DIR) + "/libmodelbox-unit-cpu-test_mean_0.so";
    desc_flowunit.SetFilePath(file_path_flowunit);
    auto mock_flowunit = std::make_shared<MockFlowUnit>();
    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();
    mock_flowunit_desc->SetFlowUnitName("test_mean_0");
    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput("Out_1"));
    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);
    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;
    mock_flowunit_wp = mock_flowunit;

    // Open() pushes external data into the graph to trigger one session.
    EXPECT_CALL(*mock_flowunit, Open(_))
        .WillRepeatedly(testing::Invoke(
            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {
              auto spt = mock_flowunit_wp.lock();
              auto ext_data = spt->CreateExternalData();
              if (!ext_data) {
                MBLOG_ERROR << "can not get external data.";
              }

              auto buffer_list = ext_data->CreateBufferList();
              buffer_list->Build({10 * sizeof(int)});
              auto *data = (int *)buffer_list->MutableData();
              for (size_t i = 0; i < 10; i++) {
                data[i] = i;
              }

              auto status = ext_data->Send(buffer_list);
              if (!status) {
                MBLOG_ERROR << "external data send buffer list failed:"
                            << status;
              }

              status = ext_data->Close();
              if (!status) {
                MBLOG_ERROR << "external data close failed:" << status;
              }

              return modelbox::STATUS_OK;
            }));

    EXPECT_CALL(*mock_flowunit, DataPre(_))
        .WillRepeatedly(
            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {
              MBLOG_DEBUG << "test_mean_0 "
                          << "DataPre";
              return modelbox::STATUS_OK;
            }));

    EXPECT_CALL(*mock_flowunit, DataPost(_))
        .WillRepeatedly(
            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {
              MBLOG_DEBUG << "test_mean_0 "
                          << "DataPost";
              return modelbox::STATUS_OK;
            }));

    // Process() emits the constant-100 test tensor with shape/type meta the
    // mean flowunit requires.
    EXPECT_CALL(*mock_flowunit,
                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))
        .WillRepeatedly(
            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {
              auto output_buf_1 = op_ctx->Output("Out_1");
              std::vector<size_t> data_1_shape = {5 * 4 * 3 * sizeof(uint8_t)};
              output_buf_1->Build(data_1_shape);
              auto *dev_data_1 =
                  static_cast<uint8_t *>(output_buf_1->At(0)->MutableData());
              for (size_t i = 0; i < 3; ++i) {
                for (size_t j = 0; j < 5; j++) {
                  for (size_t k = 0; k < 4; k++) {
                    {
                      dev_data_1[i * 20 + j * 4 + k] =
                          static_cast<uint8_t>(100);
                    }
                  }
                }
              }

              std::vector<size_t> shape{4, 5, 3};
              output_buf_1->Set("shape", shape);
              output_buf_1->Set("type", ModelBoxDataType::MODELBOX_UINT8);

              MBLOG_DEBUG << "test_mean_0 gen data, 0"
                          << output_buf_1->GetBytes();

              return modelbox::STATUS_OK;
            }));

    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {
      return modelbox::STATUS_OK;
    }));

    desc_flowunit.SetMockFlowUnit(mock_flowunit);
    ctl_->AddMockDriverFlowUnit("test_mean_0", "cpu", desc_flowunit,
                                std::string(TEST_DRIVER_DIR));
  }

  // test_mean_1: mock sink (1 input "In_1", 0 outputs) verifying the mean
  // result: 100 - {0, 10, 20} per channel.
  {
    MockFlowUnitDriverDesc desc_flowunit;
    desc_flowunit.SetClass("DRIVER-FLOWUNIT");
    desc_flowunit.SetType("cpu");
    desc_flowunit.SetName("test_mean_1");
    desc_flowunit.SetDescription("The test output data, 1 input 0 outputs");
    desc_flowunit.SetVersion("1.0.0");
    std::string file_path_flowunit =
        std::string(TEST_DRIVER_DIR) + "/libmodelbox-unit-cpu-test_mean_1.so";
    desc_flowunit.SetFilePath(file_path_flowunit);
    auto mock_flowunit = std::make_shared<MockFlowUnit>();
    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();
    mock_flowunit_desc->SetFlowUnitName("test_mean_1");
    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput("In_1"));
    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);
    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;
    mock_flowunit_wp = mock_flowunit;

    EXPECT_CALL(*mock_flowunit, Open(_))
        .WillRepeatedly(testing::Invoke(
            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {
              return modelbox::STATUS_OK;
            }));

    EXPECT_CALL(*mock_flowunit, DataPre(_))
        .WillRepeatedly(
            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {
              MBLOG_DEBUG << "test_mean_1 "
                          << "DataPre";
              return modelbox::STATUS_OK;
            }));

    EXPECT_CALL(*mock_flowunit, DataPost(_))
        .WillRepeatedly(
            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {
              MBLOG_DEBUG << "test_mean_1 "
                          << "DataPost";
              // STATUS_STOP ends the flow so BuildAndRun can return.
              return modelbox::STATUS_STOP;
            }));

    // Process() asserts every channel value equals 100 minus the configured
    // mean (0.0, 10.0, 20.0 — see the graph toml below).
    EXPECT_CALL(*mock_flowunit,
                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))
        .WillRepeatedly(
            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {
              auto input_bufs = op_ctx->Input("In_1");
              EXPECT_EQ(input_bufs->Size(), 1);
              for (size_t i = 0; i < input_bufs->Size(); ++i) {
                auto input_buf = input_bufs->At(i);
                std::vector<size_t> shape;
                input_buf->Get("shape", shape);
                size_t width = shape[1];
                size_t height = shape[0];
                EXPECT_EQ(width, 5);
                EXPECT_EQ(height, 4);

                const auto *const in_data =
                    static_cast<const float *>(input_buf->ConstData());
                for (size_t c = 0; c < 3; c++) {
                  for (size_t j = 0; j < width; j++) {
                    for (size_t k = 0; k < height; k++) {
                      float data = in_data[c * width * height + j * height + k];
                      if (c == 0) {
                        EXPECT_NEAR(data, 100, 0.0001);
                      } else if (c == 1) {
                        EXPECT_NEAR(data, 90, 0.0001);
                      } else {
                        EXPECT_NEAR(data, 80, 0.0001);
                      }
                    }
                  }
                }
              }

              return modelbox::STATUS_STOP;
            }));

    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {
      return modelbox::STATUS_OK;
    }));
    desc_flowunit.SetMockFlowUnit(mock_flowunit);
    ctl_->AddMockDriverFlowUnit("test_mean_1", "cpu", desc_flowunit,
                                std::string(TEST_DRIVER_DIR));
  }

  return STATUS_OK;
}

std::shared_ptr<DriverFlowTest> MeanCpuFlowUnitTest::GetDriverFlow() {
  return driver_flow_;
}

// Runs the test_mean_0 -> mean -> test_mean_1 graph; STATUS_STOP signals the
// sink finished its assertions and stopped the flow.
TEST_F(MeanCpuFlowUnitTest, RunUnit) {
  std::string profile_path = test_data_dir + "/perf";
  std::string toml_content = R"(

    [profile]
    trace = "enable"
    session = "enable"
    dir = ")" + profile_path +
                             "\"\n    " +
                             R"(
    [driver]
    skip-default=true
    dir=[")" + test_lib_dir + "\",\"" +
                             test_data_dir + "\"]\n    " +
                             R"([graph]
    graphconf = '''digraph demo {
          test_mean_0[type=flowunit, flowunit=test_mean_0, device=cpu,deviceid=0, label="<Out_1>"] 
          mean[type=flowunit, flowunit=mean, device=cpu, deviceid=0, label="<in_data> | <out_data>", mean="0.0,10.0,20.0"]
          test_mean_1[type=flowunit, flowunit=test_mean_1, device=cpu, deviceid=0, label="<In_1>"] 

          test_mean_0:Out_1 -> mean:in_data
          mean:out_data -> test_mean_1:In_1
        }'''
    format = "graphviz"
  )";
  
  auto driver_flow = GetDriverFlow();
  auto ret = driver_flow->BuildAndRun("RunUnit", toml_content);
  EXPECT_EQ(ret, STATUS_STOP);
}

}  // namespace modelbox
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/meta_mapping/CMakeLists.txt",
#
# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Builds the CPU "buff_meta_mapping" flowunit shared library and registers
# its sources with the driver unit-test targets.

cmake_minimum_required(VERSION 3.10)

set(UNIT_DEVICE "cpu")
set(UNIT_NAME "buff_meta_mapping")

project(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})

# Collect sources, then split into library sources and *_test.c* test sources.
file(GLOB UNIT_SOURCE *.cpp *.cc *.c)
group_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE "_test.c*" ${UNIT_SOURCE})

include_directories(${CMAKE_CURRENT_LIST_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${LIBMODELBOX_INCLUDE})
include_directories(${LIBMODELBOX_BASE_INCLUDE})
include_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})

set(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)
set(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})

add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})

set(LIBMODELBOX_FLOWUNIT_META_MAP_CPU_SHARED ${MODELBOX_UNIT_SHARED})
set_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES 
    SOVERSION ${MODELBOX_VERSION_MAJOR}
    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}
)

target_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})
target_link_libraries(${MODELBOX_UNIT_SHARED} pthread)
target_link_libraries(${MODELBOX_UNIT_SHARED} rt)
target_link_libraries(${MODELBOX_UNIT_SHARED} dl)
set_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME "modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}")

install(TARGETS ${MODELBOX_UNIT_SHARED} 
    COMPONENT cpu-device-flowunit
    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
    OPTIONAL
    )


install(DIRECTORY ${HEADER} 
    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} 
    COMPONENT cpu-device-flowunit-devel
    )

# Export locations to the parent project via the CMake cache.
set(LIBMODELBOX_FLOWUNIT_META_MAP_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_META_MAP_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_META_MAP_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL "")
set(LIBMODELBOX_FLOWUNIT_META_MAP_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL "")

# driver test
list(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})
list(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})
# NOTE(review): MODELBOX_UNIT_LINK_LIBRARY is never set in this file, so this
# append is likely a no-op — confirm whether it should be a real variable.
list(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})
set(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL "")
set(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL "")
set(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL "")
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/meta_mapping/meta_mapping_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"meta_mapping_flowunit.h\"\n\n#include <memory>\n\n#include \"modelbox/base/config.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\n#define CASTER_IMPL(code) \\\n  [](std::stringstream &ss, modelbox::Any *any) { code; }\n\n#define SETTER_IMPL(code)                                                     \\\n  [this](std::shared_ptr<modelbox::Buffer> &buffer, const std::string &str) { \\\n    code;                                                                     \\\n  }\n\nMetaMappingFlowUnit::MetaMappingFlowUnit() = default;\nMetaMappingFlowUnit::~MetaMappingFlowUnit() = default;\n\nmodelbox::Status MetaMappingFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  src_meta_name_ = opts->GetString(\"src_meta\");\n  if (src_meta_name_.empty()) {\n    MBLOG_ERROR << \"Missing src_meta in flowunit config\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  dest_meta_name_ = opts->GetString(\"dest_meta\");\n  if (dest_meta_name_.empty()) {\n    MBLOG_ERROR << \"Missing dest_meta in flowunit config\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto rules = opts->GetStrings(\"rules\");\n  auto ret = ParseRules(rules);\n  if (!ret) {\n    MBLOG_ERROR << \"parser rules failed\";\n    return ret;\n  }\n\n  
InitToStringCasters();\n  InitBufferMetaSetters();\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid MetaMappingFlowUnit::InitToStringCasters() {\n  to_string_casters_ = {\n      {typeid(int8_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<int8_t>(*any);)},\n      {typeid(uint8_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<uint8_t>(*any);)},\n      {typeid(int16_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<int16_t>(*any);)},\n      {typeid(uint16_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<uint16_t>(*any);)},\n      {typeid(int32_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<int32_t>(*any);)},\n      {typeid(uint32_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<uint32_t>(*any);)},\n      {typeid(int64_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<int64_t>(*any);)},\n      {typeid(uint64_t).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<uint64_t>(*any);)},\n      {typeid(float).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<float>(*any);)},\n      {typeid(double).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<double>(*any);)},\n      {typeid(bool).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<bool>(*any);)},\n      {typeid(std::string).hash_code(),\n       CASTER_IMPL(ss << modelbox::any_cast<std::string>(*any);)}};\n}\n\nvoid MetaMappingFlowUnit::InitBufferMetaSetters() {\n  buffer_meta_setters_ = {\n      {typeid(int8_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (int8_t)std::stoi(str));)},\n      {typeid(uint8_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (uint8_t)std::stoi(str));)},\n      {typeid(int16_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (int16_t)std::stoi(str));)},\n      {typeid(uint16_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (uint16_t)std::stoi(str));)},\n      {typeid(int32_t).hash_code(),\n       
SETTER_IMPL(buffer->Set(dest_meta_name_, (int32_t)std::stoi(str));)},\n      {typeid(uint32_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (uint32_t)std::stol(str));)},\n      {typeid(int64_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (int64_t)std::stol(str));)},\n      {typeid(uint64_t).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (uint64_t)std::stoul(str));)},\n      {typeid(float).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (float)std::stof(str));)},\n      {typeid(double).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, (double)std::stod(str));)},\n      {typeid(bool).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, str == \"true\");)},\n      {typeid(std::string).hash_code(),\n       SETTER_IMPL(buffer->Set(dest_meta_name_, str);)}};\n}\n\nmodelbox::Status MetaMappingFlowUnit::ParseRules(\n    const std::vector<std::string> &rules) {\n  for (const auto &rule : rules) {\n    auto rule_v = modelbox::StringSplit(rule, '=');\n    if (rule_v.size() != 2) {\n      return modelbox::STATUS_BADCONF;\n    }\n\n    MBLOG_INFO << \"Add map rule \" << rule_v[0] << \"=\" << rule_v[1];\n    mapping_rules_[rule_v[0]] = rule_v[1];\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status MetaMappingFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status MetaMappingFlowUnit::ToString(modelbox::Any *any,\n                                               std::string &val) {\n  const auto &type = any->type();\n  auto caster_item = to_string_casters_.find(type.hash_code());\n  if (caster_item == to_string_casters_.end()) {\n    MBLOG_ERROR << \"Not support meta type \" << type.name();\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  std::stringstream ss;\n  caster_item->second(ss, any);\n  val = ss.str();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MetaMappingFlowUnit::SetValue(\n    std::shared_ptr<modelbox::Buffer> &buffer, std::string &str,\n    
const std::type_info &type) {\n  try {\n    auto setter_item = buffer_meta_setters_.find(type.hash_code());\n    if (setter_item == buffer_meta_setters_.end()) {\n      MBLOG_ERROR << \"Not support meta type \" << type.name();\n      return modelbox::STATUS_NOTSUPPORT;\n    }\n\n    setter_item->second(buffer, str);\n  } catch (std::invalid_argument &e) {\n    MBLOG_ERROR << \"Can not convert \" << str << \" to target type \"\n                << type.name();\n    return modelbox::STATUS_FAULT;\n  } catch (std::out_of_range &e) {\n    MBLOG_ERROR << \"Value \" << str << \" is out of range for type \"\n                << type.name();\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MetaMappingFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_buffer_list = data_ctx->Input(INPUT_DATA);\n  auto output_buffer_list = data_ctx->Output(OUTPUT_DATA);\n  for (auto &buffer : *input_buffer_list) {\n    output_buffer_list->PushBack(buffer);\n    modelbox::Any *src_val = nullptr;\n    bool exist = false;\n    std::tie(src_val, exist) = buffer->Get(src_meta_name_);\n    if (!exist) {\n      continue;\n    }\n\n    modelbox::Any src_val_cpy = *src_val;\n    // Only copy src meta to dest meta\n    if (mapping_rules_.empty()) {\n      buffer->Set(dest_meta_name_, src_val_cpy);\n      continue;\n    }\n\n    // Try map src meta value to dest meta value\n    std::string src_val_str;\n    auto ret = ToString(src_val, src_val_str);\n    if (!ret) {\n      buffer->Set(dest_meta_name_, src_val_cpy);\n      continue;\n    }\n\n    auto item = mapping_rules_.find(src_val_str);\n    if (item == mapping_rules_.end()) {\n      buffer->Set(dest_meta_name_, src_val_cpy);\n      continue;\n    }\n\n    SetValue(buffer, item->second, src_val->type());\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MetaMappingFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return 
modelbox::STATUS_OK;\n};\n\nmodelbox::Status MetaMappingFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n};\n\nMODELBOX_FLOWUNIT(MetaMappingFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.AddFlowUnitInput({INPUT_DATA});\n  desc.AddFlowUnitOutput({OUTPUT_DATA});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"src_meta\", \"string\", true,\n                                                  \"\", \"the source meta\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"dest_meta\", \"string\", true,\n                                                  \"\", \"the dest meta\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"rules\", \"string\", false, \"\",\n                                                  \"the meta mapping rules\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/meta_mapping/meta_mapping_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_META_MAP_CPU_H_\n#define MODELBOX_FLOWUNIT_META_MAP_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <functional>\n#include <map>\n#include <string>\n\n#include \"modelbox/base/any.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"buff_meta_mapping\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: Modify the input buffer meta field name and value according to custom \"\n    \"rules. \\n\"\n    \"\\t@Port parameter: The input port and the output buffer type are binary. 
\\n\"\n    \"\\t@Constraint: \";\nconstexpr const char *INPUT_DATA = \"in_data\";\nconstexpr const char *OUTPUT_DATA = \"out_data\";\n\nusing MappingRules = std::map<std::string, std::string>;\nusing AnyToStringCaster =\n    std::function<void(std::stringstream &, modelbox::Any *)>;\nusing BufferSetter = std::function<void(std::shared_ptr<modelbox::Buffer> &,\n                                        const std::string &)>;\nclass MetaMappingFlowUnit : public modelbox::FlowUnit {\n public:\n  MetaMappingFlowUnit();\n  ~MetaMappingFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  void InitToStringCasters();\n\n  void InitBufferMetaSetters();\n\n  modelbox::Status ParseRules(const std::vector<std::string> &rules);\n\n  modelbox::Status ToString(modelbox::Any *any, std::string &val);\n\n  modelbox::Status SetValue(std::shared_ptr<modelbox::Buffer> &buffer,\n                            std::string &str, const std::type_info &type);\n\n  MappingRules mapping_rules_;\n  std::string src_meta_name_;\n  std::string dest_meta_name_;\n\n  std::map<size_t, AnyToStringCaster> to_string_casters_;\n  std::map<size_t, BufferSetter> buffer_meta_setters_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_META_MAP_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/meta_mapping/meta_mapping_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"meta_mapping_flowunit.h\"\n\n#include <functional>\n#include <future>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass MetaMappingFlowUnitTest : public testing::Test {\n public:\n  MetaMappingFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n  std::shared_ptr<MockFlow> RunDriverFlow(const std::string &rules);\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> MetaMappingFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<MockFlow> MetaMappingFlowUnitTest::RunDriverFlow(\n    const std::string &rules) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                        
     R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input, device=cpu, deviceid=0]\n          meta_mapping[type=flowunit, flowunit=buff_meta_mapping, device=cpu, deviceid=0, label=\"<output_data>\", src_meta=\"src\", dest_meta=\"dest\", rules=\")\" +\n                             rules + R\"(\"]\n          output[type=output, deveice=cpu, deviceid=0]\n          input -> meta_mapping:in_data\n          meta_mapping:out_data -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"InitUnit\", toml_content, -1);\n  return driver_flow;\n}\n\nStatus MetaMappingFlowUnitTest::AddMockFlowUnit() { return STATUS_OK; }\n\nTEST_F(MetaMappingFlowUnitTest, NameMapping) {\n  auto driver_flow = RunDriverFlow(\"\");\n\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({1});\n  auto buffer = buffer_list->At(0);\n  buffer->Set(\"src\", (int32_t)123);\n  ext_data->Send(\"input\", buffer_list);\n  modelbox::OutputBufferList output_buffer_map;\n  ext_data->Recv(output_buffer_map);\n  EXPECT_EQ(output_buffer_map.size(), 1);\n  auto output_buffer_list = output_buffer_map[\"output\"];\n  EXPECT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  int32_t dest_val;\n  EXPECT_TRUE(output_buffer->Get(\"dest\", dest_val));\n  EXPECT_EQ(dest_val, 123);\n  ext_data->Shutdown();\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(MetaMappingFlowUnitTest, Int32Mapping) {\n  auto driver_flow = RunDriverFlow(\"1=2,3=4,5=6\");\n\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({1, 1, 1, 1});\n  auto buffer = buffer_list->At(0);\n  buffer->Set(\"src\", (int32_t)1);\n  buffer->Set(\"expect\", (int32_t)2);\n  buffer = buffer_list->At(1);\n  buffer->Set(\"src\", (int32_t)3);\n  
buffer->Set(\"expect\", (int32_t)4);\n  buffer = buffer_list->At(2);\n  buffer->Set(\"src\", (int32_t)5);\n  buffer->Set(\"expect\", (int32_t)6);\n  buffer = buffer_list->At(3);\n  buffer->Set(\"src\", (int32_t)333);\n  buffer->Set(\"expect\", (int32_t)333);\n  ext_data->Send(\"input\", buffer_list);\n  modelbox::OutputBufferList output_buffer_map;\n  ext_data->Recv(output_buffer_map);\n  EXPECT_EQ(output_buffer_map.size(), 1);\n  auto output_buffer_list = output_buffer_map[\"output\"];\n  EXPECT_EQ(output_buffer_list->Size(), 4);\n  auto output_buffer1 = output_buffer_list->At(0);\n  auto output_buffer2 = output_buffer_list->At(1);\n  auto output_buffer3 = output_buffer_list->At(2);\n  auto output_buffer4 = output_buffer_list->At(3);\n  int32_t dest_val;\n  int32_t expect_val;\n  EXPECT_TRUE(output_buffer1->Get(\"dest\", dest_val));\n  EXPECT_TRUE(output_buffer1->Get(\"expect\", expect_val));\n  EXPECT_EQ(dest_val, expect_val);\n  EXPECT_TRUE(output_buffer2->Get(\"dest\", dest_val));\n  EXPECT_TRUE(output_buffer2->Get(\"expect\", expect_val));\n  EXPECT_EQ(dest_val, expect_val);\n  EXPECT_TRUE(output_buffer3->Get(\"dest\", dest_val));\n  EXPECT_TRUE(output_buffer3->Get(\"expect\", expect_val));\n  EXPECT_EQ(dest_val, expect_val);\n  EXPECT_TRUE(output_buffer4->Get(\"dest\", dest_val));\n  EXPECT_TRUE(output_buffer4->Get(\"expect\", expect_val));\n  EXPECT_EQ(dest_val, expect_val);\n  ext_data->Shutdown();\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(MetaMappingFlowUnitTest, StringMapping) {\n  auto driver_flow = RunDriverFlow(\"face=dis1|dis2\");\n\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({1});\n  auto buffer = buffer_list->At(0);\n  buffer->Set(\"src\", std::string(\"face\"));\n  ext_data->Send(\"input\", buffer_list);\n  modelbox::OutputBufferList output_buffer_map;\n  ext_data->Recv(output_buffer_map);\n  EXPECT_EQ(output_buffer_map.size(), 1);\n  
auto output_buffer_list = output_buffer_map[\"output\"];\n  EXPECT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  std::string dest_val;\n  EXPECT_TRUE(output_buffer->Get(\"dest\", dest_val));\n  EXPECT_EQ(dest_val, \"dis1|dis2\");\n  ext_data->Shutdown();\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10.2)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"mindspore-lite-inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\nif (NOT MINDSPORE_LITE_FOUND) \n    message(STATUS \"Not found mindspore-lite, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\nset(CMAKE_CXX_STANDARD 17)\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MINDSPORE_LITE_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore.cpu.inference.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_cpu_infer_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore.cpu.inference.encrypt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_cpu_infer_test_en.toml @ONLY)\n\ntarget_compile_options(${MODELBOX_UNIT_SHARED} PUBLIC -fvisibility=hidden)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cpu-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL 
\"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"mindspore_cpu_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"mindspore_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A mindspore cpu inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<MindSporeInferenceCPUFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetGlobal(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/mindspore_cpu_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mindspore_cpu_inference_flowunit.h\"\n\nMindSporeInferenceCPUFlowUnit::MindSporeInferenceCPUFlowUnit() = default;\n\nMindSporeInferenceCPUFlowUnit::~MindSporeInferenceCPUFlowUnit() = default;\n\nmodelbox::Status MindSporeInferenceCPUFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto context = std::make_shared<mindspore::Context>();\n  auto &device_list = context->MutableDeviceInfo();\n  auto cpu_device_info = std::make_shared<mindspore::CPUDeviceInfo>();\n  device_list.push_back(cpu_device_info);\n\n  infer_ = std::make_shared<MindSporeInference>(GetBindDevice(), context);\n  return infer_->Open(opts, this->GetFlowUnitDesc());\n}\n\nmodelbox::Status MindSporeInferenceCPUFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return infer_->Infer(data_ctx);\n}\n\nmodelbox::Status MindSporeInferenceCPUFlowUnit::Close() {\n  infer_ = nullptr;\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nMindSporeInferenceCPUFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  auto inference_flowunit = std::make_shared<MindSporeInferenceCPUFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/mindspore_cpu_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CPU_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CPU_H_\n\n#include <modelbox/flowunit.h>\n\n#include \"mindspore_inference.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\n\nclass MindSporeInferenceCPUFlowUnit : public modelbox::FlowUnit {\n public:\n  MindSporeInferenceCPUFlowUnit();\n  ~MindSporeInferenceCPUFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::shared_ptr<MindSporeInference> infer_;\n};\n\nclass MindSporeInferenceCPUFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  MindSporeInferenceCPUFlowUnitFactory() = default;\n  ~MindSporeInferenceCPUFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n   
 return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/mindspore_cpu_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"mindspore_inference_flowunit_test.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass InferenceMindSporeCPUFlowUnitTest : public testing::Test {\n public:\n  InferenceMindSporeCPUFlowUnitTest()\n      : mindspore_flow_(std::make_shared<InferenceMindSporeFlowUnitTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = mindspore_flow_->Init();\n    EXPECT_EQ(ret, STATUS_OK);\n\n    const std::string src_file =\n        test_assets + \"/mindspore_inference/\" + test_model_file;\n    const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n    mindspore_inference_path = test_data_dir + \"/mindspore_inference\";\n    mkdir(mindspore_inference_path.c_str(), 0700);\n    dest_model_file = mindspore_inference_path + \"/\" + test_model_file;\n    dest_toml_file = mindspore_inference_path + \"/\" + test_toml_file;\n    CopyFile(src_file, dest_model_file, true);\n    CopyFile(src_toml, dest_toml_file, true);\n    const std::string src_file_en =\n        
test_assets + \"/mindspore_inference/\" + test_model_file_en;\n    const std::string src_toml_en = test_data_dir + \"/\" + test_toml_file_en;\n    dest_model_file_en = mindspore_inference_path + \"/\" + test_model_file_en;\n    dest_toml_file_en = mindspore_inference_path + \"/\" + test_toml_file_en;\n    CopyFile(src_file_en, dest_model_file_en, true);\n    CopyFile(src_toml_en, dest_toml_file_en, true);\n  }\n\n  void TearDown() override {\n    remove(dest_model_file.c_str());\n    remove(dest_toml_file.c_str());\n    remove(dest_model_file_en.c_str());\n    remove(dest_toml_file_en.c_str());\n    remove(mindspore_inference_path.c_str());\n\n    mindspore_flow_ = nullptr;\n  };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_model_file = \"tensor_add.mindir\",\n                    test_toml_file = \"virtual_mindspore_infer_test.toml\",\n                    test_model_file_en = \"tensor_add_en.mindir\",\n                    test_toml_file_en = \"virtual_mindspore_infer_test_en.toml\";\n\n  std::string mindspore_inference_path, dest_model_file, dest_toml_file,\n      dest_model_file_en, dest_toml_file_en;\n\n  std::shared_ptr<InferenceMindSporeFlowUnitTest> mindspore_flow_;\n};\n\nTEST_F(InferenceMindSporeCPUFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0]             \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference, device=cpu, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, 
flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:input1\n          prepare_ms_infer_data:out2 -> mindspore_inference:input2\n          mindspore_inference:output1 -> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceMindSporeCPUFlowUnitTest, RunUnitEncrypt) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0]             \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference_encrypt, device=cpu, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:input1\n          prepare_ms_infer_data:out2 -> mindspore_inference:input2\n          mindspore_inference:output1 -> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore.cpu.inference.encrypt.in",
    "content": "[base]\nname = \"mindspore_inference_encrypt\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"an mindspore cpu inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add_en.ms\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.0\"\nrootkey = \"5yQMTJz5vFZFD7ABFyr6dCwjAVrPv5QTv3tfKSZ/cAAJz4Qnoj6VEiSCg2xVAr/z2MXsfSI1NZXYQ9zGqcMPB9+8H1NnSokrs3jKe7bSNDdo\"\npasswd = \"zTYD4Jued4ZotTeD8yBxVApnj74pMPlZQoG56FkGN2bcsBpcU/4IFQ4DGD55nNtZ2MUjoMYvWrVxSVuCMW5cqw==\"\n\n[input]\n[input.input1]\nname = \"input1\"\n\n[input.input2]\nname = \"input2\"\n\n[output]\n[output.output1]\nname = \"output1\"\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore.cpu.inference.in",
    "content": "[base]\nname = \"mindspore_inference\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"an mindspore cpu inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add.ms\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[config]\ninput_format = \"NCHW\"\n\n[input]\n[input.input1]\nname = \"input1\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[input.input2]\nname = \"input2\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"output1\"\ntype = \"float\"\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/model_decrypt_plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"model-decrypt-plugin\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_MODELDECRYPT_DEFAULT_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\nadd_definitions(-DVERSION_MAJOR=${MODELBOX_VERSION_MAJOR})\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES C_VISIBILITY_PRESET  hidden)\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES CXX_VISIBILITY_PRESET  hidden)\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_MODELDECRYPT_DEFAULT_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MODELDECRYPT_DEFAULT_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MODELDECRYPT_DEFAULT_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MODELDECRYPT_DEFAULT_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nlist(APPEND TEST_INCLUDE ${MODELBOX_COMMON_INFERENCE_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/model_decrypt_plugin/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"model_decrypt_header.h\"\n#include \"model_decrypt_plugin.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char* DRIVER_DESC = \"default model decrypt plugin with AES256\";\nconstexpr const char* DRIVER_NAME = \"modeldecrypt-plugin\";\nconstexpr const char* DRIVER_VERSION = \"1.0.0\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<ModelDecryptFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc* desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_MODEL_DECRYPT);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n  desc->SetVersion(DRIVER_VERSION);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/model_decrypt_plugin/model_decrypt_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"model_decrypt_plugin.h\"\n\n#include <sys/mman.h>\n\n#include \"modelbox/base/crypto.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n\nmodelbox::Status ModelDecryptPlugin::Init(\n    const std::string &fname,\n    const std::shared_ptr<modelbox::Configuration> config) {\n  rootkey_ = config->GetString(\"encryption.rootkey\");\n  en_pass_ = config->GetString(\"encryption.passwd\");\n  if (rootkey_.empty() || en_pass_.empty()) {\n    MBLOG_ERROR << \"passwd is empty\";\n    return modelbox::STATUS_FAULT;\n  }\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ModelDecryptPlugin::ModelDecrypt(uint8_t *raw_buf,\n                                                  int64_t raw_len,\n                                                  uint8_t *plain_buf,\n                                                  int64_t &plain_len) {\n  std::vector<char> pass;\n  auto ret = modelbox::PassDecrypt(en_pass_, rootkey_, &pass,\n                                   modelbox::DEFAULT_CIPHER_AES256_CBC);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"decrypt passwd err:\" << ret;\n    return ret;\n  }\n\n  std::vector<unsigned char> iv;\n  iv.resize(modelbox::IV_LEN + modelbox::MAX_PASSWORD_LEN);\n  modelbox::Base64Decode(en_pass_, &iv);\n\n  
int out_len;\n  ret = modelbox::Decrypt(modelbox::DEFAULT_CIPHER_AES256_CBC, raw_buf, raw_len,\n                          plain_buf, &out_len, plain_len,\n                          (unsigned char *)pass.data(), iv.data());\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"decrypt model err:\" << ret;\n    return ret;\n  }\n\n  plain_len = out_len;\n  return modelbox::STATUS_SUCCESS;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/model_decrypt_plugin/model_decrypt_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MODEL_DECRYPT_PLUGIN_H_\n#define MODELBOX_FLOWUNIT_MODEL_DECRYPT_PLUGIN_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n\n#include \"model_decrypt_interface.h\"\n\nclass ModelDecryptPlugin : public IModelDecryptPlugin {\n public:\n  ModelDecryptPlugin() = default;\n  ~ModelDecryptPlugin() override = default;\n  modelbox::Status Init(\n      const std::string &fname,\n      std::shared_ptr<modelbox::Configuration> config) override;\n  modelbox::Status ModelDecrypt(uint8_t *raw_buf, int64_t raw_len,\n                                uint8_t *plain_buf,\n                                int64_t &plain_len) override;\n\n private:\n  std::string rootkey_;\n  std::string en_pass_;\n};\n\nclass ModelDecryptFactory : public modelbox::DriverFactory {\n public:\n  ModelDecryptFactory() = default;\n  ~ModelDecryptFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> model_plugin =\n        std::make_shared<ModelDecryptPlugin>();\n    return model_plugin;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_MODEL_DECRYPT_PLUGIN_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/model_decrypt_plugin/model_decrypt_plugin_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <dlfcn.h>\n#include <gmock/gmock-actions.h>\n#include <securec.h>\n\n#include <cstdint>\n#include <cstdio>\n#include <functional>\n#include <memory>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"model_decrypt_interface.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/base/config.h\"\n#include \"modelbox/buffer.h\"\n\n#define DLL_NAME_SUB \"libmodelbox-unit-cpu-model-decrypt-plugin.so.\"\n#define MODELBOX_VERSION_MAJORSTR(R) #R\n#define MODELBOX_VERSION_MAJORSTRING(R) MODELBOX_VERSION_MAJORSTR(R)\n#define DLL_NAME DLL_NAME_SUB MODELBOX_VERSION_MAJORSTRING(MODELBOX_VERSION_MAJOR)\n\nnamespace modelbox {\n\ntypedef std::shared_ptr<DriverFactory> (*CreateDriverFactory)();\n\nclass ModelDecryptPluginTest : public testing::Test {\n public:\n  ModelDecryptPluginTest() = default;\n  CreateDriverFactory driver_func_ = nullptr;\n\n protected:\n  void SetUp() override {\n    auto ret = OpenDriver();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override {\n    if (driver_handler_) {\n      dlclose(driver_handler_);\n    }\n  };\n\n private:\n  modelbox::Status OpenDriver();\n  void 
*driver_handler_ = nullptr;\n};\n\nmodelbox::Status ModelDecryptPluginTest::OpenDriver() {\n  std::string so_path = TEST_DRIVER_DIR;\n  so_path.append(\"/\").append(DLL_NAME);\n  driver_handler_ = dlopen(so_path.c_str(), RTLD_GLOBAL | RTLD_NOW);\n  if (driver_handler_ == nullptr) {\n    MBLOG_ERROR << \"dll open fail :\" << dlerror();\n    return STATUS_FAULT;\n  }\n  driver_func_ =\n      (CreateDriverFactory)dlsym(driver_handler_, \"CreateDriverFactory\");\n  if (driver_func_ == nullptr) {\n    MBLOG_ERROR << \"dll func fail :\" << dlerror();\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(ModelDecryptPluginTest, ModelDecryptTest) {\n  if (driver_func_ == nullptr) {\n    MBLOG_ERROR << \"driver_func is null\";\n    return;\n  }\n  // This test would be skipped, if no auth info is provided.\n  auto model_decrypt_func = std::dynamic_pointer_cast<IModelDecryptPlugin>(\n      driver_func_()->GetDriver());\n  std::shared_ptr<Configuration> config = std::make_shared<Configuration>();\n  config->SetProperty(\"encryption.rootkey\",\n                      \"JRNd6slbpA08mRxnMwZZZJYBR5gHhtJASjgSiRNTiLgTNrC8DGEfKuYF\"\n                      \"SDashsuU/eHB1ybr+Fm7kgjDcoCYk71nv4LIHrHZL6QZiVqL9CfT\");\n  config->SetProperty(\"encryption.passwd\",\n                      \"IudbJKZB+7lenEjHkPO+AaMmoloOv5MMDbbZwqPSTpsANBWF/C/\"\n                      \"eDJGnDvARVpUV3EIgXm4oS28RBtNT27c+5Q==\");\n\n  auto ret = model_decrypt_func->Init(\"\", config);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  std::string test_str(\"this is a test\");\n  uint8_t enbuf[] = {0x87, 0xAC, 0xDD, 0x3D, 0x3F, 0x47, 0xCC, 0x87,\n                     0x3C, 0x1A, 0x1B, 0x31, 0x3B, 0xB5, 0x34, 0x70};\n  char plainbuf[sizeof(enbuf) + EVP_MAX_BLOCK_LENGTH + 1];\n  int64_t plain_len = sizeof(plainbuf);\n  ret = model_decrypt_func->ModelDecrypt((uint8_t *)enbuf, sizeof(enbuf),\n                                         (uint8_t *)plainbuf, plain_len);\n  EXPECT_EQ(ret, STATUS_OK);\n  std::string 
result_str(plainbuf, plain_len);\n  EXPECT_EQ(result_str.compare(test_str), 0);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/normalize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"normalize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_NORMALIZE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/normalize/normalize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"normalize_flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nNormalizeFlowUnit::NormalizeFlowUnit() = default;\nNormalizeFlowUnit::~NormalizeFlowUnit() = default;\n\nmodelbox::Status NormalizeFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  const auto input_bufs = data_ctx->Input(\"in_data\");\n  if (!CheckBufferListValid(input_bufs)) {\n    MBLOG_ERROR << \"normalize flowunit in_data invalied\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_data\");\n  if (!BuildOutputBufferList(input_bufs, output_bufs)) {\n    MBLOG_ERROR << \"build out_data BufferList failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    auto input_buf = input_bufs->At(i);\n    std::vector<size_t> shape;\n    if (!input_buf->Get(\"shape\", shape)) {\n      MBLOG_ERROR << \"mean flowunit can not get shape from meta\";\n      continue;\n    }\n\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    if (!input_bufs->At(i)->Get(\"type\", type)) {\n      MBLOG_FATAL << \"normalize flowunit can not get input type from meta\";\n      continue;\n    }\n\n    float *in_data_f32 = nullptr;\n    uint8_t *in_data_uint8 = nullptr;\n    if (type == 
modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n      Process(in_data_f32, input_buf, output_bufs->At(i));\n    } else {\n      Process(in_data_uint8, input_buf, output_bufs->At(i));\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\ntemplate <typename T>\nvoid NormalizeFlowUnit::Process(\n    const T *input_data, const std::shared_ptr<modelbox::Buffer> &input_buf,\n    const std::shared_ptr<modelbox::Buffer> &out_buff) {\n  input_data = static_cast<T *>(const_cast<void *>(input_buf->ConstData()));\n  if (input_data == nullptr) {\n    MBLOG_ERROR << \"normalize FlowUnit flowunit data is nullptr\";\n    return;\n  }\n\n  size_t size = (input_buf->GetBytes() / sizeof(T)) / CHANNEL_NUM;\n  out_buff->CopyMeta(input_buf);\n  out_buff->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n  auto *out_data = static_cast<float *>(out_buff->MutableData());\n  if (out_data == nullptr) {\n    MBLOG_ERROR << \"get output memory failed.\";\n    return;\n  }\n\n  for (size_t c = 0; c < CHANNEL_NUM; c++) {\n    for (size_t j = size * c; j < size * (c + 1); j++) {\n      out_data[j] = input_data[j] * params_.normalizes_[c];\n    }\n  }\n}\n\nMODELBOX_FLOWUNIT(NormalizeFlowUnit, desc) {\n  desc.SetFlowUnitName(\"normalize\");\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_data\"});\n  desc.AddFlowUnitOutput({\"out_data\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"standard_deviation_inverse\", \"string\", true, \"\", \"the normalize param\"));\n  desc.SetInputContiguous(false);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/normalize/normalize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NORMALIZE_H_\n#define MODELBOX_FLOWUNIT_NORMALIZE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <normalize_flowunit_base.h>\n\nconstexpr const char *FLOWUNIT_NAME = \"normalize\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: The operator is used to normalize for tensor data, \"\n    \"for example the image(RGB/BGR). \\n\"\n    \"\\t@Port parameter: The input port and the output buffer type are tensor. 
\\n\"\n    \"\\t  The tensor type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: \";\n\nclass NormalizeFlowUnit : public NormalizeFlowUnitBase {\n public:\n  NormalizeFlowUnit();\n  ~NormalizeFlowUnit() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  template <typename T>\n  void Process(const T *input_data,\n               const std::shared_ptr<modelbox::Buffer> &input_buf,\n               const std::shared_ptr<modelbox::Buffer> &out_buff);\n};\n\n#endif  // MODELBOX_FLOWUNIT_NORMALIZE_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/normalize/normalize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass NormalizeCpuFlowUnitTest : public testing::Test {\n public:\n  NormalizeCpuFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  }\n\n  void TearDown() override { driver_flow_->Clear(); };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nStatus NormalizeCpuFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_normalize_0\");\n    desc_flowunit.SetDescription(\"The test input 
data, 0 inputs 1 output\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-test_normalize_0.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_normalize_0\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              modelbox::Status ret = modelbox::STATUS_FAULT;\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n                return ret;\n              }\n\n              auto buffer_list = ext_data->CreateBufferList();\n              buffer_list->Build({10 * sizeof(int)});\n              auto *data = (int *)buffer_list->MutableData();\n              for (size_t i = 0; i < 10; i++) {\n                data[i] = i;\n              }\n\n              auto status = ext_data->Send(buffer_list);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n                return status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n    
    .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_0 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_0 \"\n                          << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext> &op_ctx) {\n              auto output_buf_1 = op_ctx->Output(\"Out_1\");\n              std::vector<size_t> data_1_shape = {5 * 4 * 3 * sizeof(float)};\n              output_buf_1->Build(data_1_shape);\n              auto *dev_data_1 =\n                  static_cast<float *>(output_buf_1->At(0)->MutableData());\n              for (size_t i = 0; i < 3; ++i) {\n                for (size_t j = 0; j < 5; j++) {\n                  for (size_t k = 0; k < 4; k++) {\n                    dev_data_1[i * 20 + j * 4 + k] = static_cast<float>(255);\n                  }\n                }\n              }\n\n              std::vector<size_t> shape{4, 5, 3};\n              output_buf_1->Set(\"shape\", shape);\n              output_buf_1->Set(\"type\", ModelBoxDataType::MODELBOX_FLOAT);\n\n              MBLOG_DEBUG << \"test_normalize_0 gen data, 0\"\n                          << output_buf_1->GetBytes();\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_normalize_0\", \"cpu\", 
desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_normalize_1\");\n    desc_flowunit.SetDescription(\"The test output data, 1 input 0 outputs\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-test_normalize_1.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_normalize_1\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_1 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext> &data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_1 \"\n                          << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            
testing::Invoke([=](const std::shared_ptr<DataContext> &op_ctx) {\n              auto input_bufs = op_ctx->Input(\"In_1\");\n              EXPECT_EQ(input_bufs->Size(), 1);\n              for (size_t i = 0; i < input_bufs->Size(); ++i) {\n                auto input_buf = input_bufs->At(i);\n                std::vector<size_t> shape;\n                input_buf->Get(\"shape\", shape);\n                size_t width = shape[1];\n                size_t height = shape[0];\n                EXPECT_EQ(width, 5);\n                EXPECT_EQ(height, 4);\n\n                const auto *const in_data =\n                    static_cast<const float *>(input_buf->ConstData());\n                for (size_t c = 0; c < 3; c++) {\n                  for (size_t j = 0; j < width; j++) {\n                    for (size_t k = 0; k < height; k++) {\n                      float data = in_data[c * width * height + j * height + k];\n                      if (c == 0) {\n                        EXPECT_NEAR(data, 1, 0.0001);\n                      } else if (c == 1) {\n                        EXPECT_NEAR(data, 255, 0.0001);\n                      } else {\n                        EXPECT_NEAR(data, 255, 0.0001);\n                      }\n                    }\n                  }\n                }\n              }\n\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_normalize_1\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}  // namespace modelbox\n\nstd::shared_ptr<DriverFlowTest> NormalizeCpuFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(NormalizeCpuFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir 
+ \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_normalize_0[type=flowunit, flowunit=test_normalize_0, device=cpu,deviceid=0, label=\"<Out_1>\"] \n          normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<in_data> | <out_data>\", standard_deviation_inverse=\"0.003921568627451,1,1\"]\n          test_normalize_1[type=flowunit, flowunit=test_normalize_1, device=cpu, deviceid=0, label=\"<In_1>\"] \n\n          test_normalize_0:Out_1 -> normalize:in_data\n          normalize:out_data -> test_normalize_1:In_1\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"output_broker\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_DRIVER_UTIL_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n\nadd_subdirectory(broker_plugin)"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(output_broker_plugin)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif(OBS_FOUND)\n    add_subdirectory(obs_output_broker)\nelse()\n    message(STATUS \"Not found obs library, disable obs output broker plugin\")\nendif()\n\nif(DIS_FOUND)\n    add_subdirectory(dis_output_broker)\nelse()\n    message(STATUS \"Not found dis library, disable dis output broker plugin\")\nendif()\n\nif(CPPREST_FOUND)\n    add_subdirectory(webhook_output_broker)\nelse()\n    message(STATUS \"Not found cpprest library, disable webhook output broker plugin\")\nendif()\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/dis_output_broker/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n \nset(PLUGIN_NAME \"dis\")\n \nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n \nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IAM_AUTH_INCLUDE})\ninclude_directories(${DIS_INCLUDE_DIR})\n \nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n \nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n \ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
${MODELBOX_COMMON_IAM_AUTH_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${DIS_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\n \nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n \ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \n \ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n \nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n \n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/dis_output_broker/dis_output_broker.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dis_output_broker.h\"\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\n#include \"iam_auth.h\"\n#include \"modelbox/base/uuid.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n\n#define MAX_AK_SIZE 64\n#define MAX_SK_SIZE 128\n#define MAX_TOKEN_SIZE 16 * 1024\n#define MAX_PROJECT_ID_SIZE 64\n#define SERIALIZE_MODE \"base64\"\n\nDisOutputConfigurations DisOutputBroker::output_configs_;\nstd::mutex DisOutputBroker::output_configs_lock_;\nthread_local modelbox::Status callback_status;\n\nDisOutputBroker::DisOutputBroker() {}\nDisOutputBroker::~DisOutputBroker() {}\n\nstd::atomic_bool DisOutputBroker::init_flag_{false};\n\nmodelbox::Status DisOutputBroker::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  bool expect = false;\n  if (!init_flag_.compare_exchange_strong(expect, true,\n                                          std::memory_order_acq_rel)) {\n    return modelbox::STATUS_OK;\n  }\n\n  FILE *logFile = NULL;\n  logFile = stdout;\n  if (NULL == logFile) {\n    MBLOG_ERROR << \"Output dis sdk log failed\";\n    return modelbox::STATUS_FAULT;\n  }\n  int ret = 0;\n  ret = DisInit(logFile, GetUserAuthInfo);\n  if (0 != ret) {\n    MBLOG_ERROR << \"Init dis sdk failed: \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n  MBLOG_INFO << \"Init dis sdk success\";\n\n  
return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DisOutputBroker::Deinit() {\n  bool expect = true;\n  if (!init_flag_.compare_exchange_strong(expect, false,\n                                          std::memory_order_acq_rel)) {\n    return modelbox::STATUS_OK;\n  }\n\n  DisDeinit();\n  MBLOG_INFO << \"Deinit dis sdk success\";\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::OutputBrokerHandle> DisOutputBroker::Open(\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config) {\n  std::string uuid;\n  if (modelbox::STATUS_OK != modelbox::GetUUID(&uuid)) {\n    MBLOG_ERROR << \"Failed to generate a uuid for the dis output broker!\";\n    return nullptr;\n  }\n\n  auto handle = std::make_shared<modelbox::OutputBrokerHandle>();\n  handle->broker_id_ = uuid;\n\n  if (modelbox::STATUS_OK != ParseConfig(handle, config)) {\n    MBLOG_ERROR << \"Parse config to json failed\";\n    return nullptr;\n  }\n\n  return handle;\n}\n\nmodelbox::Status DisOutputBroker::Write(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  if (buffer == nullptr) {\n    MBLOG_ERROR << \"Invalid buffer: buffer is nullptr!\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  size_t data_size = buffer->GetBytes();\n  char *data = const_cast<char *>((const char *)buffer->ConstData());\n  if (data == nullptr || data_size == 0) {\n    MBLOG_WARN << \"Invalid data! Nothing to be upload!\";\n  }\n\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! 
Can not find the broker configuration, type: \"\n        << handle->output_broker_type_ << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n  std::shared_ptr<DisOutputInfo> output_info = iter->second;\n  guard.unlock();\n\n  char *method = const_cast<char *>(SERIALIZE_MODE);\n  DISSetSerializedMode(method);\n\n  char *region = const_cast<char *>(output_info->region.c_str());\n  char *host = const_cast<char *>(output_info->end_point.c_str());\n  char *stream_name = const_cast<char *>(output_info->stream_name.c_str());\n\n  int ret = 0;\n  char project_id_mask[MAX_PROJECT_ID_SIZE];\n  ret = snprintf_s(project_id_mask, MAX_PROJECT_ID_SIZE,\n                   handle->broker_id_.size(), \"%s\",\n                   handle->broker_id_.c_str());\n  if (ret == -1) {\n    MBLOG_ERROR << \"Failed to copy broker_id to project_id.\"\n                << \" ret: \" << ret\n                << \", broker_id size: \" << handle->broker_id_.size()\n                << \", MAX_PROJECT_ID_SIZE: \" << MAX_PROJECT_ID_SIZE;\n    if (handle->broker_id_.size() >= MAX_PROJECT_ID_SIZE) {\n      MBLOG_ERROR << \"MAX_PROJECT_ID_SIZE must be larger than broker_id size.\";\n    }\n    return modelbox::STATUS_FAULT;\n  }\n\n  callback_status = modelbox::STATUS_OK;\n\n  DISResponseInfo rsp_info = {0};\n  DISPutRecord record = {0};\n  record.recordData.stringLen = data_size;\n  record.recordData.data = data;\n  record.partitionKey = const_cast<char *>(handle->broker_id_.c_str());\n\n  ret = PutRecords(host, project_id_mask, region, stream_name, 1, &record,\n                   PutRecordCallBack, &rsp_info);\n  if (ret == 0 && rsp_info.HttpResponseCode < 300 &&\n      callback_status == modelbox::STATUS_OK) {\n    MBLOG_DEBUG << \"Send record success, ret: \" << ret\n                << \". 
Http response code: \" << rsp_info.HttpResponseCode;\n    return modelbox::STATUS_OK;\n  } else if (JudgeTryAgain(rsp_info.HttpResponseCode) ==\n                 modelbox::STATUS_FAULT ||\n             callback_status == modelbox::STATUS_FAULT) {\n    MBLOG_ERROR << \"Send record failed, the httprspcode is: \"\n                << rsp_info.HttpResponseCode << \", ret: \" << ret\n                << \". Error code: \" << rsp_info.ErrorCode\n                << \". Error detail: \" << rsp_info.ErrorDetail;\n    return modelbox::STATUS_FAULT;\n  } else {\n    if (JudgeUpdateCert(rsp_info.HttpResponseCode)) {\n      MBLOG_WARN << \"Send dis failed, try to update cert info.\";\n      modelbox::AgencyInfo agency_info;\n      agency_info.user_domain_name = output_info->domain_name;\n      agency_info.xrole_name = output_info->xrole_name;\n      auto hw_cert = modelbox::IAMAuth::GetInstance();\n      if (hw_cert == nullptr) {\n        MBLOG_ERROR << \"Failed to get hw_cert instance!\";\n        return {modelbox::STATUS_FAULT};\n      }\n      hw_cert->ExpireUserAgencyProjectCredential(agency_info);\n    }\n\n    MBLOG_WARN << \"Try to send dis again.\";\n\n    return modelbox::STATUS_AGAIN;\n  }\n}\n\nmodelbox::Status DisOutputBroker::Sync(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DisOutputBroker::Close(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR << \"Broker handle not found, type: \"\n                << handle->output_broker_type_\n                << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  output_configs_.erase(handle->broker_id_);\n  guard.unlock();\n\n  return modelbox::STATUS_OK;\n}\n\nDISStatus DisOutputBroker::GetUserAuthInfo(char *project_id, char 
*ak_array,\n                                           char *sk_array,\n                                           char *x_security_token) {\n  std::string broker_id;\n  broker_id = project_id;\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(broker_id);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! Can not find the broker configuration, id: \"\n        << broker_id;\n    return DISStatusGetUserAuthInfoErr;\n  }\n  guard.unlock();\n\n  std::shared_ptr<DisOutputInfo> output_info = iter->second;\n\n  if (GetCertInfo(output_info) != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to send data! Invalid authorization.\";\n    return DISStatusGetUserAuthInfoErr;\n  }\n  int ret = 0;\n  ret = strncpy_s(ak_array, MAX_AK_SIZE, output_info->ak.c_str(),\n                  output_info->ak.size());\n  if (ret != 0) {\n    MBLOG_ERROR << \"Failed to copy output_info->ak to ak_array.\"\n                << \" ret: \" << ret\n                << \". output_info->ak size: \" << output_info->ak.size()\n                << \", MAX_AK_SIZE: \" << MAX_AK_SIZE;\n    if (output_info->ak.size() >= MAX_AK_SIZE) {\n      MBLOG_ERROR << \"MAX_AK_SIZE must be larger than output_info->ak size.\";\n    }\n    return DISStatusGetUserAuthInfoErr;\n  }\n  ret = strncpy_s(sk_array, MAX_SK_SIZE, output_info->sk.c_str(),\n                  output_info->sk.size());\n  if (ret != 0) {\n    MBLOG_ERROR << \"Failed to copy output_info->sk to sk_array.\"\n                << \" ret: \" << ret\n                << \". 
output_info->sk size: \" << output_info->sk.size()\n                << \", MAX_SK_SIZE: \" << MAX_SK_SIZE;\n    if (output_info->sk.size() >= MAX_SK_SIZE) {\n      MBLOG_ERROR << \"MAX_SK_SIZE must be larger than output_info->ak size.\";\n    }\n    return DISStatusGetUserAuthInfoErr;\n  }\n  ret = strncpy_s(project_id, MAX_PROJECT_ID_SIZE,\n                  output_info->project_id.c_str(),\n                  output_info->project_id.size());\n  if (ret != 0) {\n    MBLOG_ERROR << \"Failed to copy output_info->project_id to project_id.\"\n                << \" ret: \" << ret << \". output_info->project_id size: \"\n                << output_info->project_id.size()\n                << \", MAX_PROJECT_ID_SIZE: \" << MAX_PROJECT_ID_SIZE;\n    if (output_info->project_id.size() >= MAX_PROJECT_ID_SIZE) {\n      MBLOG_ERROR << \"MAX_PROJECT_ID_SIZE must be larger than \"\n                     \"output_info->project_id size.\";\n    }\n    return DISStatusGetUserAuthInfoErr;\n  }\n  ret = strncpy_s(x_security_token, MAX_TOKEN_SIZE, output_info->token.c_str(),\n                  output_info->token.size());\n  if (ret != 0) {\n    MBLOG_ERROR << \"Failed to copy output_info->token to x_security_token.\"\n                << \" ret: \" << ret\n                << \". 
output_info->token size: \" << output_info->token.size()\n                << \", MAX_TOKEN_SIZE: \" << MAX_TOKEN_SIZE;\n    if (output_info->token.size() >= MAX_TOKEN_SIZE) {\n      MBLOG_ERROR\n          << \"MAX_TOKEN_SIZE must be larger than output_info->token size.\";\n    }\n    return DISStatusGetUserAuthInfoErr;\n  }\n\n  MBLOG_DEBUG << \"Get user auth info success\";\n\n  return DISStatusOK;\n}\n\nmodelbox::Status DisOutputBroker::GetCertInfo(\n    std::shared_ptr<DisOutputInfo> &output_info) {\n  MBLOG_DEBUG << \"Try to get cert info.\";\n\n  modelbox::UserAgencyCredential credential;\n  modelbox::AgencyInfo agent_info;\n\n  agent_info.user_domain_name = output_info->domain_name;\n  agent_info.xrole_name = output_info->xrole_name;\n\n  auto hw_cert = modelbox::IAMAuth::GetInstance();\n  if (hw_cert == nullptr) {\n    MBLOG_ERROR << \"Failed to get hw_cert instance!\";\n    return {modelbox::STATUS_FAULT};\n  }\n\n  auto ret = hw_cert->GetUserAgencyProjectCredential(credential, agent_info,\n                                                     output_info->user_id);\n\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Failed to get credential info!\";\n    return {modelbox::STATUS_FAULT};\n  }\n\n  if (credential.user_ak.empty()) {\n    MBLOG_ERROR << \"Failed to get credential ak info! String is empty.\";\n    return {modelbox::STATUS_FAULT};\n  }\n  if (credential.user_sk.empty()) {\n    MBLOG_ERROR << \"Failed to get credential sk info! String is empty.\";\n\n    return {modelbox::STATUS_FAULT};\n  }\n  if (credential.user_secure_token.empty()) {\n    MBLOG_ERROR << \"Failed to get credential token info! 
String is empty.\";\n    return {modelbox::STATUS_FAULT};\n  }\n\n  output_info->ak = credential.user_ak;\n  output_info->sk = credential.user_sk;\n  output_info->token = \"X-Security-Token:\" + credential.user_secure_token;\n\n  MBLOG_DEBUG << \"Get cert info success.\";\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DisOutputBroker::ParseConfig(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::string &config) {\n  nlohmann::json json;\n  std::shared_ptr<DisOutputInfo> output_info =\n      std::make_shared<DisOutputInfo>();\n  try {\n    json = nlohmann::json::parse(config);\n\n    std::string end_point;\n    end_point = json[\"disEndPoint\"].get<std::string>();\n    std::string::size_type idx;\n    std::string https_endpoint = \"https://\";\n    idx = end_point.find(https_endpoint);\n    if (idx == 0) {\n      end_point = end_point.erase(idx, https_endpoint.size());\n    }\n    output_info->end_point = end_point;\n    if (end_point.empty()) {\n      MBLOG_ERROR\n          << \"Invalid disEndPoint, value of key <disEndPoint> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    output_info->region = json[\"region\"].get<std::string>();\n    if (output_info->region.empty()) {\n      MBLOG_ERROR << \"Invalid region, value of key <region> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    output_info->stream_name = json[\"streamName\"].get<std::string>();\n    if (output_info->stream_name.empty()) {\n      MBLOG_ERROR << \"Invalid streamName, value of key <streamName> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    output_info->project_id = json[\"projectId\"].get<std::string>();\n    if (output_info->project_id.empty()) {\n      MBLOG_ERROR << \"Invalid projectId, value of key <projectId> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    if (json.contains(\"domainName\")) {\n      output_info->domain_name = json[\"domainName\"].get<std::string>();\n      if 
(output_info->domain_name.empty()) {\n        MBLOG_DEBUG << \"Value of key <domainName> is empty!\";\n      }\n    }\n\n    if (json.contains(\"xroleName\")) {\n      output_info->xrole_name = json[\"xroleName\"].get<std::string>();\n      if (output_info->xrole_name.empty()) {\n        MBLOG_DEBUG << \"Value of key <xroleName> is empty!\";\n      }\n    }\n\n    if (json.contains(\"userId\")) {\n      output_info->user_id = json[\"userId\"].get<std::string>();\n      MBLOG_DEBUG << \"Value of key <userId> is \" << output_info->user_id;\n    }\n\n    std::unique_lock<std::mutex> guard(output_configs_lock_);\n    output_configs_[handle->broker_id_] = output_info;\n    guard.unlock();\n\n    MBLOG_DEBUG << \"Parse cfg json success.\";\n\n    return modelbox::STATUS_OK;\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse output config to json failed, detail: \" << e.what();\n\n    return modelbox::STATUS_BADCONF;\n  }\n}\n\nDISStatus DisOutputBroker::PutRecordCallBack(\n    char *error_code, char *error_details, char *stream_name,\n    DISPutRecord *put_record, char *seq_number, char *partitiod_id) {\n  if (NULL == seq_number) {\n    MBLOG_WARN << \"Send record failed, key: \" << put_record->partitionKey\n               << \", error code: \" << error_code\n               << \", message: \" << error_details;\n    if (strncmp(error_code, \"DIS.4219\", 8) == 0 ||\n        strncmp(error_code, \"DIS.4223\", 8) == 0) {\n      callback_status = modelbox::STATUS_FAULT;\n    } else {\n      MBLOG_WARN << \"Set try again flag.\";\n      callback_status = modelbox::STATUS_AGAIN;\n    }\n    return DISStatusError;\n  } else {\n    MBLOG_DEBUG << \"Send record success, key: \" << put_record->partitionKey\n                << \", seqnum: \" << seq_number << \", pid: \" << partitiod_id;\n    callback_status = modelbox::STATUS_OK;\n    return DISStatusOK;\n  }\n}\n\nmodelbox::Status DisOutputBroker::JudgeTryAgain(long http_response_code) {\n  switch 
(http_response_code) {\n    case 400:\n    case 403:\n    case 404:\n    case 405:\n    case 503:\n      return modelbox::STATUS_FAULT;\n    default:\n      return modelbox::STATUS_AGAIN;\n  }\n}\n\nbool DisOutputBroker::JudgeUpdateCert(long http_response_code) {\n  switch (http_response_code) {\n    case 401:\n    case 407:\n    case 441:\n      return true;\n    default:\n      return false;\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/dis_output_broker/dis_output_broker.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DIS_OUTPUT_BROKER_CPU_H_\n#define MODELBOX_FLOWUNIT_DIS_OUTPUT_BROKER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/output_broker_plugin.h>\n\n#include \"dis/dis.h\"\n#include \"output_broker_flowunit.h\"\n\nconstexpr const char *DRIVER_NAME = \"dis\";\nconstexpr const char *DRIVER_DESC = \"A dis output broker plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\ntypedef struct tag_DisOutputInfo {\n  std::string ak;\n  std::string sk;\n  std::string token;\n  std::string end_point;\n  std::string region;\n  std::string stream_name;\n  std::string project_id;\n  std::string domain_name;\n  std::string xrole_name;\n  std::string user_id;\n} DisOutputInfo;\n\nusing DisOutputConfigurations =\n    std::map<std::string, std::shared_ptr<DisOutputInfo>>;\n\nclass DisOutputBroker : public modelbox::OutputBrokerPlugin {\n public:\n  DisOutputBroker();\n  ~DisOutputBroker() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  std::shared_ptr<modelbox::OutputBrokerHandle> Open(\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config) override;\n\n  modelbox::Status Write(\n      const 
std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::shared_ptr<modelbox::Buffer> &buffer) override;\n\n  modelbox::Status Sync(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n  modelbox::Status Close(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n private:\n  static DISStatus GetUserAuthInfo(char *project_id, char *ak_array,\n                                   char *sk_array, char *x_security_token);\n  static modelbox::Status GetCertInfo(\n      std::shared_ptr<DisOutputInfo> &output_info);\n  modelbox::Status ParseConfig(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::string &config);\n  static DISStatus PutRecordCallBack(char *error_code, char *error_details,\n                                     char *stream_name,\n                                     DISPutRecord *put_record, char *seq_number,\n                                     char *partitiod_id);\n  modelbox::Status JudgeTryAgain(long http_response_code);\n  bool JudgeUpdateCert(long http_response_code);\n  static DisOutputConfigurations output_configs_;\n  static std::mutex output_configs_lock_;\n  static std::atomic_bool init_flag_;\n};\n\nclass DisOutputBrokerFactory : public modelbox::DriverFactory {\n public:\n  DisOutputBrokerFactory() = default;\n  virtual ~DisOutputBrokerFactory() = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<DisOutputBroker>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_DIS_OUTPUT_BROKER_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/dis_output_broker/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n \n#include \"driver_desc.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"dis_output_broker.h\"\n \n#include <stdio.h>\n#include <memory>\n \nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<DisOutputBrokerFactory>();\n  return factory;\n}\n \nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_OUTPUT_BROKER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n \n  return;\n}\n \nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n \nvoid DriverFini() {\n  // Driver Fini.\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/dis_output_broker/driver_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DRIVER_DESC_H_\n#define MODELBOX_DRIVER_DESC_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/status.h>\n\nextern \"C\" {\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory();\n\nmodelbox::Status DriverInit();\n\nvoid DriverFini();\n\nvoid DriverDescription(modelbox::DriverDesc *desc);\n}  // namespace modelbox\n\n#endif  // MODELBOX_DRIVER_DESC_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/obs_output_broker/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"obs\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_OBS_CLIENT_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
${MODELBOX_COMMON_OBS_CLIENT_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${OBS_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/obs_output_broker/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"obs_output_broker.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<OBSOutputBrokerFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_OUTPUT_BROKER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/obs_output_broker/obs_output_broker.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"obs_output_broker.h\"\n\n#include <modelbox/base/uuid.h>\n#include <modelbox/device/cpu/device_cpu.h>\n#include <modelbox/iam_auth.h>\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\nObsOutputBroker::ObsOutputBroker() = default;\nObsOutputBroker::~ObsOutputBroker() = default;\n\nmodelbox::Status ObsOutputBroker::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  obs_status ret_status = OBS_STATUS_BUTT;\n  ret_status = obs_initialize(OBS_INIT_ALL);\n  if (OBS_STATUS_OK != ret_status) {\n    const auto *obs_status_name = obs_get_status_name(ret_status);\n    if (obs_status_name == nullptr) {\n      obs_status_name = \"null\";\n    }\n    MBLOG_ERROR << \"failed to initialize OBS SDK: \" << obs_status_name;\n    return {modelbox::STATUS_FAULT};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsOutputBroker::Deinit() { return modelbox::STATUS_OK; }\n\nstd::shared_ptr<modelbox::OutputBrokerHandle> ObsOutputBroker::Open(\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config) {\n  nlohmann::json config_json;\n  std::shared_ptr<OBSOutputInfo> output_info =\n      std::make_shared<OBSOutputInfo>();\n\n  try {\n    config_json = nlohmann::json::parse(config);\n\n    auto value = config_json[\"obsEndPoint\"];\n    if 
(value.empty()) {\n      MBLOG_ERROR << \"obsEndPoint is empty!\";\n      return nullptr;\n    }\n    std::string http_header = \"http://\";\n    std::string https_header = \"https://\";\n    std::string end_point = value;\n\n    if (end_point.find(http_header) == 0) {\n      end_point = end_point.substr(http_header.length());\n    } else if (end_point.find(https_header) == 0) {\n      end_point = end_point.substr(https_header.length());\n    }\n    output_info->end_point = end_point;\n\n    value = config_json[\"bucket\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"bucket is empty!\";\n      return nullptr;\n    }\n    output_info->bucket = value;\n\n    value = config_json[\"path\"];\n    if (value.empty()) {\n      MBLOG_ERROR << \"path is empty!\";\n      return nullptr;\n    }\n    output_info->path = value;\n\n    if (config_json.contains(\"domainName\")) {\n      value =\n          config_json[\"domainName\"];  // domainName maybe empty in edge scene.\n      output_info->domain_name = value;\n    }\n    if (config_json.contains(\"xroleName\")) {\n      value = config_json[\"xroleName\"];  // xroleName maybe empty in edge scene.\n      output_info->xrole_name = value;\n    }\n\n    if (config_json.contains(\"userId\")) {\n      output_info->user_id = config_json[\"userId\"];\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Failed to parse json config, detail: \" << e.what();\n    return nullptr;\n  }\n  output_info->file_key_index = 0;\n\n  auto handle = std::make_shared<modelbox::OutputBrokerHandle>();\n  std::string uuid;\n  if (modelbox::STATUS_OK != modelbox::GetUUID(&uuid)) {\n    MBLOG_ERROR << \"Failed to generate a uuid for the OBS output broker!\";\n    return nullptr;\n  }\n  handle->broker_id_ = uuid;\n  std::lock_guard<std::mutex> lock(output_cfgs_mutex_);\n  output_configs_[handle->broker_id_] = output_info;\n\n  return handle;\n}\n\nmodelbox::Status ObsOutputBroker::Write(\n    const 
std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  size_t data_size = buffer->GetBytes();\n  auto *data = const_cast<char *>((const char *)buffer->ConstData());\n  if (buffer == nullptr || data == nullptr || data_size == 0) {\n    MBLOG_WARN << \"Invalid buffer: buffer is nullptr!\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  // get the broker configuration\n  std::unique_lock<std::mutex> lock(output_cfgs_mutex_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! Can not find the broker configuration, type: \"\n        << handle->output_broker_type_ << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_FAULT;\n  }\n  std::shared_ptr<OBSOutputInfo> output_info = iter->second;\n  lock.unlock();\n\n  // set OBS file key\n  std::string file_key;\n  buffer->Get(META_OUTPUT_FILE_NAME, file_key);\n  if (file_key.empty()) {\n    file_key =\n        handle->broker_id_ + \"_\" + std::to_string(output_info->file_key_index);\n  }\n\n  std::string path;\n  if (!output_info->path.empty()) {\n    if ('/' != output_info->path.at(output_info->path.length() - 1)) {\n      path = output_info->path + \"/\";\n    } else {\n      path = output_info->path;\n    }\n  }\n\n  file_key = path + file_key;  // File Path: [bucket]:[path]/[output_file_name]\n\n  auto obs_client = modelbox::ObsClient::GetInstance();\n  modelbox::ObsOptions obs_opt;\n  obs_opt.end_point = output_info->end_point;\n  obs_opt.bucket = output_info->bucket;\n  obs_opt.path = file_key;\n  obs_opt.domain_name = output_info->domain_name;\n  obs_opt.xrole_name = output_info->xrole_name;\n  obs_opt.user_id = output_info->user_id;\n  auto ret = obs_client->PutObject(obs_opt, data, data_size);\n  if (modelbox::STATUS_AGAIN == ret) {\n    MBLOG_WARN << ret.Errormsg();\n    PrintObsConfig(obs_opt);\n    return modelbox::STATUS_AGAIN;\n  }\n\n  if 
(modelbox::STATUS_OK != ret) {\n    MBLOG_ERROR << ret.Errormsg();\n    return ret;\n  }\n\n  ++output_info->file_key_index;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsOutputBroker::Sync(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ObsOutputBroker::Close(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  std::unique_lock<std::mutex> lock(output_cfgs_mutex_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR << \"broker handle not found, type: \"\n                << handle->output_broker_type_\n                << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  output_configs_.erase(handle->broker_id_);\n  return modelbox::STATUS_OK;\n}\n\nvoid ObsOutputBroker::PrintObsConfig(const modelbox::ObsOptions &opt) {\n  MBLOG_INFO << \"obs option - endpoint: \" << opt.end_point;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/obs_output_broker/obs_output_broker.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_OUTPUT_BROKER_OBS_CPU_H_\n#define MODELBOX_FLOWUNIT_OUTPUT_BROKER_OBS_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/obs_client.h>\n#include <modelbox/output_broker_plugin.h>\n\n#include \"eSDKOBS.h\"\n#include \"output_broker_flowunit.h\"\n\nconstexpr const char *DRIVER_NAME = \"obs\";\nconstexpr const char *DRIVER_DESC = \"A obs output broker plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\nconstexpr const char *DATA_NAME = \"data_name\";\n\ntypedef struct tag_OBSOutputInfo {\n  std::string ak;  // temporary USER AK, would be destoyed after use\n  std::string sk;  // temporary USER SK, would be destoyed after use\n  std::string\n      token;  // temporary USER Security Token, would be destoyed after use\n  std::string end_point;    // OBS EndPoint, for example:\n                            // obs.cn-north-7.ulanqab.huawei.com\n  std::string bucket;       // Bucket where the target file locates, for ex\n  std::string path;         // path to save data, for example: obs-test/data/\n  std::string domain_name;  // domain name of the resources agent\n  std::string xrole_name;   // commit name\n  std::string user_id;\n  unsigned int file_key_index;  //\n} OBSOutputInfo;\n\nusing ObsOutputConfigurations =\n    
std::map<std::string, std::shared_ptr<OBSOutputInfo>>;\n\nclass ObsOutputBroker : public modelbox::OutputBrokerPlugin {\n public:\n  ObsOutputBroker();\n  ~ObsOutputBroker() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  /**\n   * @brief Initial each broker: 1. allocate a broker id; 2. save the\n   * configuration to a map\n   * @param config - configurations in json style\n   * @return a handle\n   */\n  std::shared_ptr<modelbox::OutputBrokerHandle> Open(\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config) override;\n\n  /**\n   * @brief Write data to target output\n   * @param params - some parameters needed, as example:\n   *                 (optional) \"data_name\": indicates the file key for the\n   * uploaded data.\n   * @return Successful or not\n   */\n  modelbox::Status Write(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::shared_ptr<modelbox::Buffer> &buffer) override;\n\n  modelbox::Status Sync(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n  /**\n   * @brief Remove the configuration\n   * @param handle - to identify the configuration\n   * @return Successful or not\n   */\n  modelbox::Status Close(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n private:\n  /**\n   * @brief Print obs configuration\n   * @param opt - obs configuration\n   */\n  void PrintObsConfig(const modelbox::ObsOptions &opt);\n\n  ObsOutputConfigurations output_configs_;\n  std::mutex output_cfgs_mutex_;\n};\n\nclass OBSOutputBrokerFactory : public modelbox::DriverFactory {\n public:\n  OBSOutputBrokerFactory() = default;\n  ~OBSOutputBrokerFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        
std::make_shared<ObsOutputBroker>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_OUTPUT_BROKER_OBS_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/obs_output_broker/obs_output_broker_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"output_broker_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/iam_auth.h\"\n\nusing ObsOutputDataPack = struct _OutputDataPack {\n  std::string output_data;\n  std::string output_file_name;\n  std::string output_broker_names;\n};\n\nnamespace modelbox {\nclass OutputBrokerObsPluginTest : public testing::Test {\n public:\n  OutputBrokerObsPluginTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n  void PreparationToGetCert();\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n  std::shared_ptr<DriverFlowTest> RunDriverFlow();\n  modelbox::Status SendOutputDataObs(\n      std::shared_ptr<DriverFlowTest> &driver_flow,\n      std::vector<std::shared_ptr<ObsOutputDataPack>> &output_data_pack_vec,\n      const std::string &output_broker_cfg);\n\n private:\n  Status AddMockFlowUnit();\n  
std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> OutputBrokerObsPluginTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<DriverFlowTest> OutputBrokerObsPluginTest::RunDriverFlow() {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output_broker[type=flowunit, flowunit=output_broker, device=cpu, deviceid=0, label=\"<in_output_info>\", retry_count_limit=\"2\", retry_interval_base_ms=\"100\", retry_interval_increment_ms=\"100\", retry_interval_limit_ms=\"200\"]\n \n          input -> output_broker:in_output_info\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"null\", toml_content, -1);\n\n  return driver_flow;\n}\n\nmodelbox::Status OutputBrokerObsPluginTest::SendOutputDataObs(\n    std::shared_ptr<DriverFlowTest> &driver_flow,\n    std::vector<std::shared_ptr<ObsOutputDataPack>> &output_data_pack_vec,\n    const std::string &output_broker_cfg) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n\n  std::vector<size_t> tmp_sizes;\n  tmp_sizes.reserve(output_data_pack_vec.size());\n  for (auto &pack : output_data_pack_vec) {\n    tmp_sizes.push_back(pack->output_data.size());\n  }\n\n  buffer_list->Build(tmp_sizes);\n  for (size_t i = 0; i < output_data_pack_vec.size(); ++i) {\n    auto buffer = buffer_list->At(i);\n    auto data_pack = output_data_pack_vec[i];\n    memcpy_s(buffer->MutableData(), buffer->GetBytes(),\n             data_pack->output_data.data(), data_pack->output_data.size());\n    buffer->Set(\"output_broker_names\", data_pack->output_broker_names);\n    buffer->Set(\"output_file_name\", 
data_pack->output_file_name);\n  }\n\n  auto config = ext_data->GetSessionConfig();\n  config->SetProperty(\"flowunit.output_broker.config\", output_broker_cfg);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Shutdown();\n  return modelbox::STATUS_OK;\n}\n\nStatus OutputBrokerObsPluginTest::AddMockFlowUnit() { return STATUS_OK; }\n\nTEST_F(OutputBrokerObsPluginTest, ObsOutputTest) {\n  // This test would be skipped, if no auth info is provided.\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n\n  auto driver_flow = RunDriverFlow();\n  std::vector<std::shared_ptr<ObsOutputDataPack>> output_data_pack_vec;\n  std::shared_ptr<ObsOutputDataPack> output_data_pack =\n      std::make_shared<ObsOutputDataPack>();\n  output_data_pack->output_broker_names = \"obs1|obs2\";\n  output_data_pack->output_file_name = \"text\";\n  output_data_pack->output_data = \"output data text to obs1 & obs2.\";\n  output_data_pack_vec.push_back(output_data_pack);\n  output_data_pack = std::make_shared<ObsOutputDataPack>();\n  output_data_pack->output_broker_names = \"obs2|obs3\";\n  output_data_pack->output_file_name = \"frame\";\n  output_data_pack->output_data = \"output data text to obs2 & obs3.\";\n  output_data_pack_vec.push_back(output_data_pack);\n\n  // construct OUTPUT BROKER CFG\n  std::string obsEndPoint(config_file->GetString(\"output_broker.obsEndPoint\"));\n  std::string bucket(config_file->GetString(\"output_broker.bucket\"));\n  std::string path1(config_file->GetString(\"output_broker.path1\"));\n  std::string path2(config_file->GetString(\"output_broker.path2\"));\n  std::string path3(config_file->GetString(\"output_broker.path3\"));\n  std::string domainName(config_file->GetString(\"output_broker.domainName\"));\n  
std::string xroleName(config_file->GetString(\"output_broker.xroleName\"));\n\n  std::string output_broker_cfg = R\"({ \n    \"brokers\": [\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs1\",\n        \"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path1 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      },\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs2\",\n        \"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path2 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      },\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs3\",\n        \"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path3 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      }\n    ]\n  })\";\n\n  PreparationToGetCert();\n  auto ret =\n      SendOutputDataObs(driver_flow, output_data_pack_vec, output_broker_cfg);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(1 * 1000);\n}\n\n\nTEST_F(OutputBrokerObsPluginTest, ObsOutputTestWithNoFileName) {\n  // This test would be skipped, if no auth info is provided.\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n\n  auto driver_flow = RunDriverFlow();\n  std::vector<std::shared_ptr<ObsOutputDataPack>> output_data_pack_vec;\n  std::shared_ptr<ObsOutputDataPack> 
output_data_pack =\n      std::make_shared<ObsOutputDataPack>();\n  output_data_pack->output_broker_names = \"obs1|obs2\";\n  output_data_pack->output_file_name = \"\";\n  output_data_pack->output_data = \"output data text to obs1 & obs2.\";\n  output_data_pack_vec.push_back(output_data_pack);\n  output_data_pack = std::make_shared<ObsOutputDataPack>();\n  output_data_pack->output_broker_names = \"obs2|obs3\";\n  output_data_pack->output_file_name = \"\";\n  output_data_pack->output_data = \"output data text to obs2 & obs3.\";\n  output_data_pack_vec.push_back(output_data_pack);\n\n  // construct OUTPUT BROKER CFG\n  std::string obsEndPoint(config_file->GetString(\"output_broker.obsEndPoint\"));\n  std::string bucket(config_file->GetString(\"output_broker.bucket\"));\n  std::string path1(config_file->GetString(\"output_broker.path1\"));\n  std::string path2(config_file->GetString(\"output_broker.path2\"));\n  std::string path3(config_file->GetString(\"output_broker.path3\"));\n  std::string domainName(config_file->GetString(\"output_broker.domainName\"));\n  std::string xroleName(config_file->GetString(\"output_broker.xroleName\"));\n\n  std::string output_broker_cfg = R\"({ \n    \"brokers\": [\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs1\",\n        \"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path1 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      },\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs2\",\n        \"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path2 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      },\n      {\n        \"type\" : \"obs\",\n        \"name\" : \"obs3\",\n        
\"cfg\": \"{\\\"obsEndPoint\\\" : \\\")\" + obsEndPoint + R\"(\\\", \\\"bucket\\\" : \\\")\" + bucket + R\"(\\\", \\\"path\\\" : \\\")\" + path3 + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n      }\n    ]\n  })\";\n\n  PreparationToGetCert();\n  auto ret =\n      SendOutputDataObs(driver_flow, output_data_pack_vec, output_broker_cfg);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(1 * 1000);\n}\n\nvoid OutputBrokerObsPluginTest::PreparationToGetCert() {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  std::string ak(config_file->GetString(\"base.ak\"));\n  std::string sk(config_file->GetString(\"base.sk\"));\n  std::string domain_id(config_file->GetString(\"base.domain_id\"));\n  std::string project_id(config_file->GetString(\"base.project_id\"));\n  std::string iam_host(config_file->GetString(\"base.iam_host\"));\n\n  modelbox::IAMAuth::GetInstance()->SetIAMHostAddress(iam_host);\n\n  if (modelbox::STATUS_OK != modelbox::IAMAuth::GetInstance()->SetConsigneeInfo(\n                               ak, sk, domain_id, project_id)) {\n    MBLOG_ERROR << \"set Consignee failed\";\n    return;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/webhook_output_broker/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(PLUGIN_NAME \"webhook\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME})\n\nfile(GLOB_RECURSE PLUGIN_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_PLUGIN_SOURCE MODELBOX_PLUGIN_TEST_SOURCE \"_test.c*\" ${PLUGIN_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(MODELBOX_PLUGIN_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}-shared)\nset(MODELBOX_PLUGIN_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_PLUGIN_SHARED} SHARED ${MODELBOX_PLUGIN_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED})\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${CPPREST_LIBRARIES})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_PLUGIN_SHARED} dl)\nset_target_properties(${MODELBOX_PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}\")\n\ninstall(TARGETS ${MODELBOX_PLUGIN_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SHARED ${MODELBOX_PLUGIN_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_INCLUDE ${MODELBOX_PLUGIN_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SOURCES ${MODELBOX_PLUGIN_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-${PLUGIN_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_PLUGIN_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_PLUGIN_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/webhook_output_broker/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"webhook_output_broker.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<WebhookOutputBrokerFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(DRIVER_NAME);\n  desc->SetClass(DRIVER_CLASS_OUTPUT_BROKER_PLUGIN);\n  desc->SetType(DRIVER_TYPE);\n  desc->SetDescription(DRIVER_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/webhook_output_broker/webhook_output_broker.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"webhook_output_broker.h\"\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/base/uuid.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n\nWebhookOutputBroker::WebhookOutputBroker() = default;\nWebhookOutputBroker::~WebhookOutputBroker() = default;\n\nmodelbox::Status WebhookOutputBroker::Init(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status WebhookOutputBroker::Deinit() { return modelbox::STATUS_OK; }\n\nstd::shared_ptr<modelbox::OutputBrokerHandle> WebhookOutputBroker::Open(\n    const std::shared_ptr<modelbox::Configuration> &session_config,\n    const std::string &config) {\n  std::string uuid;\n  if (modelbox::STATUS_OK != modelbox::GetUUID(&uuid)) {\n    MBLOG_ERROR << \"Failed to generate a uuid for the dis output broker!\";\n    return nullptr;\n  }\n\n  auto handle = std::make_shared<modelbox::OutputBrokerHandle>();\n  handle->broker_id_ = uuid;\n\n  if (modelbox::STATUS_OK != ParseConfig(handle, config)) {\n    MBLOG_ERROR << \"Parse config to json failed\";\n    return nullptr;\n  }\n\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! 
Can not find the broker configuration, type: \"\n        << handle->output_broker_type_ << \", id: \" << handle->broker_id_;\n    return nullptr;\n  }\n\n  std::shared_ptr<WebhookOutputInfo> output_info = iter->second;\n  utility::string_t address = U(output_info->url);\n  web::http::uri uri = web::http::uri(address);\n\n  web::http::client::http_client_config client_config;\n  client_config.set_timeout(utility::seconds(30));\n  client_config.set_validate_certificates(false);\n\n  auto client = std::make_shared<web::http::client::http_client>(\n      web::http::uri_builder(uri).to_uri(), client_config);\n  output_clients_[handle->broker_id_] = client;\n\n  return handle;\n}\n\nmodelbox::Status WebhookOutputBroker::Write(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  if (buffer == nullptr) {\n    MBLOG_ERROR << \"Invalid buffer: buffer is nullptr!\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  const auto *buffer_data = buffer->ConstData();\n  if (buffer_data == nullptr) {\n    MBLOG_ERROR << \"Invalid buffer: buffer is nullptr!\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  std::string data((const char *)buffer_data, buffer->GetBytes());\n  if (data.empty()) {\n    MBLOG_WARN << \"Invalid data! Nothing to be upload!\";\n  }\n\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! Can not find the broker configuration, type: \"\n        << handle->output_broker_type_ << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  auto client_iter = output_clients_.find(handle->broker_id_);\n  if (client_iter == output_clients_.end()) {\n    MBLOG_ERROR\n        << \"Failed to send data! 
Can not find the broker clients, type: \"\n        << handle->output_broker_type_ << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  auto client = client_iter->second;\n  guard.unlock();\n  std::shared_ptr<WebhookOutputInfo> output_info = iter->second;\n  web::http::http_headers headers_post;\n  for (auto iter = output_info->headers.begin();\n       iter != output_info->headers.end(); ++iter) {\n    headers_post.add(U(iter->first), U(iter->second));\n  }\n  web::http::http_request msg_post;\n  msg_post.set_method(web::http::methods::POST);\n  msg_post.headers() = headers_post;\n\n  msg_post.set_body(data);\n\n  try {\n    web::http::http_response resp_post = client->request(msg_post).get();\n\n    std::string msg_name;\n    buffer->Get(\"msg_name\", msg_name);\n\n    if (resp_post.status_code() >= 200 && resp_post.status_code() < 300) {\n      MBLOG_DEBUG << \"Send data to webhook success. Message name: \" << msg_name\n                  << \". Http status code: \" << resp_post.status_code()\n                  << \". Response body: \" << resp_post.extract_string().get();\n      return modelbox::STATUS_OK;\n    }\n\n    MBLOG_WARN << \"Send data to webhook failed. Message name: \" << msg_name\n               << \". Http status code: \" << resp_post.status_code()\n               << \". Response body: \" << resp_post.extract_string().get()\n               << \". 
Try again.\";\n    return modelbox::STATUS_AGAIN;\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status WebhookOutputBroker::Sync(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status WebhookOutputBroker::Close(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) {\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  auto iter = output_configs_.find(handle->broker_id_);\n  if (iter == output_configs_.end()) {\n    MBLOG_ERROR << \"Broker handle not found, type: \"\n                << handle->output_broker_type_\n                << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  output_configs_.erase(handle->broker_id_);\n\n  auto client_iter = output_clients_.find(handle->broker_id_);\n  if (client_iter == output_clients_.end()) {\n    MBLOG_ERROR << \"Broker clients handle not found, type: \"\n                << handle->output_broker_type_\n                << \", id: \" << handle->broker_id_;\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  output_clients_.erase(handle->broker_id_);\n  guard.unlock();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status WebhookOutputBroker::ParseConfig(\n    const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    const std::string &config) {\n  nlohmann::json json;\n  std::shared_ptr<WebhookOutputInfo> output_info =\n      std::make_shared<WebhookOutputInfo>();\n  try {\n    json = nlohmann::json::parse(config);\n    output_info->url = json[\"url\"].get<std::string>();\n    if (output_info->url.empty()) {\n      MBLOG_ERROR << \"Invalid url, value of key <url> is empty!\";\n      return modelbox::STATUS_BADCONF;\n    }\n    MBLOG_DEBUG << \"url: \" << output_info->url;\n\n    auto value = json[\"headers\"].get<nlohmann::json>();\n    for (const auto &header : value.items()) {\n      if (header.key().empty()) {\n   
     MBLOG_ERROR << \"headers key is empty!\";\n        return modelbox::STATUS_BADCONF;\n      }\n      if (!header.value().is_string()) {\n        MBLOG_ERROR << \"Key <\" << header.key() << \"> must have string value.\";\n        return modelbox::STATUS_BADCONF;\n      }\n      output_info->headers[header.key()] = header.value();\n    }\n\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse output config to json failed, detail: \" << e.what();\n    return modelbox::STATUS_BADCONF;\n  }\n\n  MBLOG_DEBUG << \"Parse cfg json success.\";\n\n  std::unique_lock<std::mutex> guard(output_configs_lock_);\n  output_configs_[handle->broker_id_] = output_info;\n  guard.unlock();\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/broker_plugin/webhook_output_broker/webhook_output_broker.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_WEBHOOK_OUTPUT_BROKER_CPU_H_\n#define MODELBOX_FLOWUNIT_WEBHOOK_OUTPUT_BROKER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/output_broker_plugin.h>\n\n#include \"cpprest/http_client.h\"\n\nconstexpr const char *DRIVER_NAME = \"webhook\";\nconstexpr const char *DRIVER_DESC = \"A webhook output broker plugin on CPU\";\nconstexpr const char *DRIVER_TYPE = \"cpu\";\n\ntypedef struct tag_WebhookOutputInfo {\n  std::string url;\n  std::map<std::string, std::string> headers;\n} WebhookOutputInfo;\n\nclass WebhookOutputBroker : public modelbox::OutputBrokerPlugin {\n public:\n  WebhookOutputBroker();\n\n  ~WebhookOutputBroker() override;\n\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Deinit() override;\n\n  std::shared_ptr<modelbox::OutputBrokerHandle> Open(\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config) override;\n\n  modelbox::Status Write(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::shared_ptr<modelbox::Buffer> &buffer) override;\n\n  modelbox::Status Sync(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n  modelbox::Status Close(\n     
 const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) override;\n\n private:\n  modelbox::Status ParseConfig(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::string &config);\n  std::map<std::string, std::shared_ptr<WebhookOutputInfo>> output_configs_;\n  std::mutex output_configs_lock_;\n  std::map<std::string, std::shared_ptr<web::http::client::http_client>>\n      output_clients_;\n};\n\nclass WebhookOutputBrokerFactory : public modelbox::DriverFactory {\n public:\n  WebhookOutputBrokerFactory() = default;\n\n  ~WebhookOutputBrokerFactory() override = default;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override {\n    std::shared_ptr<modelbox::Driver> parser =\n        std::make_shared<WebhookOutputBroker>();\n    return parser;\n  }\n};\n\n#endif  // MODELBOX_FLOWUNIT_WEBHOOK_OUTPUT_BROKER_CPU_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/output_broker_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"output_broker_flowunit.h\"\n\n#include <modelbox/base/timer.h>\n#include <modelbox/base/utils.h>\n#include <securec.h>\n\n#include <queue>\n#include <utility>\n\n#include \"driver_util.h\"\n#include \"modelbox/base/config.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\n#define DEFAULT_RETRY_COUNT 5\n\nBrokerDataQueue::BrokerDataQueue(std::string broker_name, size_t queue_size)\n    : broker_name_(std::move(broker_name)), queue_size_(queue_size) {}\n\nvoid BrokerDataQueue::PushForce(\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  std::lock_guard<std::mutex> lock(queue_lock_);\n  while (queue_.size() >= queue_size_ && !queue_.empty()) {\n    MBLOG_WARN << \"Data in broker \" << broker_name_ << \" exceed limit \"\n               << queue_size_\n               << \", old data drop one. 
set mode=\\\"sync\\\" will not drop data \"\n                  \"but will stuck, \"\n                  \"or you can enlarge queue_size in mode=\\\"async\\\".\";\n    queue_.pop();\n  }\n\n  queue_.push(buffer);\n}\n\nmodelbox::Status BrokerDataQueue::Front(\n    std::shared_ptr<modelbox::Buffer> &buffer) {\n  std::lock_guard<std::mutex> lock(queue_lock_);\n  if (queue_.empty()) {\n    return modelbox::STATUS_NODATA;\n  }\n\n  buffer = queue_.front();\n  return modelbox::STATUS_OK;\n}\n\nbool BrokerDataQueue::Empty() { return queue_.empty(); }\n\nvoid BrokerDataQueue::PopIfEqual(\n    const std::shared_ptr<modelbox::Buffer> &target) {\n  std::lock_guard<std::mutex> lock(queue_lock_);\n  if (queue_.empty()) {\n    return;\n  }\n\n  if (queue_.front() != target) {\n    return;\n  }\n\n  queue_.pop();\n}\n\nBrokerInstance::BrokerInstance(\n    std::shared_ptr<modelbox::OutputBrokerPlugin> &plugin,\n    const std::string &name,\n    std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n    size_t async_queue_size)\n    : plugin_(plugin),\n      name_(name),\n      handle_(handle),\n      data_queue_(name, async_queue_size) {}\n\nBrokerInstance::~BrokerInstance() = default;\n\nvoid BrokerInstance::SetRetryParam(int64_t retry_count_limit,\n                                   size_t retry_interval_base_ms,\n                                   size_t retry_interval_increment_ms,\n                                   size_t retry_interval_limit_ms) {\n  retry_count_limit_ = retry_count_limit;\n  retry_interval_base_ms_ = retry_interval_base_ms;\n  retry_interval_increment_ms_ = retry_interval_increment_ms;\n  retry_interval_limit_ms_ = retry_interval_limit_ms;\n}\n\nmodelbox::Status BrokerInstance::Write(\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  cur_data_retry_count_ = 0;\n  bool retry = true;\n  do {\n    auto ret = plugin_->Write(handle_, buffer);\n    UpdateInstaceState(ret);\n    if (ret == modelbox::STATUS_AGAIN) {\n      if (cur_data_retry_count_++ < 
retry_count_limit_ ||\n          retry_count_limit_ < 0) {\n        MBLOG_ERROR << \"Write data to \" << name_\n                    << \" failed, detail: Try again\";\n      } else {\n        retry = false;\n      }\n\n      if (send_interval_ != 0) {\n        std::this_thread::sleep_for(std::chrono::milliseconds(send_interval_));\n      }\n    } else {\n      return ret;\n    }\n  } while (retry);\n  return {modelbox::STATUS_FAULT,\n          \"Reach max retry limit \" + std::to_string(retry_count_limit_)};\n}\n\nmodelbox::Status BrokerInstance::AddToQueue(\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  data_queue_.PushForce(buffer);\n  std::lock_guard<std::mutex> lock(stop_lock_);\n  if (is_stopped && !exit_flag_) {\n    auto timer_task =\n        std::make_shared<modelbox::TimerTask>([&]() { WriteFromQueue(); });\n    is_stopped = false;\n    timer_.Start();\n    timer_.Schedule(timer_task, send_interval_, 0, true);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid BrokerInstance::WriteFromQueue() {\n  std::shared_ptr<modelbox::Buffer> buffer;\n  data_queue_.Front(buffer);  // Will not be empty\n  auto ret = plugin_->Write(handle_, buffer);\n  UpdateInstaceState(ret);\n  if (ret == modelbox::STATUS_AGAIN) {\n    if (cur_data_retry_count_ < retry_count_limit_ || retry_count_limit_ < 0) {\n      ++cur_data_retry_count_;\n      MBLOG_ERROR << \"Write data to \" << name_ << \" failed, detail: Try again \";\n    } else {\n      MBLOG_ERROR << \"Write data to \" << name_\n                  << \" failed, drop this data, detail: Reach max retry limit \"\n                  << retry_count_limit_;\n      cur_data_retry_count_ = 0;\n      data_queue_.PopIfEqual(buffer);\n    }\n  } else {\n    if (!ret) {\n      MBLOG_ERROR << \"Write data to \" << name_\n                  << \" failed, drop this data, detail: \" << ret.Errormsg();\n    } else {\n      MBLOG_INFO << \"Write data to \" << name_ << \" success\";\n    }\n\n    cur_data_retry_count_ = 0;\n    
data_queue_.PopIfEqual(buffer);\n  }\n\n  std::lock_guard<std::mutex> lock(stop_lock_);\n  if (!data_queue_.Empty()) {\n    // if task stop, retry param will be changed by Dispose()\n    auto timer_task =\n        std::make_shared<modelbox::TimerTask>([&]() { WriteFromQueue(); });\n    is_stopped = false;\n    timer_.Start();\n    timer_.Schedule(timer_task, send_interval_, 0, true);\n  } else {\n    is_stopped = true;\n    stop_cv_.notify_all();\n  }\n}\n\nvoid BrokerInstance::Dispose() {\n  MBLOG_INFO << name_ << \" start dispose\";\n  // set retry param to ensure task could exit\n  if (retry_count_limit_ == -1) {\n    retry_count_limit_ = DEFAULT_RETRY_COUNT;\n  }\n  retry_interval_increment_ms_ = 0;\n  retry_interval_limit_ms_ = retry_interval_base_ms_;\n  send_interval_ = retry_interval_base_ms_;\n  // wait for sending task end\n  exit_flag_ = true;\n  std::unique_lock<std::mutex> lock(stop_lock_);\n  stop_cv_.wait(lock, [&]() { return is_stopped.load(); });\n  plugin_->Sync(handle_);\n  plugin_->Close(handle_);\n  MBLOG_INFO << name_ << \" dispose over\";\n}\n\nvoid BrokerInstance::UpdateInstaceState(modelbox::Status write_result) {\n  switch (write_result.Code()) {\n    case modelbox::STATUS_AGAIN:\n      if (send_interval_ == 0) {\n        send_interval_ = retry_interval_base_ms_;\n      } else if (send_interval_ < retry_interval_limit_ms_) {\n        send_interval_ += retry_interval_increment_ms_;\n        if (send_interval_ > retry_interval_limit_ms_) {\n          send_interval_ = retry_interval_limit_ms_;\n        }\n      }\n\n      break;\n\n    default:\n      send_interval_ = 0;\n      break;\n  }\n}\n\nOutputBrokerFlowUnit::OutputBrokerFlowUnit() = default;\nOutputBrokerFlowUnit::~OutputBrokerFlowUnit() = default;\n\nmodelbox::Status OutputBrokerFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto dev_mgr = GetBindDevice()->GetDeviceManager();\n  if (dev_mgr == nullptr) {\n    MBLOG_ERROR << \"Can not get device 
manager\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto drivers = dev_mgr->GetDrivers();\n  if (drivers == nullptr) {\n    MBLOG_ERROR << \"Can not get drivers\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = driverutil::GetPlugin<modelbox::OutputBrokerPlugin>(\n      DRIVER_CLASS_OUTPUT_BROKER_PLUGIN, drivers, factories_, plugins_);\n  if (!ret) {\n    return ret;\n  }\n\n  for (auto &item : plugins_) {\n    auto ret = item.second->Init(opts);\n    if (!ret) {\n      MBLOG_ERROR << \"Init plugin \" << item.first\n                  << \" failed, detail : \" << ret.Errormsg();\n    }\n  }\n\n  mode_ = opts->GetString(\"mode\", SYNC_MODE);\n  if (mode_ != SYNC_MODE && mode_ != ASYNC_MODE) {\n    MBLOG_ERROR << \"Mode only support {sync, async}\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  retry_count_limit_ = opts->GetInt64(\"retry_count_limit\");\n  retry_interval_base_ms_ = opts->GetUint64(\"retry_interval_base_ms\");\n  retry_interval_increment_ms_ = opts->GetUint64(\"retry_interval_increment_ms\");\n  retry_interval_limit_ms_ =\n      opts->GetUint64(\"retry_interval_limit_ms\", retry_interval_base_ms_);\n  if (retry_interval_limit_ms_ < retry_interval_base_ms_) {\n    MBLOG_WARN << \"retry_interval_limit < retry_interval_base is unacceptable, \"\n                  \"use retry_interval_base as retry_interval_limit\";\n    retry_interval_limit_ms_ = retry_interval_base_ms_;\n  }\n\n  async_queue_size_ = opts->GetUint64(\"queue_size\", 100);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OutputBrokerFlowUnit::Close() {\n  for (auto &item : plugins_) {\n    item.second->Deinit();\n  }\n\n  plugins_.clear();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OutputBrokerFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_buffer_list = data_ctx->Input(INPUT_DATA);\n  for (auto &buffer : *input_buffer_list) {\n    std::string output_broker_names;\n    buffer->Get(META_OUTPUT_BROKER_NAME, 
output_broker_names);\n    auto ret = SendData(data_ctx, output_broker_names, buffer);\n    if (!ret) {\n      MBLOG_ERROR << \"Send data to output broker \" << output_broker_names\n                  << \" failed\";\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OutputBrokerFlowUnit::SendData(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &output_broker_names,\n    const std::shared_ptr<modelbox::Buffer> &buffer) {\n  auto broker_instances = std::static_pointer_cast<BrokerInstances>(\n      data_ctx->GetPrivate(CTX_BROKER_INSTANCES));\n  auto loaded_broker_names = std::static_pointer_cast<BrokerNames>(\n      data_ctx->GetPrivate(CTX_BROKER_NAMES));\n  if (broker_instances == nullptr || loaded_broker_names == nullptr) {\n    MBLOG_ERROR << \"Output broker handles has not been inited\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto output_broker_name_list =\n      modelbox::StringSplit(output_broker_names, '|');\n  if (output_broker_name_list.empty()) {\n    output_broker_name_list = *loaded_broker_names;\n  }\n\n  for (auto &target_broker_name : output_broker_name_list) {\n    auto item = broker_instances->find(target_broker_name);\n    if (item == broker_instances->end()) {\n      MBLOG_ERROR << \"Wrong broker name \" << target_broker_name\n                  << \", it's not named in config\";\n      continue;\n    }\n\n    auto &broker = item->second;\n    if (mode_ == SYNC_MODE) {\n      auto ret = broker->Write(buffer);\n      if (!ret) {\n        MBLOG_ERROR << \"Write data to \" << target_broker_name\n                    << \" failed, drop this data, detail: \" << ret.Errormsg();\n      } else {\n        MBLOG_INFO << \"Write data to \" << target_broker_name << \" success\";\n      }\n    } else {\n      broker->AddToQueue(buffer);\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OutputBrokerFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto config = 
data_ctx->GetSessionConfig();\n  auto cfg_str = config->GetString(SESSION_OUTPUT_BROKER_CONFIG);\n  if (cfg_str.empty()) {\n    MBLOG_ERROR << \"Output broker config in session has not been set\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = ParseCfg(data_ctx, cfg_str);\n  if (!ret) {\n    MBLOG_ERROR << \"Parse output broker config failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n};\n\nmodelbox::Status OutputBrokerFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto broker_instances = std::static_pointer_cast<BrokerInstances>(\n      data_ctx->GetPrivate(CTX_BROKER_INSTANCES));\n  for (auto &item : *broker_instances) {\n    item.second->Dispose();\n  }\n\n  broker_instances->clear();\n  return modelbox::STATUS_OK;\n};\n\nmodelbox::Status OutputBrokerFlowUnit::ParseCfg(\n    std::shared_ptr<modelbox::DataContext> &data_ctx, const std::string &cfg) {\n  nlohmann::json json;\n  try {\n    json = nlohmann::json::parse(cfg);\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse output config to json failed, detail: \" << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!json.is_object()) {\n    MBLOG_ERROR << \"Output broker config must be a json object\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto brokers = json[\"brokers\"];\n  auto ret = InitBrokers(data_ctx, brokers);\n  if (!ret) {\n    MBLOG_ERROR << \"Init output brokers failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OutputBrokerFlowUnit::InitBrokers(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const nlohmann::json &brokers_json) {\n  if (brokers_json.empty()) {\n    MBLOG_ERROR << \"Key <brokers> is missing in json object\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!brokers_json.is_array()) {\n    MBLOG_ERROR << \"Value of <brokers> must be an array\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto broker_instances = 
std::make_shared<BrokerInstances>();\n  auto broker_names = std::make_shared<BrokerNames>();\n  for (const auto &broker_json : brokers_json) {\n    try {\n      AddBroker(data_ctx, broker_instances, broker_names, broker_json);\n    } catch (const std::exception &e) {\n      MBLOG_ERROR << \"init output broker failed, config: \"\n                  << broker_json.dump();\n      return modelbox::STATUS_INVALID;\n    }\n  }\n\n  data_ctx->SetPrivate(CTX_BROKER_INSTANCES, broker_instances);\n  data_ctx->SetPrivate(CTX_BROKER_NAMES, broker_names);\n  return modelbox::STATUS_OK;\n}\n\nvoid OutputBrokerFlowUnit::AddBroker(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::shared_ptr<BrokerInstances> &broker_instances,\n    std::shared_ptr<BrokerNames> &broker_names,\n    const nlohmann::json &broker_json) {\n  if (!broker_json.is_object()) {\n    MBLOG_ERROR << \"Single broker config must be object\";\n    return;\n  }\n\n  auto type = broker_json[\"type\"];\n  if (type.empty()) {\n    MBLOG_WARN << \"Key <type> is missing in single broker config\";\n    return;\n  }\n\n  if (!type.is_string()) {\n    MBLOG_WARN << \"Key <type> must have string value\";\n    return;\n  }\n\n  auto plugin = GetPlugin(type);\n  if (plugin == nullptr) {\n    MBLOG_WARN << \"No output broker plugin for type \" << type;\n    return;\n  }\n\n  auto name = broker_json[\"name\"];\n  if (name.empty()) {\n    MBLOG_WARN << \"Key <name> is missing in single broker config, type \"\n               << type;\n    return;\n  }\n\n  if (!name.is_string()) {\n    MBLOG_WARN << \"Key <name> must have string value, type \" << type;\n    return;\n  }\n\n  auto cfg = broker_json[\"cfg\"];\n  if (cfg.empty()) {\n    MBLOG_WARN << \"Key <cfg> is missing in single broker config, type \" << type\n               << \", name \" << name;\n    return;\n  }\n\n  if (!cfg.is_string()) {\n    MBLOG_WARN << \"Key <cfg> must have string value, type \" << type << \", name \"\n               << name;\n    return;\n 
 }\n\n  auto handle = plugin->Open(data_ctx->GetSessionConfig(), cfg);\n  if (handle == nullptr) {\n    MBLOG_WARN << \"Get broker handle for \" << name << \":\" << type << \" failed\";\n    return;\n  }\n\n  handle->output_broker_type_ = type;\n  auto instance =\n      std::make_shared<BrokerInstance>(plugin, name, handle, async_queue_size_);\n  instance->SetRetryParam(retry_count_limit_, retry_interval_base_ms_,\n                          retry_interval_increment_ms_,\n                          retry_interval_limit_ms_);\n  (*broker_instances)[name] = instance;\n  broker_names->push_back(name);\n}\n\nstd::shared_ptr<modelbox::OutputBrokerPlugin> OutputBrokerFlowUnit::GetPlugin(\n    const std::string &type) {\n  auto item = plugins_.find(type);\n  if (item == plugins_.end()) {\n    return nullptr;\n  }\n\n  return item->second;\n}\n\nMODELBOX_FLOWUNIT(OutputBrokerFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Output\");\n  desc.AddFlowUnitInput({INPUT_DATA});\n  desc.SetFlowType(modelbox::FlowType::STREAM);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/output_broker_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_H_\n#define MODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/output_broker_plugin.h>\n\n#include <algorithm>\n#include <atomic>\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/base/timer.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"output_broker\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: Output the input data to the specified service. Currently \"\n    \"supported types have dis, obs, webhook. \\n\"\n    \"\\t@Port parameter: the input port buffer contain the following meta \"\n    \"fields:\\n\"\n    \"\\t\\tField Name: out_broker_names,      Type: string\\n\"\n    \"\\t\\tField Name: out_file_names,        Type: string\\n\"\n    \"\\t@Constraint: the fields 'out_file_names' can be only required when \"\n    \"output \"\n    \"type is obs. 
\";\n\nconstexpr const char *INPUT_DATA = \"in_output_info\";\nconstexpr const char *META_OUTPUT_BROKER_NAME = \"output_broker_names\";\nconstexpr const char *META_OUTPUT_FILE_NAME = \"output_file_name\";\nconstexpr const char *SESSION_OUTPUT_BROKER_CONFIG = \"config\";\nconstexpr const char *CTX_BROKER_NAMES = \"broker_names\";\nconstexpr const char *CTX_BROKER_INSTANCES = \"broker_instances\";\nconstexpr const char *SYNC_MODE = \"sync\";\nconstexpr const char *ASYNC_MODE = \"async\";\n\nusing BrokerNames = std::vector<std::string>;\n\nclass BrokerDataQueue {\n public:\n  BrokerDataQueue(std::string broker_name, size_t queue_size);\n\n  virtual ~BrokerDataQueue() = default;\n\n  void PushForce(const std::shared_ptr<modelbox::Buffer> &buffer);\n\n  modelbox::Status Front(std::shared_ptr<modelbox::Buffer> &buffer);\n\n  bool Empty();\n\n  void PopIfEqual(const std::shared_ptr<modelbox::Buffer> &target);\n\n private:\n  std::string broker_name_;\n  size_t queue_size_{0};\n  std::queue<std::shared_ptr<modelbox::Buffer>> queue_;\n  std::mutex queue_lock_;\n};\n\nclass BrokerInstance {\n public:\n  BrokerInstance(std::shared_ptr<modelbox::OutputBrokerPlugin> &plugin,\n                 const std::string &name,\n                 std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n                 size_t async_queue_size);\n\n  virtual ~BrokerInstance();\n\n  void SetRetryParam(int64_t retry_count_limit, size_t retry_interval_base_ms,\n                     size_t retry_interval_increment_ms,\n                     size_t retry_interval_limit_ms);\n\n  modelbox::Status Write(const std::shared_ptr<modelbox::Buffer> &buffer);\n\n  modelbox::Status AddToQueue(const std::shared_ptr<modelbox::Buffer> &buffer);\n\n  void WriteFromQueue();\n\n  void Dispose();\n\n  std::atomic_bool is_stopped{true};\n\n private:\n  void UpdateInstaceState(modelbox::Status write_result);\n\n  std::shared_ptr<modelbox::OutputBrokerPlugin> plugin_;\n  std::string name_;\n  
std::shared_ptr<modelbox::OutputBrokerHandle> handle_;\n  BrokerDataQueue data_queue_;\n\n  size_t send_interval_{0};          // State of instance\n  int64_t cur_data_retry_count_{0};  // State of data\n\n  modelbox::Timer timer_;\n  std::atomic_bool exit_flag_{false};\n  std::mutex stop_lock_;\n  std::condition_variable stop_cv_;\n\n  int64_t retry_count_limit_{0};  // < 0 means unlimited, >= 0 means limited\n  size_t retry_interval_base_ms_{0};\n  size_t retry_interval_increment_ms_{0};\n  size_t retry_interval_limit_ms_{0};\n};\n\nusing BrokerInstances = std::map<std::string, std::shared_ptr<BrokerInstance>>;\n\nclass OutputBrokerFlowUnit : public modelbox::FlowUnit {\n public:\n  OutputBrokerFlowUnit();\n  ~OutputBrokerFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status SendData(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                            const std::string &output_broker_names,\n                            const std::shared_ptr<modelbox::Buffer> &buffer);\n\n  modelbox::Status ParseCfg(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                            const std::string &cfg);\n\n  modelbox::Status InitBrokers(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                               
const nlohmann::json &brokers_json);\n\n  void AddBroker(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                 std::shared_ptr<BrokerInstances> &broker_instances,\n                 std::shared_ptr<BrokerNames> &broker_names,\n                 const nlohmann::json &broker_json);\n\n  std::shared_ptr<modelbox::OutputBrokerPlugin> GetPlugin(\n      const std::string &type);\n\n  std::vector<std::shared_ptr<modelbox::DriverFactory>> factories_;\n  std::map<std::string, std::shared_ptr<modelbox::OutputBrokerPlugin>> plugins_;\n\n  std::string mode_;\n  int64_t retry_count_limit_{0};  // < 0 means unlimited, >= 0 means limited\n  size_t retry_interval_base_ms_{0};\n  size_t retry_interval_increment_ms_{0};\n  size_t retry_interval_limit_ms_{0};\n  size_t async_queue_size_{0};\n};\n#endif  // MODELBOX_FLOWUNIT_OUTPUT_BROKER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/output_broker/output_broker_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"output_broker_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"common/mock_cert.h\"\n#define _TURN_OFF_PLATFORM_STRING\n#include \"cpprest/http_listener.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/iam_auth.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass OutputBrokerFlowUnitTest : public testing::Test {\n public:\n  OutputBrokerFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n  void PreparationToGetCert();\n  modelbox::Status HandleFunc(web::http::http_request request);\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n  std::shared_ptr<MockFlow> RunDriverFlow();\n  modelbox::Status SendOutputData(std::shared_ptr<MockFlow> &driver_flow,\n                                const std::string &output_data,\n                                const std::string &output_broker_names,\n                                const 
std::string &output_broker_cfg);\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> OutputBrokerFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nstd::shared_ptr<MockFlow> OutputBrokerFlowUnitTest::RunDriverFlow() {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output_broker[type=flowunit, flowunit=output_broker, device=cpu, deviceid=0, label=\"<in_output_info>\", retry_count_limit=\"2\", retry_interval_base_ms=\"100\", retry_interval_increment_ms=\"100\", retry_interval_limit_ms=\"200\"]\n \n          input -> output_broker:in_output_info\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"InitUnit\", toml_content, -1);\n  return driver_flow;\n}\n\nmodelbox::Status OutputBrokerFlowUnitTest::SendOutputData(\n    std::shared_ptr<MockFlow> &driver_flow, const std::string &output_data,\n    const std::string &output_broker_names,\n    const std::string &output_broker_cfg) {\n  auto ext_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({output_data.size(), output_data.size()});\n\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), output_data.data(),\n           output_data.size());\n  buffer->Set(\"msg_name\", std::string(\"webhook_msg\"));\n  buffer->Set(\"output_broker_names\", output_broker_names);\n\n  buffer = buffer_list->At(1);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), output_data.data(),\n           output_data.size());\n  buffer->Set(\"msg_name\", std::string(\"webhook_msg2\"));\n  buffer->Set(\"output_broker_names\", 
output_broker_names);\n\n  auto config = ext_data->GetSessionConfig();\n  config->SetProperty(\"flowunit.output_broker.config\", output_broker_cfg);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Shutdown();\n  return modelbox::STATUS_OK;\n}\n\nStatus OutputBrokerFlowUnitTest::AddMockFlowUnit() { return STATUS_OK; }\n\nTEST_F(OutputBrokerFlowUnitTest, InitUnit) {\n  auto driver_flow = RunDriverFlow();\n\n  std::string output_data = \"output data\";\n  std::string output_broker_cfg = R\"({\n    \"brokers\": [\n      {\n        \"type\" : \"dis\",\n        \"name\" : \"dis1\",\n        \"cfg\": \"xxx\"\n      },\n      {\n        \"type\" : \"dis\",\n        \"name\" : \"dis2\",\n        \"cfg\" : \"xxx\"\n      },\n      {\n        \"type\" : \"webhook\",\n        \"name\" : \"webhook\",\n        \"cfg\" : \"xxx\"\n      }\n    ]\n  })\";\n  auto ret = SendOutputData(driver_flow, output_data, \"dis1|webhook\",\n                            output_broker_cfg);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(OutputBrokerFlowUnitTest, DisOutputTest) {\n  // This test would be skipped, if no auth info is provided.\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  if (config_file == nullptr || config_file->GetString(\"base.ak\").empty()) {\n    GTEST_SKIP();\n  }\n\n  auto driver_flow = RunDriverFlow();\n\n  std::string output_data(0.8 * 1024 * 1024, 'a');\n  std::string disEndPoint(config_file->GetString(\"output_broker.obsEndPoint\"));\n  std::string region(config_file->GetString(\"output_broker.region\"));\n  std::string streamName(config_file->GetString(\"output_broker.streamName\"));\n  std::string projectId(config_file->GetString(\"output_broker.projectId\"));\n  std::string domainName(config_file->GetString(\"output_broker.domainName\"));\n  std::string 
xroleName(config_file->GetString(\"output_broker.xroleName\"));\n  std::string output_broker_cfg = R\"({\n    \"brokers\": [\n      {\n        \"type\" : \"dis\",\n        \"name\" : \"dis1\",\n        \"cfg\": \"{\\\"disEndPoint\\\" : \\\")\" + disEndPoint + R\"(\\\", \\\"region\\\" : \\\")\" + region + R\"(\\\", \\\"streamName\\\" : \\\")\" + streamName + R\"(\\\",\\\"projectId\\\" : \\\")\" + projectId + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\" \n     },\n      {\n        \"type\" : \"dis\",\n        \"name\" : \"dis2\",\n        \"cfg\": \"{\\\"disEndPoint\\\" : \\\")\" + disEndPoint + R\"(\\\", \\\"region\\\" : \\\")\" + region + R\"(\\\", \\\"streamName\\\" : \\\")\" + streamName + R\"(\\\",\\\"projectId\\\" : \\\")\" + projectId + R\"(\\\",\\\"domainName\\\" :\\\")\" + domainName + R\"(\\\",\\\"xroleName\\\" : \\\")\" + xroleName + R\"(\\\"}\"  \n      }\n    ]\n  })\";\n  PreparationToGetCert();\n  auto ret =\n      SendOutputData(driver_flow, output_data, \"dis1|dis2\", output_broker_cfg);\n  EXPECT_EQ(ret, modelbox::STATUS_OK);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nmodelbox::Status OutputBrokerFlowUnitTest::HandleFunc(\n    web::http::http_request request) {\n  utility::string_t request_body = request.extract_string().get();\n  EXPECT_EQ(\"{\\\"output data webhook\\\":\\\"a\\\"}\", request_body);\n  utility::string_t resp_body = \"OK\";\n  request.reply(web::http::status_codes::OK, resp_body);\n\n  return modelbox::STATUS_OK;\n}\n\nTEST_F(OutputBrokerFlowUnitTest, WebhookOutputTest) {\n  auto driver_flow = RunDriverFlow();\n\n  std::string output_data = R\"({\"output data webhook\":\"a\"})\";\n  std::string output_broker_cfg = R\"({\n    \"brokers\": [\n      {\n        \"type\" : \"webhook\",\n        \"name\" : \"webhook1\",\n        \"cfg\" : \"{\\\"url\\\" : \\\"https://localhost:54321\\\", \\\"headers\\\" : {\\\"header1\\\" : \\\"test1\\\",\\\"header2\\\" : 
\\\"test2\\\"}}\" \n      }\n    ]\n  })\";\n  auto ret =\n      SendOutputData(driver_flow, output_data, \"webhook1\", output_broker_cfg);\n\n  std::string request_url = \"https://localhost:54321\";\n  std::shared_ptr<web::http::experimental::listener::http_listener> listener;\n\n  web::http::experimental::listener::http_listener_config server_config;\n  server_config.set_timeout(std::chrono::seconds(60));\n  std::string cert = std::string(TEST_DATA_DIR) + \"/certificate.pem\";\n  std::string key = std::string(TEST_DATA_DIR) + \"/private_key_nopass.pem\";\n\n  ASSERT_EQ(GenerateCert(key, cert), STATUS_OK);\n\n  Defer {\n    remove(key.c_str());\n    remove(cert.c_str());\n  };\n\n  if (cert.length() > 0 && key.length() > 0) {\n    server_config.set_ssl_context_callback(\n        [cert, key](boost::asio::ssl::context &ctx) {\n          ctx.set_options(boost::asio::ssl::context::default_workarounds);\n          modelbox::HardeningSSL(ctx.native_handle());\n          ctx.use_certificate_file(\n              cert, boost::asio::ssl::context_base::file_format::pem);\n          ctx.use_private_key_file(key, boost::asio::ssl::context::pem);\n        });\n  }\n\n  listener = std::make_shared<web::http::experimental::listener::http_listener>(\n      request_url, server_config);\n\n  listener->support(web::http::methods::POST,\n                    [this](const web::http::http_request &request) {\n                      this->HandleFunc(request);\n                    });\n\n  try {\n    listener->open().wait();\n    MBLOG_INFO << \"start to listen \";\n  } catch (std::exception const &e) {\n    MBLOG_ERROR << e.what();\n  }\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nvoid OutputBrokerFlowUnitTest::PreparationToGetCert() {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> config_file =\n      conf_builder->Build(TEST_ASSETS + std::string(\"/auth/auth_info.toml\"));\n  std::string ak(config_file->GetString(\"base.ak\"));\n  
std::string sk(config_file->GetString(\"base.sk\"));\n  std::string domain_id(config_file->GetString(\"base.domain_id\"));\n  std::string project_id(config_file->GetString(\"base.project_id\"));\n  std::string iam_host(config_file->GetString(\"base.iam_host\"));\n\n  modelbox::IAMAuth::GetInstance()->SetIAMHostAddress(iam_host);\n\n  if (modelbox::STATUS_OK != modelbox::IAMAuth::GetInstance()->SetConsigneeInfo(\n                               ak, sk, domain_id, project_id)) {\n    MBLOG_ERROR << \"set Consignee failed\";\n    return;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/padding/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"padding\")\n\nif (NOT OPENCV_FOUND)\n    return()\nendif()\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_PADDING_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_PADDING_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/padding/padding_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"padding_flowunit.h\"\n#include <securec.h>\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nPaddingFlowUnit::PaddingFlowUnit() = default;\nPaddingFlowUnit::~PaddingFlowUnit() = default;\n\nstd::map<std::string, cv::InterpolationFlags> kCVResizeMethod = {\n    {\"inter_nearest\", cv::INTER_NEAREST},\n    {\"inter_linear\", cv::INTER_LINEAR},\n    {\"inter_cubic\", cv::INTER_CUBIC},\n    {\"inter_area\", cv::INTER_AREA},\n    {\"inter_lanczos4\", cv::INTER_LANCZOS4},\n    {\"inter_max\", cv::INTER_MAX},\n    {\"warp_fill_outliers\", cv::WARP_FILL_OUTLIERS},\n    {\"warp_inverse_map\", cv::WARP_INVERSE_MAP},\n};\n\nconst std::map<std::string, AlignType> kVerticalAlignType = {\n    {\"top\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"bottom\", AlignType::END}};\n\nconst std::map<std::string, AlignType> kHorizontalAlignType = {\n    {\"left\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"right\", AlignType::END}};\n\nmodelbox::Status PaddingFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  width_ = opts->GetUint32(\"image_width\", 0);\n  height_ = opts->GetUint32(\"image_height\", 0);\n  if (width_ == 0 || height_ == 0) {\n    MBLOG_ERROR << \"width and height must set in config\";\n    return 
modelbox::STATUS_BADCONF;\n  }\n\n  output_buffer_size_ = width_ * height_ * 3;\n  auto vertical_align_str = opts->GetString(\"vertical_align\", \"top\");\n  auto item = kVerticalAlignType.find(vertical_align_str);\n  if (item == kVerticalAlignType.end()) {\n    MBLOG_ERROR << \"vertical align must be one of [top|center|bottom]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  vertical_align_ = item->second;\n\n  auto horizontal_align_str = opts->GetString(\"horizontal_align\", \"left\");\n  item = kHorizontalAlignType.find(horizontal_align_str);\n  if (item == kHorizontalAlignType.end()) {\n    MBLOG_ERROR << \"horizontal align must be one of [left|center|right]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  horizontal_align_ = item->second;\n\n  padding_data_ = opts->GetUint8s(\"padding_data\", {0, 0, 0});\n  if (padding_data_.size() != 3) {\n    MBLOG_ERROR << \"padding data size must be 3\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  need_scale_ = opts->GetBool(\"need_scale\", true);\n  auto interpolation_str = opts->GetString(\"interpolation\", \"inter_linear\");\n  auto interpolation_item = kCVResizeMethod.find(interpolation_str);\n  if (interpolation_item == kCVResizeMethod.end()) {\n    MBLOG_ERROR << \"not support interpolation \" << interpolation_str;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  interpolation_ = interpolation_item->second;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process padding\";\n  auto input_buffer_list = data_ctx->Input(\"in_image\");\n  auto output_buffer_list = data_ctx->Output(\"out_image\");\n  auto image_count = input_buffer_list->Size();\n  if (image_count == 0) {\n    MBLOG_ERROR << \"input buffer count is zero\";\n    return modelbox::STATUS_FAULT;\n  }\n  std::vector<size_t> output_shape(image_count, output_buffer_size_);\n  auto ret = output_buffer_list->Build(output_shape);\n  if (!ret) {\n    
MBLOG_ERROR << \"build output buffer failed, count \" << image_count\n                << \",size \" << output_buffer_size_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < image_count; ++i) {\n    auto in_image = input_buffer_list->At(i);\n    auto out_image = output_buffer_list->At(i);\n    auto ret = PaddingOneImage(in_image, out_image);\n    if (!ret) {\n      if (ret != modelbox::STATUS_OK) {\n        MBLOG_ERROR << \"padding image failed, err \" << ret;\n        return ret;\n      }\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::PaddingOneImage(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &out_image) {\n  int32_t ori_width = 0;\n  int32_t ori_height = 0;\n  std::string pix_fmt;\n  auto ret = in_image->Get(\"width\", ori_width);\n  ret = ret && in_image->Get(\"height\", ori_height);\n  if (!ret) {\n    MBLOG_ERROR << \"input image must has width and height in meta\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  in_image->Get(\"pix_fmt\", pix_fmt);\n  if (pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n    MBLOG_ERROR << \"unsupport pix format \" << pix_fmt;\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  cv::Size src_size = cv::Size(ori_width, ori_height);\n  cv::Mat src_roi(src_size, CV_8UC3, const_cast<void *>(in_image->ConstData()));\n  MBLOG_DEBUG << \"ori image : cols \" << src_roi.cols << \" rows \" << src_roi.rows\n              << \" channel \" << src_roi.channels();\n\n  cv::Size dest_size;\n  cv::Mat dest_roi;\n\n  // calculate offset and wid, height，get the resized img\n  struct dest_roi_proportions drp;\n  struct dest_roi_proportions *p_drp = nullptr;\n  p_drp = &drp;\n  drp.dest_roi_width = 0;\n  drp.dest_roi_height = 0;\n  drp.dest_roi_x = 0;\n  drp.dest_roi_y = 0;\n  auto status = FillDestRoi(src_size, dest_roi, p_drp);\n  if (!status) {\n    MBLOG_ERROR << \"fill dest roi failed\";\n    return status;\n  }\n  dest_size.width = 
drp.dest_roi_width;\n  dest_size.height = drp.dest_roi_height;\n  cv::resize(src_roi, dest_roi, dest_size);\n  // filling the dest_roi with padding data.\n  cv::Size back_size = cv::Size(width_, height_);\n  cv::Mat back_roi(\n      back_size, CV_8UC3,\n      cv::Scalar(padding_data_[0], padding_data_[1], padding_data_[2]));\n  // finally, put resized img in the back_roi\n  dest_roi.copyTo(back_roi(\n      cv::Rect(drp.dest_roi_x, drp.dest_roi_y, dest_roi.cols, dest_roi.rows)));\n  // copy to output\n  auto img_dest = std::make_shared<cv::Mat>();\n  back_roi.copyTo(*img_dest);\n  size_t size_bytes = img_dest->total() * img_dest->elemSize();\n  out_image->BuildFromHost(img_dest->data, size_bytes, [img_dest](void *ptr) {\n    /* Only capture pkt */\n  });\n  out_image->Set(\"width\", width_);\n  out_image->Set(\"height\", height_);\n  out_image->Set(\"width_stride\", width_ * 3);\n  out_image->Set(\"height_stride\", height_);\n  out_image->Set(\"pix_fmt\", pix_fmt);\n  out_image->Set(\"channel\", src_roi.channels());\n  out_image->Set(\"layout\", std::string(\"hwc\"));\n  out_image->Set(\"type\", modelbox::MODELBOX_UINT8);\n  out_image->Set(\"shape\",\n                 std::vector<size_t>{(size_t)height_, (size_t)width_, 3});\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::FillDestRoi(const cv::Size &src_size,\n                                              cv::Mat &dest_roi,\n                                              dest_roi_proportions *p_drp) {\n  if (need_scale_) {\n    auto w_scale = (float)src_size.width / width_;\n    auto h_scale = (float)src_size.height / height_;\n    auto scale = std::max(w_scale, h_scale);\n    if (scale == 0) {\n      MBLOG_ERROR << \"scale must not be 0\";\n      return modelbox::STATUS_INVALID;\n    }\n    p_drp->dest_roi_width = src_size.width / scale;\n    p_drp->dest_roi_height = src_size.height / scale;\n  } else {\n    if (src_size.width > width_ || src_size.height > height_) {\n      MBLOG_ERROR << 
\"src image[w:\" << src_size.width\n                  << \",h:\" << src_size.height\n                  << \"] is great than dest size[w:\" << width_\n                  << \",h:\" << height_ << \"]. But need_scale is false\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    p_drp->dest_roi_width = src_size.width;\n    p_drp->dest_roi_height = src_size.height;\n  }\n\n  p_drp->dest_roi_x =\n      GetAlignOffset(horizontal_align_, width_, p_drp->dest_roi_width);\n  p_drp->dest_roi_y =\n      GetAlignOffset(vertical_align_, height_, p_drp->dest_roi_height);\n  return modelbox::STATUS_OK;\n}\n\nuint32_t PaddingFlowUnit::GetAlignOffset(AlignType type, uint32_t dest_range,\n                                         uint32_t roi_range) {\n  if (roi_range >= dest_range) {\n    return 0;\n  }\n\n  uint32_t offset = 0;\n  switch (type) {\n    case AlignType::BEGIN:\n      break;\n\n    case AlignType::CENTER:\n      offset = (dest_range - roi_range) / 2;\n      break;\n\n    case AlignType::END:\n      offset = dest_range - roi_range;\n      break;\n\n    default:\n      break;\n  }\n\n  return offset;\n}\n\nMODELBOX_FLOWUNIT(PaddingFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"0\", \"Output img width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n                                                  \"0\", \"Output img height\"));\n  std::map<std::string, std::string> vertical_align_list{\n      {\"top\", \"top\"}, {\"center\", \"center\"}, {\"bottom\", \"bottom\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"vertical_align\", 
\"list\", false, \"top\", \"Output roi vertical align type\",\n      vertical_align_list));\n  std::map<std::string, std::string> horizontal_align_list{\n      {\"left\", \"left\"}, {\"center\", \"center\"}, {\"right\", \"right\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"horizontal_align\", \"list\", false, \"left\",\n      \"Output roi horizontal align type\", horizontal_align_list));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"padding_data\", \"string\", false, \"0,0,0\", \"Data for padding\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"need_scale\", \"bool\", false, \"true\",\n                               \"Will scale roi to fit output image\"));\n  std::map<std::string, std::string> interpolation_list{\n      {\"inter_nn\", \"inter_nn\"},\n      {\"inter_linear\", \"inter_linear\"},\n      {\"inter_cubic\", \"inter_cubic\"},\n      {\"inter_super\", \"inter_super\"},\n      {\"inter_lanczos\", \"inter_lanczos\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"interpolation\", \"list\", false, \"inter_linear\",\n      \"Interpolation method to scale roi\", interpolation_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/padding/padding_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_PADDINGFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_PADDINGFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n\n#include <string>\n#include <typeinfo>\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"padding\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A padding flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: 'pix_fmt': \"\n    \"[rgb,bgr], 'layout': [hwc]. \";\n\nenum class AlignType { BEGIN, CENTER, END };\nstruct dest_roi_proportions {\n  int32_t dest_roi_width = 0;\n  int32_t dest_roi_height = 0;\n  int32_t dest_roi_x = 0;\n  int32_t dest_roi_y = 0;\n};\n\nclass PaddingFlowUnit : public modelbox::FlowUnit {\n public:\n  PaddingFlowUnit();\n  ~PaddingFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override { return modelbox::STATUS_OK; };\n\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  cv::InterpolationFlags GetCVResizeMethod(std::string resizeType);\n  modelbox::Status PaddingOneImage(\n      std::shared_ptr<modelbox::Buffer> &in_image,\n      std::shared_ptr<modelbox::Buffer> &out_image);\n\n  modelbox::Status FillDestRoi(const cv::Size &src_size, cv::Mat &dest_roi,\n                               struct dest_roi_proportions *p_drp);\n\n  uint32_t GetAlignOffset(AlignType type, uint32_t dest_range,\n                          uint32_t roi_range);\n\n  modelbox::Status FillPaddingData(\n      std::shared_ptr<modelbox::Buffer> &out_image);\n\n  int32_t width_{0};\n  int32_t height_{0};\n  size_t 
output_buffer_size_{0};\n  AlignType vertical_align_{AlignType::BEGIN};\n  AlignType horizontal_align_{AlignType::BEGIN};\n  std::vector<uint8_t> padding_data_;\n  bool need_scale_{true};\n  cv::InterpolationFlags interpolation_{cv::INTER_LINEAR};\n};\n\n#endif  // MODELBOX_FLOWUNIT_PADDINGFLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/padding/padding_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"padding_flowunit.h\"\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n  \nclass PaddingFlowUnitTest : public testing::Test {\n public:\n  PaddingFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n protected:\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> PaddingFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n};\n\nTEST_F(PaddingFlowUnitTest, TestPaddingImage) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          padding[type=flowunit, flowunit=padding, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\",\n       
   image_width=200, image_height=100, vertical_align=top, horizontal_align=center, padding_data=\"0, 255, 0\"]\n\n          input -> padding:in_image\n          padding:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"TestPaddingImage\", toml_content, 10);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto input_buffer_list = extern_data->CreateBufferList();\n  input_buffer_list->Build({img.total() * img.elemSize()});\n  auto input_buffer = input_buffer_list->At(0);\n  input_buffer->Set(\"width\", img.cols);\n  input_buffer->Set(\"height\", img.rows);\n  input_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  auto e_ret = memcpy_s(input_buffer->MutableData(), input_buffer->GetBytes(),\n                        img.data, img.total() * img.elemSize());\n  EXPECT_EQ(e_ret, 0);\n  auto status = extern_data->Send(\"input\", input_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  cv::Mat out_img(cv::Size(200, 100), CV_8UC3, output_buffer->MutableData());\n  auto expected_img = cv::imread(std::string(TEST_ASSETS) + \"/padding_200x100_result.png\");\n  ASSERT_EQ(expected_img.cols, out_img.cols);\n  ASSERT_EQ(expected_img.rows, out_img.rows );\n\n  for (int32_t y = 0; y < expected_img.rows; ++y) {\n    for (int32_t x = 0; x < expected_img.cols; ++x) {\n      auto expected_pix = expected_img.at<cv::Vec3b>(y, x);\n      auto pix = out_img.at<cv::Vec3b>(y, x);\n      ASSERT_EQ(expected_pix[0], pix[0]);\n      ASSERT_EQ(expected_pix[1], 
pix[1]);\n      ASSERT_EQ(expected_pix[2], pix[2]);\n    }\n  }\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"python\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif(NOT ${PYTHONLIBS_FOUND})\n    message(STATUS \"Not found python, disable python flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${PYBIND11_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_MODELBOX_API_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_PYTHON_INCLUDE})\n\nset(EMPTY_SOURCE_FILE ${CMAKE_BINARY_DIR}/empty.cc)\nif (NOT EXISTS ${EMPTY_SOURCE_FILE})\n    file(WRITE ${EMPTY_SOURCE_FILE})\nendif()\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_STATIC modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-static)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_STATIC} STATIC ${MODELBOX_UNIT_SOURCE})\ntarget_link_libraries(${MODELBOX_UNIT_STATIC} PRIVATE 
pybind11::module)\ntarget_link_libraries(${MODELBOX_UNIT_STATIC} PRIVATE pybind11::embed)\nset_property(TARGET ${MODELBOX_UNIT_STATIC} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${EMPTY_SOURCE_FILE})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_STATIC} )\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} -Wl,--whole-archive ${MODELBOX_UNIT_STATIC} -Wl,--no-whole-archive)\nset(LIBMODELBOX_FLOWUNIT_PYTHON_SHARED ${MODELBOX_UNIT_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_PYTHON_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY \n    ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_PYTHON_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PYTHON_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PYTHON_SOURCES 
${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PYTHON_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${PYBIND11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES pybind11::module pybind11::embed)\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PYTHON_SO_PATH ${LIBMODELBOX_FLOWUNIT_PYTHON_SO_PATH} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n#include <mutex>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"python_flowunit.h\"\n#include \"python_module.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"python\";\nconstexpr const char *FLOWUNIT_DESC = \"A python flowunit\";\n\nMODELBOX_DLL_LOCAL std::mutex kPythonInitLock;\nMODELBOX_DLL_LOCAL std::shared_ptr<PythonInterpreter> kpythonInterpreter = nullptr;\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<PythonFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetNodelete(true);\n  desc->SetGlobal(true);\n}\n\nmodelbox::Status DriverInit() {\n  std::lock_guard<std::mutex> lock(kPythonInitLock);\n  // Driver Init.\n  if (kpythonInterpreter != nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  kpythonInterpreter = std::make_shared<PythonInterpreter>();\n  auto ret = kpythonInterpreter->InitModule();\n  if (!ret) {\n    kpythonInterpreter = 
nullptr;\n  }\n\n  return ret;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n  std::lock_guard<std::mutex> lock(kPythonInitLock);\n  if (kpythonInterpreter) {\n    kpythonInterpreter->ExitModule();\n    kpythonInterpreter = nullptr;\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/python_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"python_flowunit.h\"\n\n#include \"modelbox/device/cpu/device_cpu.h\"\n\n// NOLINTNEXTLINE\nusing namespace pybind11::literals;\n\nstatic std::mutex reload_mutex;\n\nPythonFlowUnit::PythonFlowUnit() = default;\nPythonFlowUnit::~PythonFlowUnit() {\n  py::gil_scoped_acquire interpreter_guard{};\n  python_process_.dec_ref();\n  python_data_pre_.dec_ref();\n  python_data_post_.dec_ref();\n  python_data_group_pre_.dec_ref();\n  python_data_group_post_.dec_ref();\n  if (is_enable_debug_ == true) {\n    pydevd_set_trace_.dec_ref();\n  }\n  obj_.dec_ref();\n};\n\nvoid PythonFlowUnit::EnablePythonDebug() {\n  if (is_enable_debug_ == false) {\n    return;\n  }\n\n  pydevd_set_trace_(\"suspend\"_a = false, \"trace_only_current_thread\"_a = true);\n}\n\nmodelbox::Status PythonFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration>& opts) {\n  python_desc_ = std::dynamic_pointer_cast<VirtualPythonFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n\n  auto python_entry = python_desc_->GetPythonEntry();\n  auto config = python_desc_->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  if (config != nullptr) {\n    merge_config->Add(*config);\n  }\n  merge_config->Add(*opts);\n\n  constexpr const char DELIM_CHAR = '@';\n  
constexpr size_t ENTRY_FILENAME_AND_CLASS_COUNT = 2;\n  const auto& entry_list = modelbox::StringSplit(python_entry, DELIM_CHAR);\n  if (entry_list.size() != ENTRY_FILENAME_AND_CLASS_COUNT) {\n    return {modelbox::STATUS_INVALID, \"invalid entry string: \" + python_entry};\n  }\n\n  const auto& python_path = python_desc_->GetPythonFilePath();\n\n  // module reload mutex\n  std::lock_guard<std::mutex> lck(reload_mutex);\n\n  py::gil_scoped_acquire interpreter_guard{};\n\n  // Avoid thread.lock assert after interpreter finish.\n  PyGILState_STATE state = PyGILState_Ensure();\n  Defer {\n    PyGILState_Release(state);\n  };\n\n  const char *enable_debug = getenv(\"MODELBOX_DEBUG_PYTHON\");\n  if (enable_debug != nullptr) {\n    is_enable_debug_ = true;\n  }\n\n  try {\n    auto sys = py::module::import(\"sys\");\n    if (is_enable_debug_ == true) {\n      auto pydevd = py::module::import(\"pydevd\");\n      pydevd_set_trace_ = pydevd.attr(\"settrace\");\n    }\n\n    sys.attr(\"path\").cast<py::list>().append(python_path);\n\n    auto python_module = py::module_::import(entry_list[0].c_str());\n    python_module.reload();\n    auto python_class = python_module.attr(entry_list[1].c_str());\n    obj_ = python_class();\n    python_process_ = obj_.attr(\"process\");\n    python_data_pre_ = obj_.attr(\"data_pre\");\n    python_data_post_ = obj_.attr(\"data_post\");\n    python_data_group_pre_ = obj_.attr(\"data_group_pre\");\n    python_data_group_post_ = obj_.attr(\"data_group_post\");\n  } catch (const std::exception& ex) {\n    is_enable_debug_ = false;\n    return {modelbox::STATUS_INVALID, \"import \" + python_desc_->GetPythonEntry() +\n                                        \" failed: \" + ex.what()};\n  }\n\n  auto* fu = obj_.cast<modelbox::FlowUnit*>();\n  fu->SetBindDevice(GetBindDevice());\n  fu->SetExternalData(GetCreateExternalDataFunc());\n  fu->SetFlowUnitDesc(GetFlowUnitDesc());\n\n  py::object status;\n  try {\n    EnablePythonDebug();\n    auto 
python_open = obj_.attr(\"open\");\n    status = python_open(merge_config);\n  } catch (const std::exception& ex) {\n    return {modelbox::STATUS_FAULT, python_desc_->GetPythonEntry() +\n                                        \" function open error: \" + ex.what()};\n  }\n\n  try {\n    // if return is modelbox::StatusCode\n    return status.cast<modelbox::StatusCode>();\n  } catch (...) {\n    // do nothing\n  }\n\n  return status.cast<modelbox::Status>();\n}\n\nmodelbox::Status PythonFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  py::gil_scoped_acquire interpreter_guard{};\n\n  try {\n    EnablePythonDebug();\n    auto status = python_process_(data_ctx);\n    try {\n      // if return is modelbox::StatusCode\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) {\n      // do nothing\n    }\n\n    // if return modelbox::Status\n    return status.cast<modelbox::Status>();\n  } catch (py::error_already_set& ex) {\n    MBLOG_WARN << python_desc_->GetPythonEntry()\n               << \" python function process catch exception: \" << ex.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status PythonFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  py::gil_scoped_acquire interpreter_guard{};\n  try {\n    EnablePythonDebug();\n    auto status = python_data_pre_(data_ctx);\n    try {\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) 
{\n      // do nothing\n    }\n\n    return status.cast<modelbox::Status>();\n  } catch (const std::exception& ex) {\n    MBLOG_WARN << python_desc_->GetPythonEntry()\n               << \" python function data_pre catch exception: \" << ex.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status PythonFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  py::gil_scoped_acquire interpreter_guard{};\n  try {\n    EnablePythonDebug();\n    auto status = python_data_post_(data_ctx);\n    try {\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) {\n      // do nothing\n    }\n\n    return status.cast<modelbox::Status>();\n  } catch (const std::exception& ex) {\n    MBLOG_WARN << python_desc_->GetPythonEntry()\n               << \" python function data_post catch exception: \" << ex.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status PythonFlowUnit::DataGroupPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  py::gil_scoped_acquire interpreter_guard{};\n  try {\n    EnablePythonDebug();\n    auto status = python_data_group_pre_(data_ctx);\n    try {\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) {\n      // do nothing\n    }\n\n    return status.cast<modelbox::Status>();\n  } catch (const std::exception& ex) {\n    MBLOG_WARN << python_desc_->GetPythonEntry()\n               << \" python function data_group_pre catch exception: \"\n               << ex.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status PythonFlowUnit::DataGroupPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  py::gil_scoped_acquire interpreter_guard{};\n  try {\n    EnablePythonDebug();\n    auto status = python_data_group_post_(data_ctx);\n    try {\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) 
{\n      // do nothing\n    }\n\n    return status.cast<modelbox::Status>();\n  } catch (const std::exception& ex) {\n    MBLOG_WARN << python_desc_->GetPythonEntry()\n               << \" python function data_group_post catch exception: \"\n               << ex.what();\n    return modelbox::STATUS_FAULT;\n  }\n}\n\nmodelbox::Status PythonFlowUnit::Close() {\n  py::gil_scoped_acquire interpreter_guard{};\n  try {\n    EnablePythonDebug();\n    auto python_close = obj_.attr(\"close\");\n    auto status = python_close();\n    try {\n      return status.cast<modelbox::StatusCode>();\n    } catch (...) {\n      return modelbox::STATUS_OK;\n    }\n\n    return status.cast<modelbox::Status>();\n  } catch (const std::exception& ex) {\n    return modelbox::STATUS_OK;\n  }\n}\n\nvoid PythonFlowUnit::SetFlowUnitDesc(\n    std::shared_ptr<modelbox::FlowUnitDesc> desc) {\n  python_desc_ = std::dynamic_pointer_cast<VirtualPythonFlowUnitDesc>(desc);\n}\n\nstd::shared_ptr<modelbox::FlowUnitDesc> PythonFlowUnit::GetFlowUnitDesc() {\n  return python_desc_;\n}\n\nPythonFlowUnitDesc::PythonFlowUnitDesc() = default;\n\nPythonFlowUnitDesc::~PythonFlowUnitDesc() = default;\n\nvoid PythonFlowUnitDesc::SetPythonEntry(const std::string& python_entry) {\n  python_entry_ = python_entry;\n}\n\nstd::string PythonFlowUnitDesc::GetPythonEntry() { return python_entry_; }\n\nPythonFlowUnitFactory::PythonFlowUnitFactory() = default;\n\nPythonFlowUnitFactory::~PythonFlowUnitFactory() = default;\n\nstd::shared_ptr<modelbox::FlowUnit> PythonFlowUnitFactory::CreateFlowUnit(\n    const std::string& unit_name, const std::string& unit_type) {\n  auto python_flowunit = std::make_shared<PythonFlowUnit>();\n  return python_flowunit;\n}\n\nstd::string PythonFlowUnitFactory::GetFlowUnitFactoryType() {\n  return FLOWUNIT_TYPE;\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nPythonFlowUnitFactory::FlowUnitProbe() {\n  return std::map<std::string, 
std::shared_ptr<modelbox::FlowUnitDesc>>();\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/python_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_PYTHON_H_\n#define MODELBOX_FLOWUNIT_PYTHON_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <pybind11/pybind11.h>\n\n#include \"virtualdriver_python.h\"\n\nnamespace py = pybind11;\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\n\n#pragma GCC visibility push(default)\n\nclass PythonFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  PythonFlowUnitDesc();\n  ~PythonFlowUnitDesc() override;\n\n  void SetPythonEntry(const std::string &python_entry);\n  std::string GetPythonEntry();\n\n  std::string python_entry_;\n};\n\nclass PythonFlowUnit : public modelbox::FlowUnit {\n public:\n  PythonFlowUnit();\n  ~PythonFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status 
DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  void SetFlowUnitDesc(std::shared_ptr<modelbox::FlowUnitDesc> desc) override;\n  std::shared_ptr<modelbox::FlowUnitDesc> GetFlowUnitDesc() override;\n\n private:\n  std::shared_ptr<VirtualPythonFlowUnitDesc> python_desc_;\n  void EnablePythonDebug();\n\n  py::object obj_;\n  py::object pydevd_set_trace_;\n  py::object python_process_;\n  py::object python_data_pre_;\n  py::object python_data_post_;\n  py::object python_data_group_pre_;\n  py::object python_data_group_post_;\n  bool is_enable_debug_{false};\n};\n\nclass PythonFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  PythonFlowUnitFactory();\n  ~PythonFlowUnitFactory() override;\n\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n};\n#pragma GCC visibility pop\n#endif  // MODELBOX_FLOWUNIT_PYTHON_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/python_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <pybind11/embed.h>\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\n#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\nnamespace modelbox {\nclass PythonFlowUnitTest : public testing::Test {\n public:\n  PythonFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n protected:\n  void SetUp() override {}\n\n  void TearDown() override { driver_flow_->Clear(); };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> PythonFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(PythonFlowUnitTest, Init) {\n  auto op_dir = test_data_dir + \"/python_op\";\n  std::string toml_content = R\"(\n    [driver]\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             op_dir + \"\\\"]\\n    \" +\n                             R\"(\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = 
'''digraph demo {{                                                                            \n    python_image[type=flowunit, flowunit=python_image, device=cpu, deviceid=0, label=\"<image_out/out_1>\", batch_size = 10]   \n    python_resize[type=flowunit, flowunit=python_resize, device=cpu, deviceid=0, label=\"<resize_in> | <resize_out>\"]   \n    python_brightness[type=flowunit, flowunit=python_brightness, device=cpu, deviceid=0, label=\"<brightness_in> | <brightness_out>\", brightness = 0.1]  \n    python_show[type=flowunit, flowunit=python_show, device=cpu, deviceid=0, label=\"<show_in>\", is_save = true]    \n    python_image:\"image_out/out_1\" -> python_resize:resize_in\n    python_resize:resize_out -> python_brightness:brightness_in\n    python_brightness:brightness_out -> python_show:show_in                                                                                              \n}}'''\nformat = \"graphviz\"\n\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"PythonFlowUnit\", toml_content, 0);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(PythonFlowUnitTest, StatusCount) {\n  auto op_dir = test_data_dir + \"/python_op\";\n  std::string toml_content = R\"(\n    [driver]\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             op_dir + \"\\\"]\\n    \" +\n                             R\"(\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    python_image[type=flowunit, flowunit=python_image, device=cpu, deviceid=0, batch_size = 10]   \n    python_resize[type=flowunit, flowunit=python_resize, device=cpu, deviceid=0]   \n    python_brightness[type=flowunit, flowunit=python_brightness, device=cpu, deviceid=0, brightness = 0.1]  \n    python_show[type=flowunit, flowunit=python_show, device=cpu, deviceid=0, is_save = true]    \n    python_image:image_out -> python_resize:resize_in\n    
python_resize:resize_out -> python_brightness:brightness_in\n    python_brightness:brightness_out -> python_show:show_in                                                                                              \n}}'''\nformat = \"graphviz\"\n\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"PythonFlowUnit\", toml_content, -1);\n  EXPECT_NE(ret, STATUS_OK);\n\n  {\n    py::gil_scoped_acquire interpreter_guard{};\n    py::object python_status;\n    try {\n      python_status = py::module::import(\"_flowunit\").attr(\"Status\");\n    } catch (const std::exception& ex) {\n      MBLOG_ERROR << \"import _flowunit.Status failed:\" << ex.what();\n      EXPECT_TRUE(false);\n    }\n\n    try {\n      auto obj = python_status(STATUS_LASTFLAG - 1);\n    } catch (const std::exception& ex) {\n      MBLOG_ERROR << \"init _flowunit.Status failed:\" << ex.what();\n      EXPECT_TRUE(false);\n    }\n\n    try {\n      auto obj = python_status(STATUS_LASTFLAG);\n      EXPECT_TRUE(false);\n    } catch (const std::exception& ex) {\n      MBLOG_ERROR << \"init _flowunit.Status failed:\" << ex.what();\n      EXPECT_TRUE(true);\n    }\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/python_module.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"python_module.h\"\n\n#include <modelbox/base/log.h>\n#include <pybind11/embed.h>\n#include <pybind11/pybind11.h>\n\n#include <chrono>\n#include <functional>\n\n#include \"modelbox_api.h\"\n#include \"python_log.h\"\n\nnamespace py = pybind11;\n\n#define PYBIND11_MODULE_INIT(name) PyInit_##name()\n\nPythonInterpreter::PythonInterpreter() {\n  if (!Py_IsInitialized()) {\n    is_initialized_ = true;\n    py::initialize_interpreter(false);\n    // unlock GIL\n    threadState_ = PyEval_SaveThread();\n    return;\n  }\n\n  modelbox::FlowUnitPythonLog::Init();\n}\n\nPythonInterpreter::~PythonInterpreter() {\n  modelbox::FlowUnitPythonLog::Finish();\n\n  if (is_initialized_ == false) {\n    return;\n  }\n\n  if (threadState_ != nullptr) {\n    // lock GIL\n    PyEval_RestoreThread(threadState_);\n    threadState_ = nullptr;\n  }\n\n  // never release python interpreter\n}\n\nPYBIND11_MODULE(_flowunit, m) {\n  modelbox::ModelboxPyApiSetUpLog(m);\n  modelbox::ModelboxPyApiSetUpStatus(m);\n  modelbox::ModelboxPyApiSetUpConfiguration(m);\n  modelbox::ModelboxPyApiSetUpBuffer(m);\n  modelbox::ModelboxPyApiSetUpBufferList(m);\n  modelbox::ModelboxPyApiSetUpGeneric(m);\n  modelbox::ModelboxPyApiSetUpFlowUnit(m);\n}\n\nmodelbox::Status PythonInterpreter::InitModule() {\n  py::gil_scoped_acquire acquire{};\n\n  
auto *m = PyImport_AddModule(\"_flowunit\");\n  if (m == nullptr) {\n    MBLOG_ERROR << \"Add python module failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  PyObject *module = PYBIND11_MODULE_INIT(_flowunit);\n  if (module == nullptr) {\n    MBLOG_ERROR << \"Init python module failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  PyObject *sys_modules = PyImport_GetModuleDict();\n  PyDict_SetItemString(sys_modules, \"_flowunit\", module);\n  is_module_init_ = true;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PythonInterpreter::ExitModule() {\n  py::gil_scoped_acquire acquire{};\n\n  if (is_module_init_ == false) {\n    return modelbox::STATUS_OK;\n  }\n\n  is_module_init_ = false;\n  PyObject *sys_modules = PyImport_GetModuleDict();\n  PyDict_DelItemString(sys_modules, \"_flowunit\");\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/python/python_module.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_PYTHON_FLOWUNIT_MODULE_H_\n#define MODELBOX_PYTHON_FLOWUNIT_MODULE_H_\n\n#include <modelbox/base/status.h>\n#include <pybind11/pybind11.h>\n\nclass PythonInterpreter {\n public:\n  PythonInterpreter();\n  virtual ~PythonInterpreter();\n  modelbox::Status InitModule();\n  modelbox::Status ExitModule();\n\n private:\n  bool is_initialized_ = false;\n  bool is_module_init_ = false;\n  PyThreadState* threadState_ = nullptr;\n};\n#endif  // MODELBOX_PYTHON_FLOWUNIT_MODULE_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/resize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"resize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable resize flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit\n    )\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/resize/resize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"resize_flowunit.h\"\n#include <securec.h>\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nCVResizeFlowUnit::CVResizeFlowUnit() = default;\nCVResizeFlowUnit::~CVResizeFlowUnit() = default;\n\nstd::map<std::string, cv::InterpolationFlags> kCVResizeMethod = {\n    {\"inter_nearest\", cv::INTER_NEAREST},\n    {\"inter_linear\", cv::INTER_LINEAR},\n    {\"inter_cubic\", cv::INTER_CUBIC},\n    {\"inter_area\", cv::INTER_AREA},\n    {\"inter_lanczos4\", cv::INTER_LANCZOS4},\n    {\"inter_max\", cv::INTER_MAX},\n    {\"warp_fill_outliers\", cv::WARP_FILL_OUTLIERS},\n    {\"warp_inverse_map\", cv::WARP_INVERSE_MAP},\n};\n\nmodelbox::Status CVResizeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  dest_width_ = opts->GetUint32(\"width\", 0);\n  if (dest_width_ == 0) {\n    dest_width_ = opts->GetUint32(\"image_width\", 0);\n  }\n\n  dest_height_ = opts->GetUint32(\"height\", 0);\n  if (dest_height_ == 0) {\n    dest_height_ = opts->GetUint32(\"image_height\", 0);\n  }\n  if (dest_width_ <= 0 || dest_height_ <= 0) {\n    const auto *errMsg = \"resize width or height is not configured or invalid.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  auto interpolation_str = opts->GetString(\"interpolation\", 
\"inter_linear\");\n  auto item = kCVResizeMethod.find(interpolation_str);\n  if (item == kCVResizeMethod.end()) {\n    auto errMsg =\n        \"resize interpolation is invalid, configure is :\" + interpolation_str;\n    MBLOG_ERROR << errMsg;\n    std::string validmethod;\n    for (const auto &iter : kCVResizeMethod) {\n      if (validmethod.length() > 0) {\n        validmethod += \", \";\n      }\n      validmethod += iter.first;\n    }\n    MBLOG_ERROR << \"Valid interpolation method is: \" << validmethod;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  interpolation_ = item->second;\n  MBLOG_DEBUG << \"resize dest width \" << dest_width_ << \", resize dest height \"\n              << dest_height_ << \", resize interpolation method \"\n              << interpolation_str;\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status CVResizeFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status CVResizeFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process image cvresize\";\n\n  auto input_bufs = data_ctx->Input(\"in_image\");\n  auto output_bufs = data_ctx->Output(\"out_image\");\n\n  if (input_bufs->Size() <= 0) {\n    auto errMsg = \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  size_t channel = RGB_CHANNELS;\n  std::vector<size_t> sub_shape{dest_width_, dest_height_, channel};\n  std::vector<size_t> tensor_shape(\n      input_bufs->Size(), modelbox::Volume(sub_shape) * sizeof(u_char));\n  output_bufs->Build(tensor_shape);\n\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t channel = 0;\n    std::string pix_fmt;\n    bool exists = false;\n    exists = input_bufs->At(i)->Get(\"height\", height);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key height\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key 
height\"};\n    }\n\n    exists = input_bufs->At(i)->Get(\"width\", width);\n    if (!exists) {\n      MBLOG_ERROR << \"meta don't have key width\";\n      return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key width\"};\n    }\n\n    exists = input_bufs->At(i)->Get(\"pix_fmt\", pix_fmt);\n    if (!exists && !input_bufs->At(i)->Get(\"channel\", channel)) {\n      MBLOG_ERROR << \"meta don't have key pix_fmt or channel\";\n      return {modelbox::STATUS_NOTSUPPORT,\n              \"meta don't have key pix_fmt or channel\"};\n    }\n\n    if (exists && pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n      MBLOG_ERROR << \"unsupport pix format.\";\n      return {modelbox::STATUS_NOTSUPPORT, \"unsupport pix format.\"};\n    }\n\n    channel = RGB_CHANNELS;\n    MBLOG_DEBUG << \"get \" << width << \" rows \" << height << \" channel \"\n                << channel;\n\n    const auto *input_data =\n        static_cast<const u_char *>(input_bufs->ConstBufferData(i));\n\n    cv::Mat img_data(cv::Size(width, height), CV_8UC3);\n    memcpy_s(img_data.data, img_data.total() * img_data.elemSize(), input_data,\n             input_bufs->At(i)->GetBytes());\n\n    MBLOG_DEBUG << \"ori image : cols \" << img_data.cols << \" rows \"\n                << img_data.rows << \" channel \" << img_data.channels();\n\n    // resize image\n    cv::Size destSize = cv::Size(dest_width_, dest_height_);\n    cv::Mat img_dest;\n    cv::resize(img_data, img_dest, destSize, 0, 0, interpolation_);\n\n    // output resize image\n    auto *output = static_cast<uchar *>(output_bufs->MutableBufferData(i));\n    memcpy_s(output, output_bufs->At(i)->GetBytes(), img_dest.data,\n             img_dest.total() * img_dest.elemSize());\n    output_bufs->At(i)->Set(\"width\", (int32_t)dest_width_);\n    output_bufs->At(i)->Set(\"height\", (int32_t)dest_height_);\n    output_bufs->At(i)->Set(\"width_stride\", (int32_t)dest_width_ * 3);\n    output_bufs->At(i)->Set(\"height_stride\", (int32_t)dest_height_);\n    
output_bufs->At(i)->Set(\"channel\", channel);\n    output_bufs->At(i)->Set(\"pix_fmt\", pix_fmt);\n    output_bufs->At(i)->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    output_bufs->At(i)->Set(\n        \"shape\",\n        std::vector<size_t>{(size_t)dest_height_, (size_t)dest_width_, 3});\n    output_bufs->At(i)->Set(\"layout\", std::string(\"hwc\"));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(CVResizeFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"640\", \"the resize width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n                                                  \"480\", \"the resize height\"));\n\n  std::map<std::string, std::string> method_list;\n\n  for (auto &item : kCVResizeMethod) {\n    method_list[item.first] = item.first;\n  }\n\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"interpolation\", \"list\", true, \"inter_linear\",\n                               \"the resize interpolation method\", method_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/resize/resize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_CVRESIZEFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_CVRESIZEFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"resize\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A resize flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: 'pix_fmt': \"\n    \"[rgb_packed,bgr_packed], 'layout': [hwc]. \";\nconst int RGB_CHANNELS = 3;\n\nclass CVResizeFlowUnit : public modelbox::FlowUnit {\n public:\n  CVResizeFlowUnit();\n  ~CVResizeFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  uint32_t dest_width_{224};\n  uint32_t dest_height_{224};\n  cv::InterpolationFlags interpolation_{cv::InterpolationFlags::INTER_LINEAR};\n};\n\n#endif  // MODELBOX_FLOWUNIT_CVRESIZEFLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/resize/resize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass CVResizeFlowUnitTest : public testing::Test {\n public:\n  CVResizeFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> CVResizeFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus CVResizeFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_0_1_resize\");\n    desc_flowunit.SetDescription(\"the test in 
0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_0_1_resize\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                const auto* err_msg = \"can not get external data.\";\n                modelbox::Status ret = {modelbox::STATUS_NODATA, err_msg};\n                MBLOG_ERROR << err_msg;\n                return ret;\n              }\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              auto output_buf = ext_data->CreateBufferList();\n              modelbox::TensorList output_tensor_list(output_buf);\n              output_tensor_list.BuildFromHost<uchar>(\n                  {1, {gimg_path.size() + 1}}, (void*)gimg_path.data(),\n                  gimg_path.size() + 1);\n\n              auto status = ext_data->Send(output_buf);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n     
           MBLOG_ERROR << \"external data close failed:\" << status;\n                return status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto output_bufs = data_ctx->Output(\"Out_1\");\n              auto external = data_ctx->External();\n              std::string gimg_path =\n                  std::string((char*)(*external)[0]->ConstData());\n\n              cv::Mat gimg_data = cv::imread(gimg_path);\n\n              MBLOG_INFO << \"gimage col \" << gimg_data.cols << \"  grow \"\n                         << gimg_data.rows\n                         << \" gchannel:\" << gimg_data.channels();\n\n              long unsigned int gcols = gimg_data.cols;\n              long unsigned int grows = gimg_data.rows;\n              long unsigned int gchannels = gimg_data.channels();\n\n              uint32_t batch_size = 5;\n              std::vector<size_t> shape_vector(\n                  batch_size,\n                  modelbox::Volume({grows, gcols, gchannels}) * sizeof(uchar));\n              output_bufs->Build(shape_vector);\n\n              for (size_t i = 0; i < 5; ++i) {\n                const 
std::string& img_path = gimg_path;\n                cv::Mat img_data = cv::imread(img_path);\n                MBLOG_INFO << \"image col \" << img_data.cols << \"  row \"\n                           << img_data.rows\n                           << \" channel:\" << img_data.channels();\n\n                int32_t cols = img_data.cols;\n                int32_t rows = img_data.rows;\n                int32_t channels = img_data.channels();\n\n                output_bufs->At(i)->Set(\"width\", cols);\n                output_bufs->At(i)->Set(\"height\", rows);\n                output_bufs->At(i)->Set(\"channel\", channels);\n\n                auto* output_data =\n                    static_cast<uchar*>(output_bufs->MutableBufferData(i));\n                memcpy_s(output_data, output_bufs->At(i)->GetBytes(),\n                         img_data.data, img_data.total() * img_data.elemSize());\n              }\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_0_1_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_1_0_resize\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_1_0_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_1_0_resize\");\n    
mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"test_1_0_resize process\";\n              auto input_buf = op_ctx->Input(\"In_1\");\n              int32_t cols = 0;\n              int32_t rows = 0;\n              int32_t channels = 0;\n\n              for (size_t i = 0; i < input_buf->Size(); i++) {\n                input_buf->At(i)->Get(\"width\", cols);\n                input_buf->At(i)->Get(\"height\", rows);\n                input_buf->At(i)->Get(\"channel\", channels);\n                const auto* input_data =\n                    static_cast<const uchar*>(input_buf->ConstBufferData(i));\n\n                cv::Mat 
img_data(cv::Size(cols, rows), CV_8UC3);\n                memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                         input_data, input_buf->At(i)->GetBytes());\n\n                std::string name = std::string(TEST_DATA_DIR) + \"/test\" +\n                                   std::to_string(i) + \".jpg\";\n                cv::imwrite(name, img_data);\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_1_0_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n  return STATUS_OK;\n}\n\nTEST_F(CVResizeFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1_resize[type=flowunit, flowunit=test_0_1_resize, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          cv_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", width=128, height=128, interpolation=\"inter_nearest\", batch_size=5]\n          test_1_0_resize[type=flowunit, flowunit=test_1_0_resize, device=cpu, deviceid=0, label=\"<In_1>\",batch_size=5]                                \n          test_0_1_resize:Out_1 -> cv_resize:in_image \n          cv_resize:out_image -> test_1_0_resize:In_1                                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"InitUnit\", toml_content);\n  EXPECT_EQ(ret, 
STATUS_STOP);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto& elem : filePath) {\n    MBLOG_DEBUG << \"filePath: \" << elem;\n  }\n\n  for (size_t i = 0; i < 5; ++i) {\n    std::string expected_file_path =\n        std::string(TEST_ASSETS) + \"/cpu_resize_128x128_result.jpg\";\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string resize_result_file_path =\n        std::string(TEST_DATA_DIR) + \"/test\" + std::to_string(i) + \".jpg\";\n    cv::Mat resize_result_img = cv::imread(resize_result_file_path);\n\n    int result_data_size =\n        resize_result_img.total() * resize_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    EXPECT_EQ(result_data_size, expected_data_size);\n\n    int ret =\n        memcmp(resize_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(ret, 0);\n\n    auto rmret = remove(resize_result_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"tensorflow_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT TENSORFLOW_FOUND) \n    message(STATUS \"Not found tensorflow, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.cpu.tensorflow.in ${TEST_WORKING_DATA_DIR}/virtual_tfcpu_test.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} 
PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(FILES ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox/drivers/devices/cpu/flowunit/tensorflow\n        COMPONENT cpu-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cpu/device_cpu.h\"\n#include \"modelbox/flowunit.h\"\n#include \"tensorflow_cpu_inference_flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"tensorflow_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A cpu tensorflow inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<InferenceTensorflowCpuFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetNodelete(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/tensorflow_cpu_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tensorflow_cpu_inference_flowunit.h\"\n\nInferenceTensorflowCpuFlowUnit::InferenceTensorflowCpuFlowUnit() = default;\n\nInferenceTensorflowCpuFlowUnit::~InferenceTensorflowCpuFlowUnit() = default;\n\nstd::shared_ptr<modelbox::FlowUnit>\nInferenceTensorflowCpuFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  auto inference_flowunit = std::make_shared<InferenceTensorflowCpuFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/tensorflow_cpu_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_H_\n\n#include <modelbox/flowunit.h>\n\n#include \"tensorflow_inference_common.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\n\nclass InferenceTensorflowCpuFlowUnit : public InferenceTensorflowFlowUnit {\n public:\n  InferenceTensorflowCpuFlowUnit();\n  ~InferenceTensorflowCpuFlowUnit() override;\n};\n\nclass InferenceTensorflowCpuFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  InferenceTensorflowCpuFlowUnitFactory() = default;\n  ~InferenceTensorflowCpuFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/tensorflow_cpu_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <dlfcn.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"common/tensorflow_inference/tensorflow_inference_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n\nusing namespace tensorflow_inference; // NOLINT\n\nnamespace modelbox {\nclass InferenceTensorflowCpuFlowUnitTest : public testing::Test {\n public:\n  InferenceTensorflowCpuFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    auto version = GetTFVersion();\n\n    if (SUPPORT_TF_VERSION.find(version) == SUPPORT_TF_VERSION.end()) {\n      version_suitable_ = false;\n      MBLOG_INFO << \"the version is \" << version\n                 << \", not in support version, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit(driver_flow_);\n    EXPECT_EQ(ret, STATUS_OK);\n\n    SetUpTomlFiles(version);\n  }\n\n  void TearDown() override {\n    if (!version_suitable_) {\n      GTEST_SKIP();\n    }\n\n    RemoveFiles();\n    driver_flow_->Clear();\n  };\n\n  std::shared_ptr<DriverFlowTest> 
GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_toml_file = \"virtual_tfcpu_test.toml\";\n\n  std::string tensorflow_cpu_path, dest_toml_file;\n\n private:\n  void SetUpTomlFiles(const std::string &version);\n  void RemoveFiles();\n\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n  bool version_suitable_{true};\n};\n\nvoid InferenceTensorflowCpuFlowUnitTest::RemoveFiles() {\n  auto ret = remove(dest_toml_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorflow_cpu_path.c_str());\n  EXPECT_EQ(ret, 0);\n}\n\nvoid InferenceTensorflowCpuFlowUnitTest::SetUpTomlFiles(\n    const std::string &version) {\n  const std::string src_file_dir = test_assets + \"/tensorflow/\" + version;\n\n  const std::string src_file_pb_toml = test_data_dir + \"/\" + test_toml_file;\n\n  tensorflow_cpu_path = test_data_dir + \"/tensorflow_cpu\";\n  auto mkdir_ret = mkdir(tensorflow_cpu_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  dest_toml_file = tensorflow_cpu_path + \"/\" + test_toml_file;\n  auto status = ReplaceVersion(src_file_pb_toml, dest_toml_file, version);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nstd::shared_ptr<DriverFlowTest>\nInferenceTensorflowCpuFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(InferenceTensorflowCpuFlowUnitTest, RunUnitBatch) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/tensorflow_cpu\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1_batch[type=flowunit, flowunit=test_0_1_batch, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=inference, device=cpu, deviceid=0, label=\"<input> | 
<output>\", batch_size=10]\n          test_1_0_batch[type=flowunit, flowunit=test_1_0_batch, device=cpu, deviceid=0, label=\"<In_1>\", batch_size=10]  \n                                  \n          test_0_1_batch:Out_1 -> inference:input\n          inference:output -> test_1_0_batch:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content, 99999);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/tensorflow/test_toml/modelbox.test.cpu.tensorflow.in",
    "content": "[base]\nname = \"inference\"\ndevice = \"cpu\"\nversion = \"1.1.2\"\ndescription = \"a cpu inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_pb/frozen_model.pb\"\ntype = \"inference\"\nvirtual_type = \"tensorflow\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output\"\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"video_decoder\")\n\nif (NOT FFMPEG_FOUND) \n    message(STATUS \"Not found ffmpeg, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nfind_package(FFMPEG)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_VIDEO_DECODE_INCLUDE})\n\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_VIDEO_DECODE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/ffmpeg_video_decoder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_video_decoder.h\"\n\n#include <video_decode_common.h>\n\n#include \"modelbox/base/log.h\"\n\nmodelbox::Status FfmpegVideoDecoder::Init(AVCodecID codec_id) {\n  codec_id_ = codec_id;\n  auto *codec_ptr = avcodec_find_decoder(codec_id_);\n  if (codec_ptr == nullptr) {\n    MBLOG_ERROR << \"Find decoder for codec[\" << codec_id_ << \"] failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto *av_ctx_ptr = avcodec_alloc_context3(codec_ptr);\n  if (av_ctx_ptr == nullptr) {\n    MBLOG_ERROR << \"avcodec_alloc_context3 return, codec_id \" << codec_id_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  AVDictionary *opts = nullptr;\n  av_dict_set(&opts, \"refcounted_frames\", \"1\", 0);\n  auto ret = avcodec_open2(av_ctx_ptr, codec_ptr, &opts);\n  av_dict_free(&opts);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"avcodec_open2 failed, code_id \" << codec_id_ << \", err \"\n                << err_str;\n    avcodec_free_context(&av_ctx_ptr);\n    return modelbox::STATUS_FAULT;\n  }\n\n  av_ctx_.reset(av_ctx_ptr,\n                [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDecoder::Decode(\n    const std::shared_ptr<const AVPacket> &av_packet,\n    std::list<std::shared_ptr<AVFrame>> 
&av_frame_list) {\n  auto ret = avcodec_send_packet(av_ctx_.get(), av_packet.get());\n  if (ret == AVERROR_EOF) {\n    return modelbox::STATUS_NODATA;\n  }\n\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"avcodec_send_packet failed, err \" << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  do {\n    auto *av_frame_ptr = av_frame_alloc();\n    if (av_frame_ptr == nullptr) {\n      MBLOG_ERROR << \"av frame alloc failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    std::shared_ptr<AVFrame> av_frame(\n        av_frame_ptr, [](AVFrame *frame) { av_frame_free(&frame); });\n    ret = avcodec_receive_frame(av_ctx_.get(), av_frame.get());\n    if (ret == AVERROR(EAGAIN)) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    if (ret == AVERROR_EOF) {\n      return modelbox::STATUS_NODATA;\n    }\n\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, err_str);\n      MBLOG_ERROR << \"avcodec_receive_frame failed, err \" << err_str;\n      return modelbox::STATUS_FAULT;\n    }\n\n    av_frame_list.push_back(av_frame);\n  } while (ret >= 0);\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/ffmpeg_video_decoder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_DECODER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_DECODER_H_\n\n#include <modelbox/base/status.h>\n#include <memory>\n#include <vector>\n#include <list>\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavutil/frame.h>\n}\n\nclass FfmpegVideoDecoder {\n public:\n  modelbox::Status Init(AVCodecID codec_id);\n\n  modelbox::Status Decode(const std::shared_ptr<const AVPacket> &av_packet,\n                        std::list<std::shared_ptr<AVFrame>> &av_frame_list);\n\n private:\n  AVCodecID codec_id_{AV_CODEC_ID_NONE};\n  std::shared_ptr<AVCodecContext> av_ctx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_DECODER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/video_decoder_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_decoder_flowunit.h\"\n\n#include <securec.h>\n\n#include \"ffmpeg_color_converter.h\"\n#include \"ffmpeg_video_decoder.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"video_decode_common.h\"\n\nVideoDecoderFlowUnit::VideoDecoderFlowUnit() = default;\nVideoDecoderFlowUnit::~VideoDecoderFlowUnit() = default;\n\nconst std::set<std::string> g_supported_pix_fmt = {\"nv12\", \"rgb\", \"bgr\"};\n\nmodelbox::Status VideoDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto fmt = opts->GetString(\"pix_fmt\", \"nv12\");\n  if (fmt == \"rgb\") {\n    out_pix_fmt_ = AVPixelFormat::AV_PIX_FMT_RGB24;\n  } else if (fmt == \"bgr\") {\n    out_pix_fmt_ = AVPixelFormat::AV_PIX_FMT_BGR24;\n  } else if (fmt == \"nv12\") {\n    out_pix_fmt_ = AVPixelFormat::AV_PIX_FMT_NV12;\n  } else {\n    MBLOG_ERROR << \"Not support pix fmt \" << fmt;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  out_pix_fmt_str_ = fmt;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::shared_ptr<modelbox::Buffer> flag_buffer = nullptr;\n  auto video_decoder = 
std::static_pointer_cast<FfmpegVideoDecoder>(\n      data_ctx->GetPrivate(DECODER_CTX));\n  if (video_decoder == nullptr) {\n    MBLOG_ERROR << \"Video decoder is not init\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::vector<std::shared_ptr<AVPacket>> pkt_list;\n  auto ret = ReadData(data_ctx, pkt_list, flag_buffer);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read av_packet input failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (flag_buffer) {\n    if (ReopenDecoder(data_ctx, flag_buffer) != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Reopen decoder failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    video_decoder = std::static_pointer_cast<FfmpegVideoDecoder>(\n        data_ctx->GetPrivate(DECODER_CTX));\n    if (video_decoder == nullptr) {\n      MBLOG_ERROR << \"Video decoder is not init\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  std::list<std::shared_ptr<AVFrame>> frame_list;\n  modelbox::Status decode_ret = modelbox::STATUS_SUCCESS;\n  for (auto &pkt : pkt_list) {\n    decode_ret = video_decoder->Decode(pkt, frame_list);\n    if (decode_ret == modelbox::STATUS_FAULT) {\n      MBLOG_ERROR << \"Video decoder failed\";\n      // TODO: Process decoder fault\n    }\n  }\n\n  ret = WriteData(data_ctx, frame_list, decode_ret == modelbox::STATUS_NODATA);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Send frame data failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (decode_ret == modelbox::STATUS_NODATA) {\n    MBLOG_INFO << \"Video decoder finish\";\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::ReadData(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<std::shared_ptr<AVPacket>> &pkt_list,\n    std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  bool reset_flag = false;\n  auto video_packet_input = data_ctx->Input(VIDEO_PACKET_INPUT);\n  if (video_packet_input == nullptr) {\n    MBLOG_ERROR << 
\"video packet input is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (video_packet_input->Size() == 0) {\n    MBLOG_ERROR << \"video packet input size is 0\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < video_packet_input->Size(); ++i) {\n    auto packet_buffer = video_packet_input->At(i);\n\n    if (reset_flag == false) {\n      packet_buffer->Get(\"reset_flag\", reset_flag);\n      if (reset_flag == true) {\n        flag_buffer = packet_buffer;\n      }\n    }\n\n    std::shared_ptr<AVPacket> pkt;\n    auto ret = ReadAVPacket(packet_buffer, pkt);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    pkt_list.push_back(pkt);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::ReadAVPacket(\n    const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n    std::shared_ptr<AVPacket> &pkt) {\n  auto size = packet_buffer->GetBytes();\n  if (size == 1) {\n    pkt = std::make_shared<AVPacket>();\n    pkt->data = nullptr;\n    pkt->size = 0;\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto *data = const_cast<void *>(packet_buffer->ConstData());\n  if (data == nullptr) {\n    MBLOG_ERROR << \"video_packet data is nullptr\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int64_t pts = 0;\n  int64_t dts = 0;\n  packet_buffer->Get(\"pts\", pts);\n  packet_buffer->Get(\"dts\", dts);\n  return BuildAVPacket(pkt, size, (uint8_t *)data, pts, dts);\n}\n\nmodelbox::Status VideoDecoderFlowUnit::BuildAVPacket(\n    std::shared_ptr<AVPacket> &pkt, size_t size, uint8_t *data, int64_t pts,\n    int64_t dts) {\n  auto *pkt_ptr = av_packet_alloc();\n  if (pkt_ptr == nullptr) {\n    MBLOG_ERROR << \"av_packet_alloc failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  pkt.reset(pkt_ptr, [](AVPacket *pkt) {\n    pkt->data = nullptr;\n    av_packet_free(&pkt);\n  });\n  pkt->data = data;\n  pkt->size = size;\n  pkt->pts = pts;\n  pkt->dts = dts;\n  return 
modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::WriteData(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::list<std::shared_ptr<AVFrame>> &frame_list, bool eos) {\n  auto last_frame =\n      std::static_pointer_cast<AVFrame>(data_ctx->GetPrivate(LAST_FRAME));\n  data_ctx->SetPrivate(LAST_FRAME, nullptr);\n  auto color_cvt = std::static_pointer_cast<FfmpegColorConverter>(\n      data_ctx->GetPrivate(CVT_CTX));\n  auto frame_buff_list = data_ctx->Output(FRAME_INFO_OUTPUT);\n  if (!eos && !frame_list.empty()) {\n    // try save last frame in data_ctx, when demuxe end, we could set last\n    // frame eos to 'true'\n    data_ctx->SetPrivate(LAST_FRAME, frame_list.back());\n    frame_list.pop_back();\n  }\n\n  if (last_frame != nullptr) {\n    frame_list.push_front(last_frame);\n  }\n\n  if (frame_list.size() == 0) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto frame_index =\n      std::static_pointer_cast<int64_t>(data_ctx->GetPrivate(FRAME_INDEX_CTX));\n  auto pack_buff_list = data_ctx->Input(VIDEO_PACKET_INPUT);\n  auto pack_buff = pack_buff_list->At(0);\n  int32_t rate_num = 0;\n  int32_t rate_den = 0;\n  int32_t rotate_angle = 0;\n  int64_t duration = 0;\n  pack_buff->Get(\"rate_num\", rate_num);\n  pack_buff->Get(\"rate_den\", rate_den);\n  pack_buff->Get(\"rotate_angle\", rotate_angle);\n  pack_buff->Get(\"duration\", duration);\n  double time_base = 0;\n  pack_buff->Get(\"time_base\", time_base);\n  std::vector<size_t> shape;\n  size_t buffer_size;\n  for (auto &frame : frame_list) {\n    auto ret = videodecode::GetBufferSize(frame->width, frame->height,\n                                          out_pix_fmt_str_, buffer_size);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return ret;\n    }\n\n    shape.push_back(buffer_size);\n  }\n\n  frame_buff_list->Build(shape);\n  size_t i = 0;\n  auto meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto source_url =\n      
std::static_pointer_cast<std::string>(meta->GetMeta(SOURCE_URL_META));\n  for (auto &frame_ptr : frame_list) {\n    videodecode::UpdateStatsInfo(data_ctx, frame_ptr->width, frame_ptr->height);\n    auto frame_buff = frame_buff_list->At(i);\n    ++i;\n    auto ret = color_cvt->CvtColor(\n        frame_ptr, (uint8_t *)(frame_buff->MutableData()), out_pix_fmt_);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return ret;\n    }\n\n    frame_buff->Set(\"index\", *frame_index);\n    *frame_index = *frame_index + 1;\n    frame_buff->Set(\"width\", frame_ptr->width);\n    frame_buff->Set(\"height\", frame_ptr->height);\n    frame_buff->Set(\"height_stride\", frame_ptr->height);\n    frame_buff->Set(\"rate_num\", rate_num);\n    frame_buff->Set(\"rate_den\", rate_den);\n    frame_buff->Set(\"rotate_angle\", rotate_angle);\n    frame_buff->Set(\"duration\", duration);\n    frame_buff->Set(\"eos\", false);\n    frame_buff->Set(\"pix_fmt\", out_pix_fmt_str_);\n    frame_buff->Set(\"url\", *source_url);\n    auto width_stride = frame_ptr->width;\n    if (out_pix_fmt_str_ == \"rgb\" || out_pix_fmt_str_ == \"bgr\") {\n      width_stride *= 3;\n      int32_t channel = 3;\n      frame_buff->Set(\"channel\", channel);\n      frame_buff->Set(\n          \"shape\", std::vector<size_t>({static_cast<size_t>(frame_ptr->height),\n                                        static_cast<size_t>(frame_ptr->width),\n                                        static_cast<size_t>(channel)}));\n      frame_buff->Set(\"layout\", std::string(\"hwc\"));\n    }\n    frame_buff->Set(\"width_stride\", width_stride);\n\n    frame_buff->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    frame_buff->Set(\"timestamp\", (int64_t)(frame_ptr->pts * time_base));\n    if (eos && frame_ptr == frame_list.back()) {\n      frame_buff->Set(\"eos\", true);\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::ReopenDecoder(\n    
std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  auto old_source_url = std::static_pointer_cast<std::string>(\n      data_ctx->GetPrivate(SOURCE_URL_META));\n  auto old_codec_id =\n      std::static_pointer_cast<AVCodecID>(data_ctx->GetPrivate(CODEC_ID_META));\n\n  if (old_source_url == nullptr || old_codec_id == nullptr) {\n    MBLOG_ERROR << \"Reopen decoder failed, source url or codec id is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string source_url;\n  AVCodecID codec_id;\n  if (flag_buffer->Get(SOURCE_URL_META, source_url) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (flag_buffer->Get(CODEC_ID_META, codec_id) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (source_url == *old_source_url && codec_id == *old_codec_id) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  MBLOG_WARN << \"Reopen decoder, source url or codec id changed\";\n  auto ret = CloseDecoder(data_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Close decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return NewDecoder(data_ctx, source_url, codec_id);\n}\n\nmodelbox::Status VideoDecoderFlowUnit::CloseDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  data_ctx->SetPrivate(DECODER_CTX, nullptr);\n  data_ctx->SetPrivate(CVT_CTX, nullptr);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, nullptr);\n  data_ctx->SetPrivate(SOURCE_URL_META, nullptr);\n  data_ctx->SetPrivate(CODEC_ID_META, nullptr);\n  data_ctx->SetOutputMeta(FRAME_INFO_OUTPUT, nullptr);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::NewDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &source_url, AVCodecID codec_id) {\n  auto video_decoder = std::make_shared<FfmpegVideoDecoder>();\n  auto ret = video_decoder->Init(codec_id);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Video decoder init 
failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto color_cvt = std::make_shared<FfmpegColorConverter>();\n  auto frame_index = std::make_shared<int64_t>();\n  *frame_index = 0;\n  data_ctx->SetPrivate(DECODER_CTX, video_decoder);\n  data_ctx->SetPrivate(CVT_CTX, color_cvt);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index);\n  data_ctx->SetPrivate(SOURCE_URL_META,\n                       std::make_shared<std::string>(source_url));\n  data_ctx->SetPrivate(CODEC_ID_META, std::make_shared<AVCodecID>(codec_id));\n  auto meta = std::make_shared<modelbox::DataMeta>();\n  meta->SetMeta(SOURCE_URL_META, std::make_shared<std::string>(source_url));\n  data_ctx->SetOutputMeta(FRAME_INFO_OUTPUT, meta);\n  MBLOG_INFO << \"Video decoder init success\";\n  MBLOG_INFO << \"Video decoder output pix fmt \" << out_pix_fmt_str_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto in_meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto codec_id =\n      std::static_pointer_cast<AVCodecID>(in_meta->GetMeta(CODEC_META));\n  if (codec_id == nullptr) {\n    MBLOG_ERROR << \"Stream codec id is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto source_url =\n      std::static_pointer_cast<std::string>(in_meta->GetMeta(SOURCE_URL_META));\n  if (source_url == nullptr) {\n    MBLOG_ERROR << \"Stream source url is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return NewDecoder(data_ctx, *source_url, *codec_id);\n}\n\nmodelbox::Status VideoDecoderFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return CloseDecoder(data_ctx);\n}\n\nMODELBOX_FLOWUNIT(VideoDecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitInput({VIDEO_PACKET_INPUT});\n  desc.AddFlowUnitOutput({FRAME_INFO_OUTPUT});\n  desc.SetFlowType(modelbox::STREAM);\n  
desc.SetDescription(FLOWUNIT_DESC);\n  desc.SetInputContiguous(false);\n  std::map<std::string, std::string> pix_fmt_list;\n\n  for (const auto &item : g_supported_pix_fmt) {\n    pix_fmt_list[item] = item;\n  }\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"list\", true, \"0\", \"the decoder pixel format\", pix_fmt_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/video_decoder_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include \"ffmpeg_video_decoder.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video decoder on cpu. 
\\n\"\n    \"\\t@Port parameter: The input port buffer type is video_packet, the output \"\n    \"port buffer type is video_frame.\\n\"\n    \"\\t  The video_packet buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: pts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: dts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: time_base,     Type: double\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t  The video_frame buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: index,         Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: url,           Type: string\\n\"\n    \"\\t\\tField Name: timestamp,     Type: int64_t\\n\"\n    \"\\t\\tField Name: eos,           Type: bool\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: string\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The flowunit 'video_decoder' must be used in pair \"\n    \"with 'video_demuxer'. 
The output buffer meta field 'pix_fmt' is \"\n    \"'bgr_packed' or 'rgb_packed', 'layout' is 'hcw'.\";\nconstexpr const char *CODEC_META = \"codec_meta\";\nconstexpr const char *DECODER_CTX = \"decoder_ctx\";\nconstexpr const char *CVT_CTX = \"converter_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const char *VIDEO_PACKET_INPUT = \"in_video_packet\";\nconstexpr const char *FRAME_INFO_OUTPUT = \"out_video_frame\";\nconstexpr const char *SOURCE_URL_META = \"source_url\";\nconstexpr const char *CODEC_ID_META = \"codec_id\";\nconstexpr const char *LAST_FRAME = \"last_frame\";\n\nclass VideoDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoDecoderFlowUnit();\n  ~VideoDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status ReadData(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<std::shared_ptr<AVPacket>> &pkt_list,\n      std::shared_ptr<modelbox::Buffer> &flag_buffer);\n  modelbox::Status ReadAVPacket(\n      const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n      std::shared_ptr<AVPacket> &pkt);\n  modelbox::Status BuildAVPacket(std::shared_ptr<AVPacket> &pkt, size_t size,\n                                 uint8_t *data, int64_t pts, 
int64_t dts);\n  modelbox::Status WriteData(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                             std::list<std::shared_ptr<AVFrame>> &frame_list,\n                             bool eos);\n\n  modelbox::Status CloseDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status NewDecoder(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                              const std::string &source_url,\n                              AVCodecID codec_id);\n  modelbox::Status ReopenDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::shared_ptr<modelbox::Buffer> &flag_buffer);\n\n  AVPixelFormat out_pix_fmt_{AV_PIX_FMT_NV12};\n  std::string out_pix_fmt_str_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_decoder/video_decoder_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"common/video_decoder/video_decoder_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\nclass VideoDecoderFlowUnitTest : public testing::Test {\n public:\n  VideoDecoderFlowUnitTest() = default;\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n\n  void StartFlow(std::string& toml_content, uint64_t millisecond);\n};\n\nvoid VideoDecoderFlowUnitTest::StartFlow(std::string& toml_content,\n                                         const uint64_t millisecond) {\n  flow_ = std::make_shared<MockFlow>();\n  auto ret = videodecoder::AddMockFlowUnit(flow_);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  ret = flow_->BuildAndRun(\"VideoDecoder\", toml_content, millisecond);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n}\n\nTEST_F(VideoDecoderFlowUnitTest, cpuDecoderNv12Test) {\n  auto toml_content = videodecoder::GetTomlConfig(\"cpu\", \"nv12\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderFlowUnitTest, cpuDecoderRgbTest) {\n  auto toml_content = 
videodecoder::GetTomlConfig(\"cpu\", \"rgb\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderFlowUnitTest, cpuDecoderBgrTest) {\n  auto toml_content = videodecoder::GetTomlConfig(\"cpu\", \"bgr\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"video_demuxer\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT FFMPEG_FOUND) \n    message(STATUS \"Not found ffmpeg, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nfind_package(FFMPEG)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_SOURCE_CONTEXT_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_DRIVER_UTIL_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_SOURCE_CONTEXT_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/ffmpeg_reader.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_reader.h\"\n\n#include \"driver_util.h\"\n#include <modelbox/base/log.h>\n\n#include <regex>\n\n#define GET_FFMPEG_ERR(err_num, var_name)        \\\n  char var_name[AV_ERROR_MAX_STRING_SIZE] = {0}; \\\n  av_make_error_string(var_name, AV_ERROR_MAX_STRING_SIZE, err_num);\n\nstatic int CheckTimeout(void *ctx) {\n  if (ctx == nullptr) {\n    MBLOG_ERROR << \"CheckTimeout: ctx is nullptr!\";\n    return 1;\n  }\n  auto *p = (FfmpegReader *)ctx;\n  if (p->IsTimeout()) {\n    MBLOG_INFO << \"CheckTimeout: ffmpeg read timeout !\";\n    return 1;\n  }\n  return 0;\n}\n\nmodelbox::Status FfmpegReader::Open(const std::string &source_url) {\n#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)\n  av_register_all();\n#endif\n  format_ctx_ = nullptr;\n\n  auto ret = avformat_network_init();\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"avformat_network_init failed, err \" << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  origin_source_url_ = source_url;\n  std::regex pattern(\"://.*?@\");\n  format_source_url_ = std::regex_replace(origin_source_url_, pattern, \"://*@\");\n  AVDictionary *options = nullptr;\n  SetupRtspOption(format_source_url_, &options);\n  SetupCommonOption(format_source_url_, &options);\n  SetupHttpOption(format_source_url_, &options);\n\n  
AVFormatContext *ctx = nullptr;\n  ctx = avformat_alloc_context();\n  if (ctx == nullptr) {\n    av_dict_free(&options);\n    return {modelbox::STATUS_FAULT, \"ctx is null\"};\n  }\n  ResetStartTime();\n  ctx->interrupt_callback.callback = CheckTimeout;\n  ctx->interrupt_callback.opaque = this;\n  ret = avformat_open_input(&ctx, source_url.c_str(), nullptr, &options);\n  av_dict_free(&options);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"avformat open input[\" << format_source_url_\n                << \"] failed, err \" << err_str;\n    avformat_close_input(&ctx);\n    return modelbox::STATUS_FAULT;\n  }\n\n  MBLOG_INFO << \"Open source \" << format_source_url_ << \" success, format \"\n             << ctx->iformat->long_name << \" : \" << ctx->iformat->name;\n  format_ctx_.reset(ctx,\n                    [](AVFormatContext *ctx) { avformat_close_input(&ctx); });\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::shared_ptr<AVFormatContext> FfmpegReader::GetCtx() { return format_ctx_; }\n\nstd::string FfmpegReader::GetSourceURL() { return format_source_url_; }\n\nvoid FfmpegReader::SetupRtspOption(const std::string &source_url,\n                                   AVDictionary **options) {\n  const std::string rtsp_prefix = \"RTSP:\";\n  if (source_url.size() < rtsp_prefix.size()) {\n    return;\n  }\n\n  auto source_url_prefix = source_url.substr(0, rtsp_prefix.size());\n  std::transform(source_url_prefix.begin(), source_url_prefix.end(),\n                 source_url_prefix.begin(), ::toupper);\n  if (source_url_prefix != rtsp_prefix) {\n    return;\n  }\n\n  MBLOG_INFO << \"Source is rtsp stream\";\n  av_dict_set(options, \"rtsp_transport\", \"tcp\", 0);\n  av_dict_set(options, \"recv_buffer_size\", \"10240000\", 0);\n  av_dict_set(options, \"stimeout\", \"2000000\", 0);\n}\n\nvoid FfmpegReader::SetupCommonOption(const std::string &source_url,\n                                     AVDictionary **options) {\n  av_dict_set(options, 
\"reconnect\", \"1\", 0);\n  av_dict_set(options, \"rw_timeout\", \"30000000\", 0);\n  MBLOG_INFO << \"Source url:\" << driverutil::string_masking(source_url)\n             << \", reconnect:true, rw_timeout:30s\";\n}\n\nvoid FfmpegReader::SetupHttpOption(const std::string &source_url,\n                                   AVDictionary **options) {\n  const std::string http_prefix = \"http:\";\n  if (source_url.size() < http_prefix.size()) {\n    return;\n  }\n\n  auto source_url_prefix = source_url.substr(0, http_prefix.size());\n  if (source_url_prefix != http_prefix) {\n    return;\n  }\n\n  MBLOG_INFO << \"Source is http file\";\n  av_dict_set(options, \"multiple_requests\", \"1\", 0);\n  av_dict_set(options, \"rw_timeout\", \"1000000\", 0);\n  av_log_set_level(AV_LOG_ERROR);\n}\n\nvoid FfmpegReader::ResetStartTime() {\n  start_time_ = std::chrono::steady_clock::now();\n}\n\nbool FfmpegReader::IsTimeout() {\n  return (std::chrono::steady_clock::now() - start_time_ >=\n          FFMPEG_READER_TIMEOUT_INTERVAL);\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/ffmpeg_reader.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_READER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_READER_H_\n\n#include <modelbox/base/status.h>\n\n#include <chrono>\n#include <memory>\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavutil/log.h>\n}\nconstexpr std::chrono::seconds FFMPEG_READER_TIMEOUT_INTERVAL =\n    std::chrono::seconds(60);\n\nclass FfmpegReader {\n public:\n  modelbox::Status Open(const std::string &source_url);\n\n  std::shared_ptr<AVFormatContext> GetCtx();\n\n  std::string GetSourceURL();\n\n  void ResetStartTime();\n\n  bool IsTimeout();\n\n private:\n  void SetupRtspOption(const std::string &source_url, AVDictionary **options);\n\n  void SetupCommonOption(const std::string &source_url, AVDictionary **options);\n\n  void SetupHttpOption(const std::string &source_url, AVDictionary **options);\n\n  std::string origin_source_url_;\n  std::string format_source_url_;\n  std::shared_ptr<AVFormatContext> format_ctx_;\n  std::chrono::steady_clock::time_point start_time_;\n};\n\n#endif"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/ffmpeg_video_demuxer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_video_demuxer.h\"\n\n#include <modelbox/base/log.h>\n\n#include <algorithm>\n\n#include \"driver_util.h\"\n\n#define GET_FFMPEG_ERR(err_num, var_name)        \\\n  char var_name[AV_ERROR_MAX_STRING_SIZE] = {0}; \\\n  av_make_error_string(var_name, AV_ERROR_MAX_STRING_SIZE, err_num);\n\nmodelbox::Status FfmpegVideoDemuxer::Init(std::shared_ptr<FfmpegReader> &reader,\n                                          bool key_frame_only) {\n  source_url_ = reader->GetSourceURL();\n  format_ctx_ = reader->GetCtx();\n\n  if (format_ctx_ == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  reader_ = reader;\n  auto ret = SetupStreamInfo();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = GetStreamParam();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  key_frame_only_ = key_frame_only;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::Demux(\n    std::shared_ptr<AVPacket> &av_packet) {\n  if (format_ctx_ == nullptr) {\n    MBLOG_ERROR << \"ffmpeg format context is null, init first\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  reader_->ResetStartTime();\n  auto ret = ReadPacket(av_packet);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = BsfProcess(av_packet);\n  if (ret != 
modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid FfmpegVideoDemuxer::LogStreamInfo() {\n  MBLOG_INFO << \"demux info:\";\n  MBLOG_INFO << \"source url: \" << driverutil::string_masking(source_url_);\n  MBLOG_INFO << \"key frame only: \" << key_frame_only_;\n  MBLOG_INFO << \"codec id: \" << codec_id_;\n  MBLOG_INFO << \"profile id: \" << profile_id_;\n  MBLOG_INFO << \"creation time: \" << creation_time_;\n  MBLOG_INFO << \"time base: \" << time_base_;\n  MBLOG_INFO << \"frame width: \" << frame_width_;\n  MBLOG_INFO << \"frame height: \" << frame_height_;\n  MBLOG_INFO << \"frame rate: \" << frame_rate_numerator_ << \"/\"\n             << frame_rate_denominator_;\n  MBLOG_INFO << \"frame rotate: \" << frame_rotate_;\n  MBLOG_INFO << \"frame count: \" << frame_count_;\n  MBLOG_INFO << \"video duration: \" << GetDuration();\n  std::stringstream bsf_name_log;\n  for (auto &bsf_name : bsf_name_list_) {\n    bsf_name_log << bsf_name << \",\";\n  }\n\n  MBLOG_INFO << \"bsf_name:\" << bsf_name_log.str();\n}\n\nAVCodecID FfmpegVideoDemuxer::GetCodecID() { return codec_id_; }\n\nint32_t FfmpegVideoDemuxer::GetProfileID() { return profile_id_; }\n\nconst AVCodecParameters *FfmpegVideoDemuxer::GetCodecParam() {\n  return format_ctx_->streams[stream_id_]->codecpar;\n}\n\nvoid FfmpegVideoDemuxer::GetFrameRate(int32_t &rate_num, int32_t &rate_den) {\n  rate_num = frame_rate_numerator_;\n  rate_den = frame_rate_denominator_;\n}\n\nvoid FfmpegVideoDemuxer::GetFrameMeta(int32_t *frame_width,\n                                      int32_t *frame_height) {\n  *frame_width = frame_width_;\n  *frame_height = frame_height_;\n}\n\nint32_t FfmpegVideoDemuxer::GetFrameRotate() { return frame_rotate_; }\n\ndouble FfmpegVideoDemuxer::GetTimeBase() { return time_base_; }\n\nint64_t FfmpegVideoDemuxer::GetDuration() {\n  if (format_ctx_->duration != AV_NOPTS_VALUE) {\n    return format_ctx_->duration / AV_TIME_BASE;\n  }\n\n  auto &stream = 
format_ctx_->streams[stream_id_];\n  if (stream->duration > 0 && stream->time_base.den > 0) {\n    return stream->duration / stream->time_base.den * stream->time_base.num;\n  }\n\n  return 0;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::ReadPacket(\n    std::shared_ptr<AVPacket> &av_packet) {\n  int32_t ret = 0;\n  auto *packet_ptr = av_packet_alloc();\n  if (packet_ptr == nullptr) {\n    MBLOG_ERROR << \"ReadPacket alloc packet failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  av_packet.reset(packet_ptr,\n                  [](AVPacket *packet) { av_packet_free(&packet); });\n  while ((ret = av_read_frame(format_ctx_.get(), av_packet.get())) >= 0) {\n    if (!IsTargetPacket(av_packet)) {\n      av_packet_unref(av_packet.get());\n      continue;\n    }\n\n    break;\n  }\n\n  if (ret == AVERROR_EOF) {\n    MBLOG_INFO << \"Stream \" << driverutil::string_masking(source_url_) << \" is end\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"av_read_frame failed, err \" << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (av_packet->size < 0) {\n    MBLOG_ERROR << \"Read packet size < 0\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nbool FfmpegVideoDemuxer::IsTargetPacket(std::shared_ptr<AVPacket> &av_packet) {\n  if (av_packet->stream_index != stream_id_) {\n    return false;\n  }\n\n  if (key_frame_only_ && ((av_packet->flags & AV_PKT_FLAG_KEY) != 0)) {\n    return false;\n  }\n\n  if (av_packet->size == 0) {\n    return false;\n  }\n\n  return true;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::BsfProcess(\n    std::shared_ptr<AVPacket> &av_packet) {\n  for (size_t i = 0; i < bsf_ctx_list_.size(); ++i) {\n    auto &bsf_ctx = bsf_ctx_list_[i];\n    auto &bsf_name = bsf_name_list_[i];\n    if (bsf_ctx == nullptr) {\n      continue;\n    }\n\n    auto ret = av_bsf_send_packet(bsf_ctx.get(), av_packet.get());\n    if (ret < 0) {\n      
GET_FFMPEG_ERR(ret, err_str);\n      MBLOG_ERROR << \"Bit stream filter[\" << bsf_name\n                  << \"] send packet failed, ret \" << err_str;\n      return modelbox::STATUS_FAULT;\n    }\n\n    ret = av_bsf_receive_packet(bsf_ctx.get(), av_packet.get());\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, err_str);\n      MBLOG_ERROR << \"Bit stream filter[\" << bsf_name\n                  << \"] receive packet failed, ret \" << err_str;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::SetupStreamInfo() {\n  auto ret = avformat_find_stream_info(format_ctx_.get(), nullptr);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"Find stream info failed, err \" << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  stream_id_ = av_find_best_stream(format_ctx_.get(), AVMEDIA_TYPE_VIDEO, -1,\n                                   -1, nullptr, 0);\n  if (stream_id_ < 0) {\n    MBLOG_ERROR << \"Count find a stream\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetStreamParam() {\n  auto ret = GetStreamCodecID();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = GetStreamTimeInfo();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = GetStreamFrameInfo();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  ret = GetStreamBsfInfo();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetStreamCodecID() {\n  codec_id_ = format_ctx_->streams[stream_id_]->codecpar->codec_id;\n  profile_id_ = format_ctx_->streams[stream_id_]->codecpar->profile & 0xFF;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetStreamTimeInfo() {\n  auto *entry =\n      av_dict_get(format_ctx_->metadata, \"creation_timestamp\", nullptr, 
0);\n  if (entry != nullptr) {\n    creation_time_ = atol(entry->value);\n  } else {\n    MBLOG_INFO << \"Stream \" << driverutil::string_masking(source_url_) << \" creation time is null\";\n  }\n\n  time_base_ = av_q2d(format_ctx_->streams[stream_id_]->time_base) * 1000;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetStreamFrameInfo() {\n  frame_width_ = format_ctx_->streams[stream_id_]->codecpar->width;\n  frame_height_ = format_ctx_->streams[stream_id_]->codecpar->height;\n  frame_rate_numerator_ = format_ctx_->streams[stream_id_]->avg_frame_rate.num;\n  frame_rate_denominator_ =\n      format_ctx_->streams[stream_id_]->avg_frame_rate.den;\n  auto *entry = av_dict_get(format_ctx_->streams[stream_id_]->metadata,\n                            \"rotate\", nullptr, 0);\n  if (entry != nullptr) {\n    frame_rotate_ = (atol(entry->value) % 360 + 360) % 360;\n  } else {\n    MBLOG_INFO << \"Stream \" << driverutil::string_masking(source_url_) << \" rotate is null\";\n  }\n  RescaleFrameRate(frame_rate_numerator_, frame_rate_denominator_);\n  frame_count_ = format_ctx_->streams[stream_id_]->nb_frames;\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid FfmpegVideoDemuxer::RescaleFrameRate(int32_t &frame_rate_numerator,\n                                          int32_t &frame_rate_denominator) {\n  // Try to avoid too large numerator & denominator\n  const int32_t fraction_limit =\n      32767;  // Try to be close to this value, might be greater\n  auto numerator_scale = frame_rate_numerator / fraction_limit;\n  auto denominator_scale = frame_rate_denominator / fraction_limit;\n  auto fraction_scale = std::max(numerator_scale, denominator_scale);\n  fraction_scale = std::min(fraction_scale, frame_rate_denominator);\n  if (fraction_scale > 1) {\n    frame_rate_numerator = frame_rate_numerator / fraction_scale;\n    // We are ensured that fraction_scale <= frame_rate_denominator\n    frame_rate_denominator = frame_rate_denominator / 
fraction_scale;\n  }\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetStreamBsfInfo() {\n  auto *extra_data = format_ctx_->streams[stream_id_]->codecpar->extradata;\n  auto extra_size = format_ctx_->streams[stream_id_]->codecpar->extradata_size;\n  std::stringstream extra_data_log;\n  for (int i = 0; i < extra_size; ++i) {\n    extra_data_log << std::hex << int(extra_data[i]) << \":\";\n  }\n\n  MBLOG_INFO << \"extra_data: \" << extra_data_log.str();\n  std::string bsf_name;\n  auto ret = GetBsfName(format_ctx_->streams[stream_id_]->codecpar->codec_tag,\n                        codec_id_, extra_data, extra_size, bsf_name);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  bsf_name_list_.push_back(bsf_name);\n  bsf_ctx_list_.push_back(CreateBsfCtx(bsf_name));\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoDemuxer::GetBsfName(\n    uint32_t codec_tag, AVCodecID codec_id, uint8_t *extra_data,\n    size_t extra_size, std::string &bsf_name) {\n  char fourcc_str_array[AV_FOURCC_MAX_STRING_SIZE] = {0};\n  char *fourcc = av_fourcc_make_string(fourcc_str_array, codec_tag);\n  if (fourcc) {\n    MBLOG_INFO << \"try get bsf for Fourcc:\" << fourcc\n               << \", CodecId:\" << codec_id;\n  }\n  // 1.Judge by codec_id\n  if (codec_id == AV_CODEC_ID_H264) {\n    bsf_name = \"h264_mp4toannexb\";\n  } else if (codec_id == AV_CODEC_ID_H265) {\n    bsf_name = \"hevc_mp4toannexb\";\n  } else {\n    // Try use dump_extra\n    bsf_name = \"dump_extra\";\n  }\n\n  // 2.Judge by codec_tag & extra_data\n  if (codec_tag == 0 && IsAnnexb(extra_data, extra_size)) {\n    bsf_name = \"dump_extra\";\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nbool FfmpegVideoDemuxer::IsAnnexb(const uint8_t *extra_data,\n                                  size_t extra_size) {\n  auto size_test = !extra_size;\n  auto start_code1 = extra_size >= 3 && extra_data[0] == 0 &&\n                     extra_data[1] == 0 && extra_data[2] == 1;\n  auto 
start_code2 = extra_size >= 4 && extra_data[0] == 0 &&\n                     extra_data[1] == 0 && extra_data[2] == 0 &&\n                     extra_data[3] == 1;\n  return size_test || start_code1 || start_code2;\n}\n\nstd::shared_ptr<AVBSFContext> FfmpegVideoDemuxer::CreateBsfCtx(\n    const std::string &bsf_name, AVDictionary **options) {\n  const auto *bsf = av_bsf_get_by_name(bsf_name.c_str());\n  if (!bsf) {\n    MBLOG_ERROR << \"Get bit stream filter failed, name \" << bsf_name;\n    return nullptr;\n  }\n\n  AVBSFContext *ctx = nullptr;\n  auto ret = av_bsf_alloc(bsf, &ctx);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"Alloc bit stream filter context failed, name \" << bsf_name\n                << \", err \" << err_str;\n    return nullptr;\n  }\n\n  std::shared_ptr<AVBSFContext> bsf_ctx(\n      ctx, [](AVBSFContext *ctx) { av_bsf_free(&ctx); });\n  ret = avcodec_parameters_copy(bsf_ctx->par_in,\n                                format_ctx_->streams[stream_id_]->codecpar);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"Copy codec param to bsf \" << bsf_name << \" failed, err \"\n                << err_str;\n    return nullptr;\n  }\n\n  if (options) {\n    ret = av_opt_set_dict2(bsf_ctx.get(), options, AV_OPT_SEARCH_CHILDREN);\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, err_str);\n      MBLOG_ERROR << \"Set option to bsf \" << bsf_name << \" failed, err \"\n                  << err_str;\n      return nullptr;\n    }\n  }\n\n  ret = av_bsf_init(bsf_ctx.get());\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, err_str);\n    MBLOG_ERROR << \"Init bsf \" << bsf_name << \" failed, err \" << err_str;\n    return nullptr;\n  }\n\n  return bsf_ctx;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/ffmpeg_video_demuxer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_VIDEO_DEMUXER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_VIDEO_DEMUXER_H_\n\n#include <modelbox/base/status.h>\n\n#include <functional>\n#include <memory>\n#include <vector>\n\n#include \"ffmpeg_reader.h\"\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavutil/error.h>\n#include <libavutil/frame.h>\n#include <libavutil/opt.h>\n}\n\nclass FfmpegVideoDemuxer {\n public:\n  modelbox::Status Init(std::shared_ptr<FfmpegReader> &reader,\n                      bool key_frame_only);\n\n  modelbox::Status Demux(std::shared_ptr<AVPacket> &av_packet);\n\n  void LogStreamInfo();\n\n  AVCodecID GetCodecID();\n\n  int GetProfileID();\n\n  const AVCodecParameters *GetCodecParam();\n\n  void GetFrameRate(int32_t &rate_num, int32_t &rate_den);\n\n  void GetFrameMeta(int32_t *frame_width, int32_t *frame_height);\n\n  int32_t GetFrameRotate();\n\n  double GetTimeBase();\n\n  int64_t GetDuration();\n\n private:\n  void PrintCurrentOption(AVDictionary *options);\n\n  modelbox::Status SetupStreamInfo();\n\n  modelbox::Status GetStreamParam();\n\n  modelbox::Status GetStreamCodecID();\n\n  modelbox::Status GetStreamTimeInfo();\n\n  modelbox::Status GetStreamFrameInfo();\n\n  void RescaleFrameRate(int32_t &frame_rate_numerator,\n                        int32_t &frame_rate_denominator);\n\n 
 modelbox::Status GetStreamBsfInfo();\n\n  modelbox::Status ReadPacket(std::shared_ptr<AVPacket> &av_packet);\n\n  bool IsTargetPacket(std::shared_ptr<AVPacket> &av_packet);\n\n  modelbox::Status BsfProcess(std::shared_ptr<AVPacket> &av_packet);\n\n  modelbox::Status GetBsfName(uint32_t codec_tag, AVCodecID codec_id,\n                            uint8_t *extra_data, size_t extra_size,\n                            std::string &bsf_name);\n\n  std::shared_ptr<AVBSFContext> CreateBsfCtx(const std::string &bsf_name,\n                                             AVDictionary **options = nullptr);\n\n  bool IsAnnexb(const uint8_t *extra_data, size_t extra_size);\n\n  std::string source_url_;\n  bool key_frame_only_{false};\n  std::shared_ptr<AVFormatContext> format_ctx_;\n  int32_t stream_id_{0};\n  AVCodecID codec_id_{AVCodecID::AV_CODEC_ID_H264};\n  int32_t profile_id_{0};\n  int64_t creation_time_{0};\n  double time_base_{0};\n  int32_t frame_width_{0};\n  int32_t frame_height_{0};\n  int32_t frame_rate_numerator_{0};\n  int32_t frame_rate_denominator_{0};\n  int32_t frame_rotate_{0};\n  int32_t frame_count_{0};\n  std::vector<std::shared_ptr<AVBSFContext>> bsf_ctx_list_;\n  std::vector<std::string> bsf_name_list_;\n  std::shared_ptr<FfmpegReader> reader_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_VIDEO_DEMUXER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/video_demux_flowunit_retry_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"common/video_decoder/video_decoder_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"securec.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass VideoDemuxerFlowUnitRetryTest : public testing::Test {\n public:\n  VideoDemuxerFlowUnitRetryTest()\n      : driver_flow_(std::make_shared<MockFlow>()) {}\n\n  std::shared_ptr<MockFlow> GetDriverFlow() { return driver_flow_; };\n  std::shared_ptr<MockFlow> RunDriverFlow();\n  modelbox::Status SendDataSourceCfg(const std::string &data_source_cfg,\n                                     const std::string &source_type);\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n\n  std::string GetRtspTomlConfig();\n\n  modelbox::Status StartFlow(std::string &toml_content, uint64_t millisecond);\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n  std::shared_ptr<Flow> flow_;\n};\n\nmodelbox::Status VideoDemuxerFlowUnitRetryTest::StartFlow(\n    std::string &toml_content, const uint64_t millisecond) {\n  driver_flow_ = 
std::make_shared<MockFlow>();\n  auto ret = videodecoder::AddMockFlowUnit(driver_flow_, true);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  driver_flow_->BuildAndRun(\"VideoDecoder\", toml_content, -1);\n  std::string source_type = \"url\";\n  std::string data_source_cfg = R\"({\n        \"url\": \"rtsp://192.168.59.29:10054/live/k14XeNAIR\",\n        \"url_type\": \"stream\"\n  })\";\n  flow_ = driver_flow_->GetFlow();\n  SendDataSourceCfg(data_source_cfg, source_type);\n  return flow_->Wait(millisecond);\n}\n\nTEST_F(VideoDemuxerFlowUnitRetryTest, RtspInputTest) {\n  auto toml_content = GetRtspTomlConfig();\n  auto ret = StartFlow(toml_content, 10 * 1000);\n  EXPECT_EQ(ret, modelbox::STATUS_TIMEDOUT);\n}\n\nstd::string VideoDemuxerFlowUnitRetryTest::GetRtspTomlConfig() {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string dest_url = \"rtmp://192.168.59.29:10035/live/iEunZv0IR?sign=mPu7WDASRz\";\n  std::string toml_content =\n      R\"(\n      [log]\n      level = \"INFO\"\n      [driver]\n      skip-default = true\n      dir=[\")\" +\n      test_lib_dir + \"\\\"]\\n    \" +\n      R\"([graph]\n      thread-num = 16\n      max-thread-num = 100\n      graphconf = '''digraph demo {\n            input[type=input, device=cpu, deviceid=0]\n            data_source_parser[type=flowunit, flowunit=data_source_parser, device=cpu, deviceid=0, retry_interval_ms = 1000, obs_retry_interval_ms = 3000,url_retry_interval_ms = 1000, label=\"\", plugin_dir=\")\" +\n      test_lib_dir + R\"(\"] \n            videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\", queue_size = 16]\n            videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size = 16]  \n            // videodecoder[type=flowunit, flowunit=video_decoder, device=cuda, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, 
queue_size = 16]  \n            // videodecoder[type=flowunit, flowunit=video_decoder, device=ascend, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=nv12, queue_size = 16]  \n            videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, queue_size = 16, deviceid=0, default_dest_url=\")\" +\n      dest_url + R\"(\n            \", format=flv, encoder=libx264 ]\n            input -> data_source_parser:in_data\n            data_source_parser:out_video_url -> videodemuxer:in_video_url\n            videodemuxer:out_video_packet -> videodecoder:in_video_packet\n            videodecoder:out_video_frame -> videoencoder:in_video_frame\n          }'''\n      format = \"graphviz\"\n    )\";\n\n  return toml_content;\n}\n\nmodelbox::Status VideoDemuxerFlowUnitRetryTest::SendDataSourceCfg(\n    const std::string &data_source_cfg, const std::string &source_type) {\n  auto ext_data = flow_->CreateExternalDataMap();\n  auto buffer_list = ext_data->CreateBufferList();\n  buffer_list->Build({data_source_cfg.size()});\n  auto buffer = buffer_list->At(0);\n  memcpy_s(buffer->MutableData(), buffer->GetBytes(), data_source_cfg.data(),\n           data_source_cfg.size());\n  buffer->Set(\"source_type\", source_type);\n  ext_data->Send(\"input\", buffer_list);\n  ext_data->Close();\n  for (size_t i = 0; i < 5; ++i) {\n    // should continue reconnect\n    std::this_thread::sleep_for(std::chrono::seconds(1));\n    OutputBufferList output;\n    auto ret = ext_data->Recv(output, 100);\n    EXPECT_EQ(ret, STATUS_TIMEDOUT);\n    if (ret != STATUS_TIMEDOUT) {\n      return STATUS_FAULT;\n    }\n  }\n  // stop reconnect\n  ext_data->Shutdown();\n  Status final_state = STATUS_OK;\n  for (size_t i = 0; i < 5; ++i) {\n    // should stop reconnect, session will close\n    std::this_thread::sleep_for(std::chrono::seconds(1));\n    OutputBufferList output;\n    auto ret = ext_data->Recv(output, 100);\n    if (ret == STATUS_INVALID) {\n      final_state = ret;\n      
break;\n    }\n  }\n  EXPECT_EQ(final_state, STATUS_INVALID);\n  return modelbox::STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/video_demuxer_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_demuxer_flowunit.h\"\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nVideoDemuxerFlowUnit::VideoDemuxerFlowUnit() = default;\nVideoDemuxerFlowUnit::~VideoDemuxerFlowUnit() = default;\n\nmodelbox::Status VideoDemuxerFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  key_frame_only_ = opts->GetBool(\"key_frame_only\", false);\n  queue_size_ = opts->GetUint64(\"queue_size\", queue_size_);\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status VideoDemuxerFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoDemuxerFlowUnit::Reconnect(\n    modelbox::Status &status,\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto ret = modelbox::STATUS_CONTINUE;\n  DeferCond { return ret == modelbox::STATUS_SUCCESS; };\n  DeferCondAdd { WriteEnd(data_ctx); };\n  auto source_context = std::static_pointer_cast<modelbox::SourceContext>(\n      data_ctx->GetPrivate(DEMUX_RETRY_CONTEXT));\n  if (source_context == nullptr) {\n    if (status == modelbox::STATUS_NODATA) {\n      ret = modelbox::STATUS_SUCCESS;\n      return ret;\n    }\n    return status;\n  }\n\n  source_context->SetLastProcessStatus(status);\n  auto retry_status = source_context->NeedRetry();\n  if (retry_status == modelbox::RETRY_NONEED) {\n    ret = 
modelbox::STATUS_FAULT;\n  } else if (retry_status == modelbox::RETRY_STOP) {\n    ret = modelbox::STATUS_SUCCESS;\n  } else {\n    auto timer_task = std::static_pointer_cast<modelbox::TimerTask>(\n        data_ctx->GetPrivate(DEMUX_TIMER_TASK));\n    modelbox::TimerGlobal::Schedule(timer_task,\n                                    source_context->GetRetryInterval(), 0);\n  }\n  return ret;\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto demuxer_worker = std::static_pointer_cast<DemuxerWorker>(\n      data_ctx->GetPrivate(DEMUXER_CTX));\n  modelbox::Status demux_status = modelbox::STATUS_FAULT;\n  std::shared_ptr<AVPacket> pkt;\n  if (demuxer_worker != nullptr) {\n    demux_status = demuxer_worker->ReadPacket(pkt);\n    if (demux_status == modelbox::STATUS_NODATA) {\n      is_retry_reset_ = true;\n    }\n  }\n\n  if (demux_status == modelbox::STATUS_OK) {\n    auto video_demuxer = demuxer_worker->GetDemuxer();\n    auto ret = WriteData(data_ctx, pkt, video_demuxer);\n    if (!ret) {\n      return ret;\n    }\n\n    auto event = std::make_shared<modelbox::FlowUnitEvent>();\n    data_ctx->SendEvent(event);\n    return modelbox::STATUS_CONTINUE;\n  }\n\n  return Reconnect(demux_status, data_ctx);\n}\n\nvoid VideoDemuxerFlowUnit::WriteEnd(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto demuxer_worker = std::static_pointer_cast<DemuxerWorker>(\n      data_ctx->GetPrivate(DEMUXER_CTX));\n  auto video_demuxer = demuxer_worker->GetDemuxer();\n  auto video_packet_output = data_ctx->Output(VIDEO_PACKET_OUTPUT);\n  video_packet_output->Build({1});\n  auto end_packet = video_packet_output->At(0);\n  int32_t rate_num;\n  int32_t rate_den;\n  int32_t rotate_angle = video_demuxer->GetFrameRotate();\n  video_demuxer->GetFrameRate(rate_num, rate_den);\n  end_packet->Set(\"rate_num\", rate_num);\n  end_packet->Set(\"rate_den\", rate_den);\n  end_packet->Set(\"rotate_angle\", rotate_angle);\n  
end_packet->Set(\"duration\", video_demuxer->GetDuration());\n  end_packet->Set(\"time_base\", video_demuxer->GetTimeBase());\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::WriteData(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::shared_ptr<AVPacket> &pkt,\n    const std::shared_ptr<FfmpegVideoDemuxer> &video_demuxer) {\n  if (pkt == nullptr) {\n    // no data to send\n    return modelbox::STATUS_OK;\n  }\n\n  auto video_packet_output = data_ctx->Output(VIDEO_PACKET_OUTPUT);\n  std::vector<size_t> shape(1, (size_t)pkt->size);\n  if (pkt->size == 0) {\n    // Tell decoder end of stream\n    video_packet_output->Build({1});\n  } else {\n    video_packet_output->BuildFromHost(\n        shape, pkt->data, pkt->size,\n        [pkt](void *ptr) { /* Only capture pkt */ });\n  }\n\n  auto packet_buffer = video_packet_output->At(0);\n  if (is_retry_reset_) {\n    bool is_reset = true;\n    auto codec_id = std::make_shared<AVCodecID>(video_demuxer->GetCodecID());\n    auto source_url =\n        std::static_pointer_cast<std::string>(data_ctx->GetPrivate(SOURCE_URL));\n    packet_buffer->Set(\"reset_flag\", is_reset);\n    packet_buffer->Set(\"source_url\", *source_url);\n    packet_buffer->Set(\"codec_id\", video_demuxer->GetCodecID());\n    is_retry_reset_ = false;\n  }\n  packet_buffer->Set(\"pts\", pkt->pts);\n  packet_buffer->Set(\"dts\", pkt->dts);\n  packet_buffer->Set(\"time_base\", video_demuxer->GetTimeBase());\n  int32_t rate_num;\n  int32_t rate_den;\n  int32_t frame_width;\n  int32_t frame_height;\n  int32_t rotate_angle = video_demuxer->GetFrameRotate();\n  video_demuxer->GetFrameRate(rate_num, rate_den);\n  video_demuxer->GetFrameMeta(&frame_width, &frame_height);\n  packet_buffer->Set(\"rate_num\", rate_num);\n  packet_buffer->Set(\"rate_den\", rate_den);\n  packet_buffer->Set(\"width\", frame_width);\n  packet_buffer->Set(\"height\", frame_height);\n  packet_buffer->Set(\"rotate_angle\", rotate_angle);\n  packet_buffer->Set(\"duration\", 
video_demuxer->GetDuration());\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::CreateRetryTask(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto stream_meta = data_ctx->GetInputMeta(STREAM_META_INPUT);\n  if (stream_meta == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto source_context = std::static_pointer_cast<modelbox::SourceContext>(\n      stream_meta->GetMeta(DEMUX_RETRY_CONTEXT));\n  if (source_context == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  data_ctx->SetPrivate(DEMUX_RETRY_CONTEXT, source_context);\n  source_context->SetLastProcessStatus(modelbox::STATUS_FAULT);\n  std::weak_ptr<VideoDemuxerFlowUnit> flowunit = shared_from_this();\n  std::weak_ptr<modelbox::DataContext> data_ctx_weak = data_ctx;\n  auto timer_task =\n      std::make_shared<modelbox::TimerTask>([flowunit, data_ctx_weak]() {\n        std::shared_ptr<VideoDemuxerFlowUnit> flow_unit_ = flowunit.lock();\n        std::shared_ptr<modelbox::DataContext> data_context =\n            data_ctx_weak.lock();\n        if (flow_unit_ == nullptr || data_context == nullptr) {\n          return;\n        }\n\n        auto event = std::make_shared<modelbox::FlowUnitEvent>();\n        auto source_context = std::static_pointer_cast<modelbox::SourceContext>(\n            data_context->GetPrivate(DEMUX_RETRY_CONTEXT));\n        auto source_url = source_context->GetSourceURL();\n        modelbox::Status status = modelbox::STATUS_FAULT;\n        if (source_url) {\n          auto status = flow_unit_->InitDemuxer(data_context, source_url);\n        }\n\n        source_context->SetLastProcessStatus(status);\n        data_context->SendEvent(event);\n      });\n  timer_task->SetName(\"DemuxerReconnect\");\n  data_ctx->SetPrivate(DEMUX_TIMER_TASK, timer_task);\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<std::string> VideoDemuxerFlowUnit::GetSourceUrl(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  // Try get url 
in input meta\n  auto stream_meta = data_ctx->GetInputMeta(STREAM_META_INPUT);\n  if (stream_meta != nullptr) {\n    auto meta_value = stream_meta->GetMeta(SOURCE_URL);\n    if (meta_value != nullptr) {\n      return std::static_pointer_cast<std::string>(meta_value);\n    }\n  }\n\n  // Try get url in input buffer\n  auto inputs = data_ctx->Input(STREAM_META_INPUT);\n  if (inputs == nullptr || inputs->Size() == 0) {\n    MBLOG_ERROR << \"source url not found in input\";\n    return nullptr;\n  }\n\n  if (inputs->Size() > 1) {\n    MBLOG_WARN << \"only supports one url for a stream\";\n  }\n\n  auto input_buffer = inputs->At(0);\n  if (input_buffer == nullptr) {\n    MBLOG_ERROR << \"input buffer for demuxer is nullptr\";\n    return nullptr;\n  }\n\n  return std::make_shared<std::string>(\n      (const char *)(input_buffer->ConstData()), input_buffer->GetBytes());\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto source_url_ptr = GetSourceUrl(data_ctx);\n  if (source_url_ptr == nullptr) {\n    MBLOG_ERROR << \"Source url is null, please fill input url correctly\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto codec_id = std::make_shared<AVCodecID>();\n  auto profile_id = std::make_shared<int32_t>();\n  auto source_url = std::make_shared<std::string>();\n  auto meta = std::make_shared<modelbox::DataMeta>();\n  meta->SetMeta(CODEC_META, codec_id);\n  meta->SetMeta(PROFILE_META, profile_id);\n  meta->SetMeta(SOURCE_URL, source_url);\n  data_ctx->SetOutputMeta(VIDEO_PACKET_OUTPUT, meta);\n  data_ctx->SetPrivate(VIDEO_PACKET_OUTPUT, meta);\n\n  auto demuxer_status = InitDemuxer(data_ctx, source_url_ptr);\n\n  if (demuxer_status != modelbox::STATUS_OK) {\n    MBLOG_INFO << \"failed init Demuxer\";\n  }\n\n  auto ret = CreateRetryTask(data_ctx);\n  if (!ret && !demuxer_status) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid 
VideoDemuxerFlowUnit::UpdateStatsInfo(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::shared_ptr<FfmpegVideoDemuxer> &demuxer) {\n  auto stats = data_ctx->GetStatistics();\n  int32_t frame_rate_num = 0;\n  int32_t frame_rate_den = 0;\n  demuxer->GetFrameRate(frame_rate_num, frame_rate_den);\n  stats->AddItem(\"frame_rate_num\", frame_rate_num, true);\n  stats->AddItem(\"frame_rate_den\", frame_rate_den, true);\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::InitDemuxer(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::shared_ptr<std::string> &source_url) {\n  auto reader = std::make_shared<FfmpegReader>();\n  auto ret = reader->Open(*source_url);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_INFO << \"Open reader falied, set DEMUX_STATUS failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto video_demuxer = std::make_shared<FfmpegVideoDemuxer>();\n  ret = video_demuxer->Init(reader, key_frame_only_);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_INFO << \"video demux init falied, set DEMUX_STATUS failed\";\n    return modelbox::STATUS_FAULT;\n  }\n  video_demuxer->LogStreamInfo();\n\n  int32_t width = 0;\n  int32_t height = 0;\n  video_demuxer->GetFrameMeta(&width, &height);\n  if (width == 0 || height == 0) {\n    MBLOG_ERROR << \"video demuxer get frame meta failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto codec_id = video_demuxer->GetCodecID();\n  auto profile_id = video_demuxer->GetProfileID();\n  // reset meta value\n  auto meta = std::static_pointer_cast<modelbox::DataMeta>(\n      data_ctx->GetPrivate(VIDEO_PACKET_OUTPUT));\n  auto code_meta = std::static_pointer_cast<int>(meta->GetMeta(CODEC_META));\n  *code_meta = codec_id;\n  auto profile_meta =\n      std::static_pointer_cast<int>(meta->GetMeta(PROFILE_META));\n  *profile_meta = profile_id;\n  auto uri_meta =\n      std::static_pointer_cast<std::string>(meta->GetMeta(SOURCE_URL));\n  *uri_meta = *source_url;\n\n  auto is_rtsp = 
(source_url->find(\"rtsp://\") == 0);\n  auto demuxer_worker =\n      std::make_shared<DemuxerWorker>(is_rtsp, queue_size_, video_demuxer);\n  ret = demuxer_worker->Init();\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"init demuxer failed, ret \" << ret;\n    return ret;\n  }\n\n  data_ctx->SetPrivate(DEMUXER_CTX, demuxer_worker);\n  data_ctx->SetPrivate(SOURCE_URL, source_url);\n\n  UpdateStatsInfo(data_ctx, video_demuxer);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDemuxerFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto timer_task = std::static_pointer_cast<modelbox::TimerTask>(\n      data_ctx->GetPrivate(DEMUX_TIMER_TASK));\n\n  if (timer_task) {\n    timer_task->Stop();\n  }\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(VideoDemuxerFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitInput({STREAM_META_INPUT});\n  desc.AddFlowUnitOutput({VIDEO_PACKET_OUTPUT});\n  desc.SetFlowType(modelbox::FlowType::STREAM);\n  desc.SetStreamSameCount(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n\nDemuxerWorker::DemuxerWorker(bool is_async, size_t cache_size,\n                             std::shared_ptr<FfmpegVideoDemuxer> demuxer)\n    : is_async_(is_async),\n      cache_size_(cache_size),\n      demuxer_(std::move(demuxer)) {\n  const size_t min_cache_size = 32;\n  if (cache_size_ < min_cache_size) {\n    cache_size_ = min_cache_size;\n  }\n}\n\nDemuxerWorker::~DemuxerWorker() {\n  if (demux_thread_ != nullptr) {\n    demux_thread_running_ = false;\n    demux_thread_->join();\n  }\n}\n\nmodelbox::Status DemuxerWorker::Init() {\n  if (!is_async_) {\n    return modelbox::STATUS_OK;\n  
}\n\n  demux_thread_running_ = true;\n  demux_thread_ = std::make_shared<std::thread>([this]() {\n    while (IsRunning()) {\n      Process();\n    }\n  });\n\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<FfmpegVideoDemuxer> DemuxerWorker::GetDemuxer() const {\n  return demuxer_;\n}\n\nsize_t DemuxerWorker::GetDropCount() const { return packet_drop_count_; }\n\nmodelbox::Status DemuxerWorker::ReadPacket(\n    std::shared_ptr<AVPacket> &av_packet) {\n  if (!is_async_) {\n    return demuxer_->Demux(av_packet);\n  }\n\n  auto ret = PopCache(av_packet);\n  if (ret != modelbox::STATUS_OK) {\n    // demuxer read end\n    return last_demux_status_;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool DemuxerWorker::IsRunning() const { return demux_thread_running_; }\n\nvoid DemuxerWorker::Process() {\n  std::shared_ptr<AVPacket> av_packet;\n  last_demux_status_ = demuxer_->Demux(av_packet);\n  if (last_demux_status_ != modelbox::STATUS_OK) {\n    demux_thread_running_ = false;\n    av_packet = nullptr;\n    std::unique_lock<std::mutex> lock(packet_cache_lock_);\n    packet_cache_.push_back(av_packet);\n    packet_cache_not_empty_.notify_all();\n    return;\n  }\n\n  PushCache(av_packet);\n}\n\nvoid DemuxerWorker::PushCache(const std::shared_ptr<AVPacket> &av_packet) {\n  std::unique_lock<std::mutex> lock(packet_cache_lock_);\n  if (missing_pre_packet_) {\n    if (!IsKeyFrame(av_packet)) {\n      // not key frame, continue drop this packet\n      ++packet_drop_count_;\n      return;\n    }\n\n    // this packet is key frame, push to cache, continue decode\n    missing_pre_packet_ = false;\n    packet_cache_.push_back(av_packet);\n    packet_cache_not_empty_.notify_all();\n    return;\n  }\n\n  if (packet_cache_.size() >= cache_size_) {\n    // need drop packet in cache\n    do {\n      // drop front until key frame\n      packet_cache_.pop_front();\n      ++packet_drop_count_;\n      if (!packet_cache_.empty()) {\n        continue;\n      }\n\n      // all cache 
dropped\n      if (!IsKeyFrame(av_packet)) {\n        // not key frame, drop this packet too\n        // set flag to wait next key frame\n        missing_pre_packet_ = true;\n        ++packet_drop_count_;\n        return;\n      }\n\n      // this is key frame, push to cache\n      break;\n    } while (!IsKeyFrame(packet_cache_.front()));\n\n    // find key frame, push this packet to cache\n  }\n\n  // push this packet to cache\n  packet_cache_.push_back(av_packet);\n  packet_cache_not_empty_.notify_all();\n}\n\nmodelbox::Status DemuxerWorker::PopCache(std::shared_ptr<AVPacket> &av_packet) {\n  std::unique_lock<std::mutex> lock(packet_cache_lock_);\n  packet_cache_not_empty_.wait_for(lock, std::chrono::milliseconds(20),\n                                   [&]() { return !packet_cache_.empty(); });\n  if (packet_cache_.empty()) {\n    // avoid to stuck other stream in node::run, we need return when\n    // packet_cache has no data\n    av_packet = nullptr;\n    return modelbox::STATUS_OK;\n  }\n\n  av_packet = packet_cache_.front();\n  if (av_packet == nullptr) {\n    // stream end, keep nullptr in cache\n    return modelbox::STATUS_NODATA;\n  }\n\n  packet_cache_.pop_front();\n  return modelbox::STATUS_OK;\n}\n\nbool DemuxerWorker::IsKeyFrame(const std::shared_ptr<AVPacket> &av_packet) {\n  return (av_packet->flags & AV_PKT_FLAG_KEY) != 0;\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/video_demuxer_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include \"ffmpeg_video_demuxer.h\"\n#include \"modelbox/flowunit.h\"\n#include \"source_context.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_demuxer\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video demuxer flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer data indicate video file path or \"\n    \"stream path, the output \"\n    \"port buffer type is video_packet.\\n\"\n    \"\\t  The video_packet buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: pts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: dts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: time_base,     Type: double\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t@Constraint: The flowuint 'video_decoder' must be used pair \"\n    \"with 'video_demuxer. 
\";\nconstexpr const char *SOURCE_URL = \"source_url\";\nconstexpr const char *CODEC_META = \"codec_meta\";\nconstexpr const char *PROFILE_META = \"profile_meta\";\nconstexpr const char *DEMUXER_CTX = \"demuxer_ctx\";\nconstexpr const char *STREAM_META_INPUT = \"in_video_url\";\nconstexpr const char *VIDEO_PACKET_OUTPUT = \"out_video_packet\";\nconstexpr const char *DEMUX_RETRY_CONTEXT = \"source_context\";\nconstexpr const char *DEMUX_TIMER_TASK = \"demux_timer_task\";\n\nenum DemuxStatus { DEMUX_FAIL = 0, DEMUX_SUCCESS = 1 };\n\nclass VideoDemuxerFlowUnit\n    : public modelbox::FlowUnit,\n      public std::enable_shared_from_this<VideoDemuxerFlowUnit> {\n public:\n  VideoDemuxerFlowUnit();\n  ~VideoDemuxerFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  std::shared_ptr<std::string> GetSourceUrl(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  modelbox::Status Reconnect(modelbox::Status &status,\n                             std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status CreateRetryTask(\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status WriteData(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::shared_ptr<AVPacket> &pkt,\n      const 
std::shared_ptr<FfmpegVideoDemuxer> &video_demuxer);\n  void WriteEnd(std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  modelbox::Status InitDemuxer(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                               std::shared_ptr<std::string> &source_url);\n\n  void UpdateStatsInfo(const std::shared_ptr<modelbox::DataContext> &data_ctx,\n                       const std::shared_ptr<FfmpegVideoDemuxer> &demuxer);\n\n  bool key_frame_only_{false};\n  size_t queue_size_{32};\n  bool is_retry_reset_{false};\n};\n\nclass DemuxerWorker {\n public:\n  DemuxerWorker(bool is_async, size_t cache_size,\n                std::shared_ptr<FfmpegVideoDemuxer> demuxer);\n\n  virtual ~DemuxerWorker();\n\n  modelbox::Status Init();\n\n  std::shared_ptr<FfmpegVideoDemuxer> GetDemuxer() const;\n\n  size_t GetDropCount() const;\n\n  modelbox::Status ReadPacket(std::shared_ptr<AVPacket> &av_packet);\n\n  bool IsRunning() const;\n\n  void Process();\n\n private:\n  void PushCache(const std::shared_ptr<AVPacket> &av_packet);\n\n  modelbox::Status PopCache(std::shared_ptr<AVPacket> &av_packet);\n\n  bool IsKeyFrame(const std::shared_ptr<AVPacket> &av_packet);\n\n  bool is_async_{false};\n  size_t cache_size_{0};\n  std::shared_ptr<FfmpegVideoDemuxer> demuxer_;\n\n  std::atomic_bool demux_thread_running_{false};\n  std::shared_ptr<std::thread> demux_thread_;\n\n  std::mutex packet_cache_lock_;\n  std::condition_variable packet_cache_not_empty_;\n  std::list<std::shared_ptr<AVPacket>> packet_cache_;\n  modelbox::Status last_demux_status_;\n  size_t packet_drop_count_{0};\n  bool missing_pre_packet_{false};\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_DEMUXER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_demuxer/video_demuxer_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\nclass VideoDemuxerFlowUnitTest : public testing::Test {\n public:\n  VideoDemuxerFlowUnitTest() = default;\n  ~VideoDemuxerFlowUnitTest() override = default;\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n};\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"video_encoder\")\n\nif (NOT FFMPEG_FOUND) \n    message(STATUS \"Not found ffmpeg, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nfind_package(FFMPEG)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_VIDEO_DECODE_INCLUDE})\n\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_VIDEO_DECODE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_video_encoder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_video_encoder.h\"\n\n#include <modelbox/base/log.h>\n\n#include \"video_decode_common.h\"\n\nmodelbox::Status FfmpegVideoEncoder::Init(int32_t width, int32_t height,\n                                          const AVRational &frame_rate,\n                                          uint64_t bit_rate,\n                                          const std::string &encoder_name) {\n#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)\n  av_register_all();\n#endif\n  auto *codec = avcodec_find_encoder_by_name(encoder_name.c_str());\n  if (codec == nullptr) {\n    MBLOG_ERROR << \"Find encoder failed, encoder name:\" << encoder_name;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto *codec_ctx = avcodec_alloc_context3(codec);\n  if (codec_ctx == nullptr) {\n    MBLOG_ERROR << \"Alloc codec ctx failed, encoder name:\" << encoder_name;\n    return modelbox::STATUS_FAULT;\n  }\n\n  codec_ctx_.reset(codec_ctx,\n                   [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });\n  AVDictionary *param = nullptr;\n  SetupCodecParam(width, height, frame_rate, bit_rate, param, codec_ctx_);\n  auto ffmpeg_ret = avcodec_open2(codec_ctx_.get(), codec, &param);\n  av_dict_free(&param);\n  if (ffmpeg_ret < 0) {\n    GET_FFMPEG_ERR(ffmpeg_ret, ffmpeg_err);\n    MBLOG_ERROR << \"avcodec_open2 failed, 
ret \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid FfmpegVideoEncoder::SetupCodecParam(\n    int32_t width, int32_t height, const AVRational &frame_rate,\n    uint64_t bit_rate, AVDictionary *&param,\n    std::shared_ptr<AVCodecContext> &codec_ctx) {\n  av_dict_set(&param, \"preset\", \"fast\", 0);\n  codec_ctx->framerate = frame_rate;\n  codec_ctx_->bit_rate = bit_rate;\n  codec_ctx->time_base = av_inv_q(frame_rate);\n  codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;\n  codec_ctx->width = width;\n  codec_ctx->height = height;\n  codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;\n  codec_ctx->max_b_frames = 0;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Encode(\n    const std::shared_ptr<AVFrame> &av_frame,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  auto ret = avcodec_send_frame(codec_ctx_.get(), av_frame.get());\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avcodec_send_frame failed, ret \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  do {\n    auto *av_packet_ptr = av_packet_alloc();\n    if (av_packet_ptr == nullptr) {\n      MBLOG_ERROR << \"av packet alloc failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    std::shared_ptr<AVPacket> av_packet(\n        av_packet_ptr, [](AVPacket *pkt) { av_packet_free(&pkt); });\n    ret = avcodec_receive_packet(codec_ctx_.get(), av_packet.get());\n    if (ret == AVERROR(EAGAIN)) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    if (ret == AVERROR_EOF) {\n      return modelbox::STATUS_NODATA;\n    }\n\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, err_str);\n      MBLOG_ERROR << \"avcodec_receive_packet failed, err \" << err_str;\n      return modelbox::STATUS_FAULT;\n    }\n\n    av_packet_list.push_back(av_packet);\n  } while (ret >= 0);\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_video_encoder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_ENCODER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_ENCODER_H_\n\n#include <modelbox/base/status.h>\n\n#include <memory>\n#include <vector>\nextern \"C\" {\n#include <libavcodec/avcodec.h>\n#include <libavformat/avformat.h>\n}\n\nclass FfmpegVideoEncoder {\n public:\n  modelbox::Status Init(int32_t width, int32_t height,\n                        const AVRational &frame_rate, uint64_t bit_rate,\n                        const std::string &encoder_name);\n\n  modelbox::Status Encode(\n      const std::shared_ptr<AVFrame> &av_frame,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  std::shared_ptr<AVCodecContext> GetCtx() { return codec_ctx_; }\n\n private:\n  void SetupCodecParam(int32_t width, int32_t height,\n                       const AVRational &frame_rate, uint64_t bit_rate,\n                       AVDictionary *&param,\n                       std::shared_ptr<AVCodecContext> &codec_ctx);\n\n  std::shared_ptr<AVCodecContext> codec_ctx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_ENCODER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_video_muxer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"ffmpeg_video_muxer.h\"\n#include <modelbox/base/log.h>\n#include \"video_decode_common.h\"\n\nmodelbox::Status FfmpegVideoMuxer::Init(\n    const std::shared_ptr<AVCodecContext> &codec_ctx,\n    const std::shared_ptr<FfmpegWriter> &writer) {\n  destination_url_ = writer->GetDestinationURL();\n  format_ctx_ = writer->GetCtx();\n  auto ret = SetupStreamParam(codec_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoMuxer::SetupStreamParam(\n    const std::shared_ptr<AVCodecContext> &codec_ctx) {\n  stream_ = avformat_new_stream(format_ctx_.get(), codec_ctx->codec);\n  if (stream_ == nullptr) {\n    MBLOG_ERROR << \"Create video stream failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  stream_->time_base = codec_ctx->time_base;\n  auto ret =\n      avcodec_parameters_from_context(stream_->codecpar, codec_ctx.get());\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avcodec_parameters_from_context err \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoMuxer::Mux(const AVRational &time_base,\n                             const std::shared_ptr<AVPacket> &av_packet) {\n  
av_packet_rescale_ts(av_packet.get(), time_base, stream_->time_base);\n  av_packet->stream_index = stream_->index;\n  if (!is_header_wrote_) {\n    auto ret = avformat_write_header(format_ctx_.get(), nullptr);\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      MBLOG_ERROR << \"avformat_write_header failed, ret \" << ffmpeg_err;\n      return modelbox::STATUS_FAULT;\n    }\n\n    is_header_wrote_ = true;\n  }\n\n  auto ret = av_interleaved_write_frame(format_ctx_.get(), av_packet.get());\n  if (ret < 0) {\n    if (ret == AVERROR(EPIPE) || ret == AVERROR_EOF) {\n      MBLOG_ERROR << \"remote end closed the connection\";\n      return modelbox::STATUS_NOSTREAM;\n    }\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"av_write_frame failed, ret \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nFfmpegVideoMuxer::~FfmpegVideoMuxer() {\n  if (is_header_wrote_) {\n    auto ret = av_write_trailer(format_ctx_.get());\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      MBLOG_ERROR << \"av_write_trailer failed, ret \" << ffmpeg_err;\n    }\n  }\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_video_muxer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_\n\n#include <modelbox/base/status.h>\n#include <memory>\n#include <string>\n#include \"ffmpeg_writer.h\"\n\nclass FfmpegVideoMuxer {\n public:\n  modelbox::Status Init(const std::shared_ptr<AVCodecContext> &codec_ctx,\n                        const std::shared_ptr<FfmpegWriter> &writer);\n\n  modelbox::Status Mux(const AVRational &time_base,\n                     const std::shared_ptr<AVPacket> &av_packet);\n\n  virtual ~FfmpegVideoMuxer();\n private:\n  modelbox::Status SetupStreamParam(\n      const std::shared_ptr<AVCodecContext> &codec_ctx);\n\n  std::shared_ptr<AVFormatContext> format_ctx_;\n  std::string destination_url_;\n  AVStream *stream_{nullptr};\n  bool is_header_wrote_{false};\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_writer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"ffmpeg_writer.h\"\n#include <modelbox/base/log.h>\n#include \"video_decode_common.h\"\n\nextern \"C\" {\n#include <libavutil/opt.h>\n}\n\nmodelbox::Status FfmpegWriter::Open(const std::string &format_name,\n                          const std::string &destination_url) {\n#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)\n  av_register_all();\n#endif\n  auto ret = avformat_network_init();\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avformat_network_init, err \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  format_name_ = format_name;\n  destination_url_ = destination_url;\n\n  AVFormatContext *format_ctx = nullptr;\n  ret = avformat_alloc_output_context2(\n      &format_ctx, nullptr, format_name.c_str(), destination_url.c_str());\n  if (ret < 0 || format_ctx == nullptr) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avformat_alloc_output_context2 failed, format \"\n                << format_name << \", dest_url \" << destination_url << \", ret \"\n                << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  format_ctx_.reset(format_ctx,\n                    [](AVFormatContext *ctx) { avformat_free_context(ctx); });\n  if (format_name_ != \"rtsp\") {\n    ret = avio_open2(&format_ctx_->pb, destination_url.c_str(), 
AVIO_FLAG_WRITE,\n                     nullptr, nullptr);\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      MBLOG_ERROR << \"avio_open2 failed, url \" << destination_url << \", format \"\n                  << format_name << \", ret \" << ffmpeg_err;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  MBLOG_INFO << \"Open url \" << destination_url << \", format \" << format_name\n             << \" success\";\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/ffmpeg_writer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_\n\n#include <modelbox/base/status.h>\n#include <memory>\n#include <string>\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n}\n\nclass FfmpegWriter {\n public:\n  modelbox::Status Open(const std::string &format_name,\n                      const std::string &destination_url);\n\n  std::string GetFormatName() { return format_name_; }\n\n  std::string GetDestinationURL() { return destination_url_; }\n\n  std::shared_ptr<AVFormatContext> GetCtx() { return format_ctx_; }\n\n private:\n  std::string format_name_;\n  std::string destination_url_;\n  std::shared_ptr<AVFormatContext> format_ctx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/video_encoder_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_encoder_flowunit.h\"\n\n#include <securec.h>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nVideoEncoderFlowUnit::VideoEncoderFlowUnit() = default;\nVideoEncoderFlowUnit::~VideoEncoderFlowUnit() = default;\n\nconst std::set<std::string> g_supported_fmt = {\"rtsp\", \"flv\", \"mp4\"};\n\nmodelbox::Status VideoEncoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  default_dest_url_ = opts->GetString(\"default_dest_url\", \"\");\n\n  format_name_ = opts->GetString(\"format\", \"rtsp\");\n  if (format_name_ != \"rtsp\" && format_name_ != \"flv\" &&\n      format_name_ != \"mp4\") {\n    MBLOG_ERROR << \"Bad value [\" << format_name_\n                << \"] for format, must be one of [rtsp|flv|mp4]\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  bit_rate_ = opts->GetUint64(\"bit_rate\", 3200000);\n  encoder_name_ = opts->GetString(\"encoder\", \"mpeg4\");\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoEncoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto muxer = std::static_pointer_cast<FfmpegVideoMuxer>(\n      data_ctx->GetPrivate(MUXER_CTX));\n  auto encoder = 
std::static_pointer_cast<FfmpegVideoEncoder>(\n      data_ctx->GetPrivate(ENCODER_CTX));\n  auto color_cvt = std::static_pointer_cast<FfmpegColorConverter>(\n      data_ctx->GetPrivate(COLOR_CVT_CTX));\n  if (muxer == nullptr || encoder == nullptr || color_cvt == nullptr) {\n    MBLOG_ERROR << \"Stream not inited\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::vector<std::shared_ptr<AVFrame>> av_frame_list;\n  auto ret = ReadFrames(color_cvt, data_ctx, av_frame_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read input frame failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (reopen_remote_ == true) {\n\n    static time_t last_time = 0;\n    time_t now = time(nullptr);\n\n    if (now - last_time < 5) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    muxer = nullptr;\n    encoder = nullptr;\n    color_cvt = nullptr;\n\n    auto frame_buffer_list = data_ctx->Input(FRAME_INFO_INPUT);\n    auto buffer = frame_buffer_list->At(0);\n\n    int32_t width = 0;\n    int32_t height = 0;\n    int32_t rate_num = 0;\n    int32_t rate_den = 0;\n\n    buffer->Get(\"width\", width);\n    buffer->Get(\"height\", height);\n    buffer->Get(\"rate_num\", rate_num);\n    buffer->Get(\"rate_den\", rate_den);\n\n    if (width == 0 || height == 0 || rate_num == 0 || rate_den == 0) {\n      MBLOG_ERROR << \"buffer meta is invalid\";\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    CloseMuexer(data_ctx);\n    if (OpenMuxer(data_ctx, width, height, rate_num, rate_den, \"\") !=\n        modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Open muxer failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    muxer = std::static_pointer_cast<FfmpegVideoMuxer>(\n        data_ctx->GetPrivate(MUXER_CTX));\n    encoder = std::static_pointer_cast<FfmpegVideoEncoder>(\n        data_ctx->GetPrivate(ENCODER_CTX));\n    color_cvt = std::static_pointer_cast<FfmpegColorConverter>(\n        data_ctx->GetPrivate(COLOR_CVT_CTX));\n    if (muxer == nullptr || 
encoder == nullptr || color_cvt == nullptr) {\n      MBLOG_ERROR << \"Open muxer failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    reopen_remote_ = false;\n  }\n\n  std::vector<std::shared_ptr<AVPacket>> av_packet_list;\n  ret = EncodeFrame(encoder, av_frame_list, av_packet_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Encode frame failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  ret = MuxPacket(muxer, encoder->GetCtx()->time_base, av_packet_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    if (ret == modelbox::STATUS_NOSTREAM) {\n      MBLOG_WARN << \"No stream to mux, retry.\";\n      reopen_remote_ = true;\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    MBLOG_ERROR << \"Mux packet failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::ReadFrames(\n    const std::shared_ptr<FfmpegColorConverter> &color_cvt,\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<std::shared_ptr<AVFrame>> &av_frame_list) {\n  auto frame_buffer_list = data_ctx->Input(FRAME_INFO_INPUT);\n  if (frame_buffer_list == nullptr || frame_buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"Input frame list is empty\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto frame_index_ptr =\n      std::static_pointer_cast<int64_t>(data_ctx->GetPrivate(FRAME_INDEX_CTX));\n  for (auto frame_buffer : *frame_buffer_list) {\n    std::shared_ptr<AVFrame> av_frame;\n    auto ret = ReadFrameFromBuffer(frame_buffer, av_frame);\n    av_frame->pts = *frame_index_ptr;\n    ++(*frame_index_ptr);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Read frame from buffer failed\";\n      return ret;\n    }\n\n    std::shared_ptr<AVFrame> yuv420p_frame;\n    ret = CvtFrameToYUV420P(color_cvt, av_frame, yuv420p_frame);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Convert frame to yuv420p failed\";\n      return ret;\n    
}\n\n    av_frame_list.push_back(yuv420p_frame);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::ReadFrameFromBuffer(\n    std::shared_ptr<modelbox::Buffer> &frame_buffer,\n    std::shared_ptr<AVFrame> &av_frame) {\n  auto *frame_ptr = av_frame_alloc();\n  if (frame_ptr == nullptr) {\n    MBLOG_ERROR << \"Alloca frame failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  av_frame.reset(frame_ptr, [](AVFrame *ptr) { av_frame_free(&ptr); });\n  frame_buffer->Get(\"width\", av_frame->width);\n  frame_buffer->Get(\"height\", av_frame->height);\n  std::string pix_fmt;\n  frame_buffer->Get(\"pix_fmt\", pix_fmt);\n  auto iter = videodecode::g_av_pix_fmt_map.find(pix_fmt);\n  if (iter == videodecode::g_av_pix_fmt_map.end()) {\n    MBLOG_ERROR << \"Encoder not support pix fmt \" << pix_fmt;\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n  av_frame->format = iter->second;\n  auto ret =\n      av_image_fill_arrays(av_frame->data, av_frame->linesize,\n                           (const uint8_t *)frame_buffer->ConstData(),\n                           iter->second, av_frame->width, av_frame->height, 1);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avpicture_fill failed, err \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::CvtFrameToYUV420P(\n    const std::shared_ptr<FfmpegColorConverter> &color_cvt,\n    const std::shared_ptr<AVFrame> &origin,\n    std::shared_ptr<AVFrame> &yuv420p_frame) {\n  auto *frame = av_frame_alloc();\n  if (frame == nullptr) {\n    MBLOG_ERROR << \"Alloc frame failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  yuv420p_frame.reset(frame, [](AVFrame *ptr) {\n    av_freep(&ptr->data[0]);\n    av_frame_free(&ptr);\n  });\n  yuv420p_frame->width = origin->width;\n  yuv420p_frame->height = origin->height;\n  yuv420p_frame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;\n  yuv420p_frame->pts = 
origin->pts;\n  auto ffmepg_ret = av_image_alloc(yuv420p_frame->data, yuv420p_frame->linesize,\n                                   yuv420p_frame->width, yuv420p_frame->height,\n                                   AVPixelFormat::AV_PIX_FMT_YUV420P, 1);\n  if (ffmepg_ret < 0) {\n    GET_FFMPEG_ERR(ffmepg_ret, ffmpeg_err);\n    MBLOG_ERROR << \"av_image_alloc failed, width \" << yuv420p_frame->width\n                << \",height \" << yuv420p_frame->height << \",err \" << ffmpeg_err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto ret = color_cvt->CvtColor(origin, yuv420p_frame->data[0],\n                                 AVPixelFormat::AV_PIX_FMT_YUV420P);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Conver color failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::EncodeFrame(\n    const std::shared_ptr<FfmpegVideoEncoder> &encoder,\n    const std::vector<std::shared_ptr<AVFrame>> &av_frame_list,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  for (const auto &frame : av_frame_list) {\n    auto ret = encoder->Encode(frame, av_packet_list);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Encoder encode frame failed\";\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::MuxPacket(\n    const std::shared_ptr<FfmpegVideoMuxer> &muxer, const AVRational &time_base,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  for (const auto &packet : av_packet_list) {\n    auto ret = muxer->Mux(time_base, packet);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Muxer mux packet failed\";\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::OpenMuxer(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx, int32_t width,\n    int32_t height, int32_t rate_num, int32_t rate_den, std::string dest_url) {\n  
MBLOG_WARN << \"OpenMuxer, width \" << width << \", height \" << height\n             << \", rate_num \" << rate_num << \", rate_den \" << rate_den\n             << \", dest_url \" << dest_url;\n\n  if (rate_num == 0 || rate_den == 0) {\n    rate_num = 25;\n    rate_den = 1;\n  }\n\n  if (dest_url == \"\") {\n    auto dest_url_ptr =\n        std::static_pointer_cast<std::string>(data_ctx->GetPrivate(\"dest_url\"));\n    if (dest_url_ptr != nullptr) {\n      dest_url = *dest_url_ptr;\n    }\n\n    if (dest_url == \"\") {\n      MBLOG_ERROR << \"dest_url is empty\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  auto encoder = std::make_shared<FfmpegVideoEncoder>();\n  auto ret = encoder->Init(width, height, {rate_num, rate_den}, bit_rate_,\n                           encoder_name_);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init encoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto writer = std::make_shared<FfmpegWriter>();\n  ret = writer->Open(format_name_, dest_url);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Open ffmepg writer failed, format \" << format_name_\n                << \", url \" << dest_url;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto muxer = std::make_shared<FfmpegVideoMuxer>();\n  ret = muxer->Init(encoder->GetCtx(), writer);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init muxer failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto color_cvt = std::make_shared<FfmpegColorConverter>();\n\n  data_ctx->SetPrivate(MUXER_CTX, muxer);\n  data_ctx->SetPrivate(ENCODER_CTX, encoder);\n  data_ctx->SetPrivate(COLOR_CVT_CTX, color_cvt);\n  auto frame_index_ptr = std::make_shared<int64_t>(0);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index_ptr);\n  data_ctx->SetPrivate(\"dest_url\", std::make_shared<std::string>(dest_url));\n  MBLOG_INFO << \"Video encoder init success\"\n             << \", width \" << width << \", height \" << height << \", rate \"\n      
       << rate_num << \"/\" << rate_den << \", format \" << format_name_\n             << \", destination url \" << dest_url << \", encoder \"\n             << encoder_name_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::CloseMuexer(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  data_ctx->SetPrivate(MUXER_CTX, nullptr);\n  data_ctx->SetPrivate(ENCODER_CTX, nullptr);\n  data_ctx->SetPrivate(COLOR_CVT_CTX, nullptr);\n\n  auto frame_index_ptr = std::make_shared<int64_t>(0);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index_ptr);\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::string dest_url;\n  auto ret = GetDestUrl(data_ctx, dest_url);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto frame_buffer_list = data_ctx->Input(FRAME_INFO_INPUT);\n  if (frame_buffer_list == nullptr || frame_buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"Input [frame_info] is empty\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto frame_buffer = frame_buffer_list->At(0);\n  int32_t width = 0;\n  int32_t height = 0;\n  int32_t rate_num = 0;\n  int32_t rate_den = 0;\n  frame_buffer->Get(\"width\", width);\n  frame_buffer->Get(\"height\", height);\n  frame_buffer->Get(\"rate_num\", rate_num);\n  frame_buffer->Get(\"rate_den\", rate_den);\n\n  if (width == 0 || height == 0) {\n    MBLOG_ERROR << \"buffer meta is invalid\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  return OpenMuxer(data_ctx, width, height, rate_num, rate_den, dest_url);\n}\n\nmodelbox::Status VideoEncoderFlowUnit::GetDestUrl(\n    std::shared_ptr<modelbox::DataContext> &data_ctx, std::string &dest_url) {\n  auto stream_meta = data_ctx->GetInputMeta(FRAME_INFO_INPUT);\n  if (stream_meta != nullptr) {\n    auto dest_url_ptr =\n        std::static_pointer_cast<std::string>(stream_meta->GetMeta(DEST_URL));\n    if 
(dest_url_ptr != nullptr) {\n      dest_url = *dest_url_ptr;\n      return modelbox::STATUS_SUCCESS;\n    }\n  }\n\n  MBLOG_WARN\n      << \"Input meta [dest_url] should be set in port [in_video_frame] for \"\n         \"each stream, Use default_dest_url in config is only \"\n         \"for debug\";\n  if (default_dest_url_.empty()) {\n    MBLOG_ERROR << \"default_dest_url in config is empty, no dest url available\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  dest_url = default_dest_url_;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoEncoderFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(VideoEncoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitInput({FRAME_INFO_INPUT});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"default_dest_url\", \"string\", true, \"\", \"the encoder dest url\"));\n\n  std::map<std::string, std::string> fmt_list;\n\n  for (const auto &item : g_supported_fmt) {\n    fmt_list[item] = item;\n  }\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"format\", \"list\", true, \"rtsp\", \"the encoder format\", fmt_list));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"encoder\", \"string\", true, \"mpeg4\", \"the encoder method\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/video_encoder_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <vector>\n\n#include \"ffmpeg_video_encoder.h\"\n#include \"ffmpeg_video_muxer.h\"\n#include \"ffmpeg_writer.h\"\n#include \"modelbox/flowunit.h\"\n#include \"video_decode_common.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_encoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video encoder flowunit on cpu. 
\\n\"\n    \"\\t@Port parameter: The input port buffer meta type is image \\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: \"\n    \"'pix_fmt': \"\n    \"[rgb, bgr, nv12], 'layout': [hwc]. \";\nconstexpr const char *DEST_URL = \"dest_url\";\nconstexpr const char *COLOR_CVT_CTX = \"color_cvt_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const char *ENCODER_CTX = \"encoder_ctx\";\nconstexpr const char *MUXER_CTX = \"muxer_ctx\";\nconstexpr const char *FORMAT_NAME = \"format_name\";\nconstexpr const char *CODEC_NAME = \"codec_name\";\nconstexpr const char *DESTINATION_URL = \"destination_url\";\nconstexpr const char *FRAME_INFO_INPUT = \"in_video_frame\";\n\nclass VideoEncoderFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoEncoderFlowUnit();\n  ~VideoEncoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      
std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status GetDestUrl(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                              std::string &dest_url);\n\n  modelbox::Status ReadFrames(\n      const std::shared_ptr<FfmpegColorConverter> &color_cvt,\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<std::shared_ptr<AVFrame>> &av_frame_list);\n\n  modelbox::Status ReadFrameFromBuffer(\n      std::shared_ptr<modelbox::Buffer> &frame_buffer,\n      std::shared_ptr<AVFrame> &av_frame);\n\n  modelbox::Status CvtFrameToYUV420P(\n      const std::shared_ptr<FfmpegColorConverter> &color_cvt,\n      const std::shared_ptr<AVFrame> &origin,\n      std::shared_ptr<AVFrame> &yuv420p_frame);\n\n  modelbox::Status EncodeFrame(\n      const std::shared_ptr<FfmpegVideoEncoder> &encoder,\n      const std::vector<std::shared_ptr<AVFrame>> &av_frame_list,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  modelbox::Status MuxPacket(\n      const std::shared_ptr<FfmpegVideoMuxer> &muxer,\n      const AVRational &time_base,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  modelbox::Status OpenMuxer(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx, int32_t width,\n      int32_t height, int32_t rate_num, int32_t rate_den, std::string dest_url);\n\n  modelbox::Status CloseMuexer(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n  std::string default_dest_url_;\n  std::string format_name_;\n  std::string encoder_name_;\n  uint64_t bit_rate_{0};\n  bool reopen_remote_{false};\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_encoder/video_encoder_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass VideoEncoderFlowUnitTest : public testing::Test {\n public:\n  VideoEncoderFlowUnitTest() = default;\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n\n  void StartFlow(std::string& toml_content, uint64_t millisecond);\n\n private:\n  Status AddMockFlowUnit();\n};\n\nvoid VideoEncoderFlowUnitTest::StartFlow(std::string& toml_content,\n                                         const uint64_t millisecond) {\n  flow_ = std::make_shared<MockFlow>();\n  auto ret = AddMockFlowUnit();\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  ret = flow_->BuildAndRun(\"VideoEncoder\", toml_content, millisecond);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nStatus VideoEncoderFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"encoder_start_unit\", {}, {\"stream_meta\"});\n    auto open_func =\n        [=](const std::shared_ptr<modelbox::Configuration>& 
flow_option,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          auto ext_data = mock_flowunit->CreateExternalData();\n          EXPECT_NE(ext_data, nullptr);\n          auto buffer_list = ext_data->CreateBufferList();\n          buffer_list->Build({1});\n          auto status = ext_data->Send(buffer_list);\n          EXPECT_EQ(status, STATUS_SUCCESS);\n          status = ext_data->Close();\n          EXPECT_EQ(status, STATUS_SUCCESS);\n          return modelbox::STATUS_OK;\n        };\n    auto data_pre_func =\n        [&](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          MBLOG_INFO << \"stream_meta  \"\n                     << \"DataPre\";\n          auto test_meta = std::make_shared<std::string>(\"test\");\n          auto data_meta = std::make_shared<DataMeta>();\n          data_meta->SetMeta(\"test\", test_meta);\n          data_ctx->SetOutputMeta(\"stream_meta\", data_meta);\n          return modelbox::STATUS_OK;\n        };\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          auto output_buf = data_ctx->Output(\"stream_meta\");\n          std::vector<size_t> shape(1, 1);\n          output_buf->Build(shape);\n\n          return modelbox::STATUS_OK;\n        };\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterOpenFunc(open_func);\n    mock_functions->RegisterDataPreFunc(data_pre_func);\n    mock_functions->RegisterProcessFunc(process_func);\n    flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"encoder_image_produce\",\n                                          {\"stream_meta\"}, {\"frame_info\"});\n    mock_desc->SetOutputType(EXPAND);\n    auto process_func =\n        [=](const 
std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          std::string img_path;\n          static int64_t frame_index = 0;\n          if ((frame_index / 24) % 2 == 0) {\n            img_path =\n                std::string(TEST_ASSETS) + \"/video/rgb_460800_480x320_a.data\";\n          } else {\n            img_path =\n                std::string(TEST_ASSETS) + \"/video/rgb_460800_480x320_b.data\";\n          }\n\n          std::ifstream img_file(img_path);\n          if (!img_file.is_open()) {\n            MBLOG_ERROR << \"Open failed, path \" << img_path;\n            return STATUS_FAULT;\n          }\n\n          size_t file_size = 460800;\n          auto output_buff_list = data_ctx->Output(\"frame_info\");\n          std::vector<size_t> shape(1, file_size);\n          output_buff_list->Build(shape);\n          auto output_buff = output_buff_list->At(0);\n          auto* ptr = (char*)output_buff->MutableData();\n          img_file.read(ptr, file_size);\n          output_buff->Set(\"width\", 480);\n          output_buff->Set(\"height\", 320);\n          output_buff->Set(\"rate_num\", 24);\n          output_buff->Set(\"rate_den\", 1);\n          output_buff->Set(\"pix_fmt\", std::string(\"rgb\"));\n          output_buff->Set(\"index\", frame_index);\n\n          if (frame_index == 1339) {  // 60S\n            return modelbox::STATUS_STOP;\n          }\n\n          ++frame_index;\n          auto event = std::make_shared<FlowUnitEvent>();\n          data_ctx->SendEvent(event);\n          return modelbox::STATUS_CONTINUE;\n        };\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterProcessFunc(process_func);\n    flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nTEST_F(VideoEncoderFlowUnitTest, InitUnit) {\n  const std::string test_lib_dir = 
TEST_DRIVER_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  auto ret = system(\"nc localhost 554 -z\");\n  if (errno != 0 || ret != 0) {\n    GTEST_SKIP();\n  }\n\n  std::string dest_url = \"rtsp://localhost/test_\" + std::to_string(rand());\n  std::string toml_content = R\"(\n      [driver]\n      skip-default = true\n      dir=[\")\" + test_lib_dir +\n                             \"\\\",\\\"\" + test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n      graphconf = '''digraph demo {\n            encoder_start_unit[type=flowunit, flowunit=encoder_start_unit, device=cpu, deviceid=0, label=\"<stream_meta>\"]\n            encoder_image_produce[type=flowunit, flowunit=encoder_image_produce, device=cpu, deviceid=0, label=\"<stream_meta> | <frame_info>\"]\n            videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size_frame_info=16, default_dest_url=\")\" +\n                             dest_url + R\"(\"]\n            encoder_start_unit:stream_meta -> encoder_image_produce:stream_meta\n            encoder_image_produce:frame_info -> videoencoder:in_video_frame\n          }'''\n      format = \"graphviz\"\n    )\";\n  StartFlow(toml_content, 1000 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_input/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"video_input\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable resize flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\n\nset(MODELBOX_UNIT_SHARED modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CPU_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_UNIT_LINK_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cpu-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEOINPUT_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${MODELBOX_UNIT_LINK_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_input/video_input_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_input_flowunit.h\"\n\n#include <securec.h>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nVideoInputFlowUnit::VideoInputFlowUnit() = default;\nVideoInputFlowUnit::~VideoInputFlowUnit() = default;\n\nmodelbox::Status VideoInputFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto source_url = opts->GetString(\"source_url\");\n  auto repeat = opts->GetUint64(\"repeat\", 1);\n  // we need create new thread to send data to avoid stuck on queue\n  auto write_data_func = [source_url, repeat, this]() {\n    for (uint64_t i = 0; i < repeat; i++) {\n      auto ext_data = this->CreateExternalData();\n      if (!ext_data) {\n        MBLOG_ERROR << \"can not get external data.\";\n      }\n\n      auto output_buf = ext_data->CreateBufferList();\n      modelbox::TensorList output_tensor_list(output_buf);\n      output_tensor_list.BuildFromHost<unsigned char>(\n          {1, {source_url.size() + 1}}, (void *)source_url.data(),\n          source_url.size() + 1);\n\n      auto data_meta = std::make_shared<modelbox::DataMeta>();\n      data_meta->SetMeta(\"source_url\",\n                         std::make_shared<std::string>(source_url));\n\n      ext_data->SetOutputMeta(data_meta);\n\n      auto status = ext_data->Send(output_buf);\n      if 
(!status) {\n        MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n      }\n\n      status = ext_data->Close();\n      if (!status) {\n        MBLOG_ERROR << \"external data close failed:\" << status;\n      }\n    }\n  };\n\n  std::thread write_data_thread(write_data_func);\n  write_data_thread.detach();\n  return modelbox::STATUS_OK;\n}\nmodelbox::Status VideoInputFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoInputFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto output_buf = data_ctx->Output(\"out_video_url\");\n  std::vector<size_t> shape(1, 1);\n  output_buf->Build(shape);\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(VideoInputFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitOutput({\"out_video_url\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"source_url\", \"string\", true,\n                                                  \"\", \"the video  source url\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cpu/flowunit/video_input/video_input_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEOINPUTFLOWUNIT_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEOINPUTFLOWUNIT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <algorithm>\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_input\";\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: The operator can convert the url configured by the user to \"\n    \"buffer data, and be used for video demux. \\n\"\n    \"\\t@Port parameter:  The output port buffer data indicate video path. \\n\"\n    \"\\t@Constraint: This flowunit is usually followed by 'video_demuxer'.\";\nconst int RGB_CHANNELS = 3;\n\nclass VideoInputFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoInputFlowUnit();\n  ~VideoInputFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEOINPUTFLOWUNIT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-cuda)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nadd_subdirectory(core)\nadd_subdirectory(flowunit)"
  },
  {
    "path": "src/drivers/devices/cuda/core/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(DEVICE_NAME \"cuda\")\nproject(modelbox-devices-${DEVICE_NAME})\n\nfile(GLOB_RECURSE LIBMODELBOX_DEVICE_SOURCES *.cpp *.cc *.c)\nset(LIBMODELBOX_DEVICE_CUDA_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_DEVICE_STREAM_INCLUDE})\n\nset(HEADER \n    ${LIBMODELBOX_DEVICE_CUDA_INCLUDE}/modelbox\n)\n\n\n\nset(LIBMODELBOX_DEVICE_CUDA_STATIC libmodelbox-device-${DEVICE_NAME}-static)\nset(LIBMODELBOX_DEVICE_CUDA_SHARED libmodelbox-device-${DEVICE_NAME}-shared)\n\ncuda_add_library(${LIBMODELBOX_DEVICE_CUDA_STATIC} STATIC ${LIBMODELBOX_DEVICE_SOURCES})\ncuda_add_library(${LIBMODELBOX_DEVICE_CUDA_SHARED} SHARED ${LIBMODELBOX_DEVICE_SOURCES})\n\nset_target_properties(${LIBMODELBOX_DEVICE_CUDA_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_STATIC} 
${CUDA_CUDART_LIBRARY})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_STATIC} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_STATIC} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_STATIC} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_STATIC} ${MODELBOX_COMMON_DEVICE_STREAM_LIBRARY})\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} ${CUDA_CUDART_LIBRARY})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_CUDA_SHARED} ${MODELBOX_COMMON_DEVICE_STREAM_LIBRARY})\n\nset_target_properties(${LIBMODELBOX_DEVICE_CUDA_STATIC} ${LIBMODELBOX_DEVICE_CUDA_SHARED} \n    PROPERTIES OUTPUT_NAME \"modelbox-device-${DEVICE_NAME}\"\n)\nset_target_properties(${LIBMODELBOX_DEVICE_CUDA_STATIC} ${LIBMODELBOX_DEVICE_CUDA_SHARED}\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${TEST_WORKING_LIB_DIR}\"\n    RUNTIME_OUTPUT_DIRECTORY \"${TEST_WORKING_BIN_DIR}\"\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox-device-${DEVICE_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc @ONLY)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_CUDA_SHARED}\n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_CUDA_STATIC} \n    COMPONENT cuda-device-flowunit-devel\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT cuda-device-flowunit-devel\n    )\n\ninstall(FILES ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc \n    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig\n    COMPONENT 
cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_DEVICE_CUDA_SHARED_LIBRARIES ${LIBMODELBOX_DEVICE_CUDA_SHARED_LIBRARIES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CUDA_SHARED ${LIBMODELBOX_DEVICE_CUDA_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CUDA_INCLUDE ${LIBMODELBOX_DEVICE_CUDA_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CUDA_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_SOURCES ${LIBMODELBOX_DEVICE_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${LIBMODELBOX_DEVICE_CUDA_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/core/cuda_memory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/cuda/cuda_memory.h\"\n\n#include \"modelbox/base/collector.h\"\nnamespace modelbox {\n\nconstexpr int MAX_CUDA_DEVICE_NUMBER = 32;\nstatic RefVar<CudaMemoryPool> kCudaMemoryPool(MAX_CUDA_DEVICE_NUMBER);\n\n/**\n * @brief Call be cuda stream.\n *   Will release mem reference used before.\n *   We need a new thread due to cuda api might be called.\n **/\nvoid CudaReleaseMemoryAsync(void *mem_list_ptr) {\n  auto *list =\n      (std::vector<std::shared_ptr<const DeviceMemory>> *)(mem_list_ptr);\n  list->clear();\n  delete list;\n}\n\nCudaStream::CudaStream(cudaStream_t stream, int32_t device_id)\n    : stream_(stream), device_id_(device_id) {}\n\nStatus CudaStream::Sync() const {\n  auto cuda_ret = cudaSetDevice(device_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Bind cuda device \" << device_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  cuda_ret = cudaStreamSynchronize(stream_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Cuda stream synchronize failed, gpu \" << device_id_\n                << \" cuda ret \" << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus CudaStream::Bind(\n    std::vector<std::shared_ptr<const DeviceMemory>> mem_list) const {\n  auto cuda_ret = 
cudaSetDevice(device_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Bind cuda device \" << device_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  auto *mem_list_ptr = new std::vector<std::shared_ptr<const DeviceMemory>>();\n  mem_list_ptr->assign(mem_list.begin(), mem_list.end());\n  cuda_ret =\n      cudaLaunchHostFunc(stream_, CudaReleaseMemoryAsync, (void *)mem_list_ptr);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"cudaLaunchHostFunc failed, cuda ret \" << cuda_ret;\n    delete mem_list_ptr;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nCudaStreamPool::CudaStreamPool(const std::string &device_id) {\n  device_id_ = atoi(device_id.c_str());\n  is_running_ = true;\n  release_stream_thread_ =\n      std::make_shared<std::thread>(&CudaStreamPool::ReleaseStreamWorker, this);\n}\n\nCudaStreamPool::~CudaStreamPool() {\n  release_stream_queue_.Shutdown();\n  is_running_ = false;\n  if (release_stream_thread_ != nullptr) {\n    MBLOG_INFO << \"Join release stream thread start\";\n    release_stream_thread_->join();\n    MBLOG_INFO << \"Release stream thread stop\";\n  }\n}\n\nvoid CudaStreamPool::ReleaseStreamWorker() {\n  while (is_running_ || !release_stream_queue_.Empty()) {\n    cudaStream_t stream = nullptr;\n    auto ret = release_stream_queue_.Pop(&stream, 100);\n    if (!ret || stream == nullptr) {\n      continue;\n    }\n\n    auto cuda_ret = cudaStreamDestroy(stream);\n    if (cudaSuccess != cuda_ret) {\n      MBLOG_ERROR << \"Destroy cuda stream failed, cuda ret \" << cuda_ret;\n      continue;\n    }\n\n    allocate_count_--;\n  }\n}\n\nstd::shared_ptr<CudaStream> CudaStreamPool::Alloc() {\n  auto cuda_ret = cudaSetDevice(device_id_);\n  if (cudaSuccess != cuda_ret) {\n    MBLOG_ERROR << \"Bind cuda device \" << device_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return nullptr;\n  }\n\n  cudaStream_t stream;\n  cuda_ret = 
cudaStreamCreate(&stream);\n  if (cudaSuccess != cuda_ret) {\n    MBLOG_ERROR << \"Create cuda stream failed, cuda ret \" << cuda_ret;\n    return nullptr;\n  }\n\n  allocate_count_++;\n  std::shared_ptr<CudaStream> stream_ptr(new CudaStream(stream, device_id_),\n                                         [&](const CudaStream *stream_ptr) {\n                                           Free(stream_ptr);\n                                           delete stream_ptr;\n                                         });\n  return stream_ptr;\n}\n\nStatus CudaStreamPool::Free(const CudaStream *stream) {\n  if (stream == nullptr) {\n    return STATUS_SUCCESS;\n  }\n\n  release_stream_queue_.Push(stream->Get());\n  return STATUS_SUCCESS;\n}\n\nCudaMemory::CudaMemory(const std::shared_ptr<Device> &device,\n                       const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                       const std::shared_ptr<void> &device_mem_ptr, size_t size)\n    : DeviceMemory(device, mem_mgr, device_mem_ptr, size, false) {}\n\nCudaMemory::~CudaMemory() = default;\n\nStatus CudaMemory::BindStream(const std::shared_ptr<CudaStream> &stream_ptr) {\n  if (cuda_stream_ptr_ != nullptr) {\n    if (cuda_stream_ptr_ == stream_ptr) {\n      return STATUS_SUCCESS;\n    }\n    // Change stream to another is not allowed\n    return {STATUS_BUSY, \"Memory has been bound to a stream\"};\n  }\n\n  Status ret = STATUS_SUCCESS;\n  if (stream_ptr != nullptr) {\n    if (stream_ptr->IsInDevice(device_->GetDeviceID())) {\n      cuda_stream_ptr_ = stream_ptr;\n      return STATUS_SUCCESS;\n    }\n    // We need create a new stream when cross gpu device, so bind failed in fact\n    ret = STATUS_BUSY;\n  }\n\n  auto cuda_mem_mgr = std::static_pointer_cast<CudaMemoryManager>(mem_mgr_);\n  cuda_stream_ptr_ = cuda_mem_mgr->AllocStream();\n  return ret;\n}\n\nStatus CudaMemory::DetachStream() {\n  if (cuda_stream_ptr_ == nullptr) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = cuda_stream_ptr_->Sync();\n 
 if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  cuda_stream_ptr_.reset();\n  return STATUS_SUCCESS;\n}\n\nStatus CudaMemory::CopyExtraMetaTo(std::shared_ptr<DeviceMemory> &device_mem) {\n  if (device_mem->GetDevice() != device_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto target = std::static_pointer_cast<CudaMemory>(device_mem);\n  target->cuda_stream_ptr_ = cuda_stream_ptr_;\n  return STATUS_SUCCESS;\n}\n\nStatus CudaMemory::CombineExtraMeta(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) {\n  for (const auto &mem : mem_list) {\n    auto cuda_mem = std::dynamic_pointer_cast<CudaMemory>(mem);\n    if (cuda_stream_ptr_ == nullptr) {\n      cuda_stream_ptr_ = cuda_mem->cuda_stream_ptr_;\n    } else {\n      auto other_cuda_stream_ptr = cuda_mem->cuda_stream_ptr_;\n      if (other_cuda_stream_ptr == nullptr) {\n        continue;\n      }\n\n      if (cuda_stream_ptr_ == other_cuda_stream_ptr) {\n        continue;\n      }\n\n      auto ret = other_cuda_stream_ptr->Sync();\n      if (ret != STATUS_SUCCESS) {\n        MBLOG_ERROR << \"Sync cuda stream failed when combine cuda memory\";\n        return STATUS_FAULT;\n      }\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nCudaMemoryPool::CudaMemoryPool(const std::string &device_id) {\n  gpu_id_ = atoi(device_id.c_str());\n}\n\nStatus CudaMemoryPool::Init() {\n  auto status = InitSlabCache();\n  if (!status) {\n    return {status, \"init mempool failed.\"};\n  }\n\n  auto timer = std::make_shared<TimerTask>();\n  timer->Callback(&CudaMemoryPool::OnTimer, this);\n  flush_timer_ = timer;\n\n  // flush slab every 10s\n  GetTimer()->Schedule(flush_timer_, 1000, 10000);\n  return STATUS_OK;\n}\n\nCudaMemoryPool::~CudaMemoryPool() {\n  ClearAllSlabs();\n  if (flush_timer_) {\n    flush_timer_->Stop();\n    flush_timer_ = nullptr;\n  }\n}\n\nvoid CudaMemoryPool::OnTimer() {\n  // TODO support config shrink time.\n}\n\nvoid *CudaMemoryPool::MemAlloc(size_t size) {\n  auto cuda_ret = 
cudaSetDevice(gpu_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Bind device \" << gpu_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return nullptr;\n  }\n\n  void *cuda_mem_ptr = nullptr;\n  cuda_ret = cudaMalloc(&cuda_mem_ptr, size);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Cuda malloc failed, size \" << size << \", err code \"\n                << cuda_ret;\n    return nullptr;\n  }\n\n  return cuda_mem_ptr;\n}\n\nvoid CudaMemoryPool::MemFree(void *ptr) {\n  auto free_func = [](int32_t gpu_id, void *mem_ptr, bool with_log) {\n    cudaError_t cuda_ret = cudaSuccess;\n    DeferCond { return cuda_ret != cudaSuccess && with_log; };\n    DeferCondAdd {\n      MBLOG_ERROR << \"Free mem on gpu \" << gpu_id << \" failed, cuda ret \"\n                  << cuda_ret;\n    };\n\n    cuda_ret = cudaSetDevice(gpu_id);\n    if (cuda_ret != cudaSuccess) {\n      return cuda_ret;\n    }\n\n    cuda_ret = cudaFree(mem_ptr);\n    if (cuda_ret != cudaSuccess) {\n      return cuda_ret;\n    }\n\n    return cuda_ret;\n  };\n\n  auto *timer = GetTimer();\n  auto with_log = (timer == nullptr);\n  auto ret = free_func(gpu_id_, ptr, with_log);\n  if (ret == cudaSuccess || timer == nullptr) {\n    return;\n  }\n\n  auto free_task = std::make_shared<TimerTask>(free_func, gpu_id_, ptr, true);\n  free_task->SetName(\"cudaMemFreeTask\");\n  timer->Schedule(free_task, 0, 0, true);\n}\n\nCudaMemoryManager::CudaMemoryManager(const std::string &device_id)\n    : DeviceMemoryManager(device_id),\n      stream_pool_(device_id),\n      mem_copy_kind_map_{{DeviceMemoryCopyKind::FromHost,\n                          cudaMemcpyKind::cudaMemcpyHostToDevice},\n                         {DeviceMemoryCopyKind::SameDeviceType,\n                          cudaMemcpyKind::cudaMemcpyDeviceToDevice},\n                         {DeviceMemoryCopyKind::ToHost,\n                          cudaMemcpyKind::cudaMemcpyDeviceToHost}} {\n  try {\n    gpu_id_ = 
std::stoi(device_id);\n  } catch (const std::exception &e) {\n    MBLOG_WARN << \"Convert device id to int failed, id \" << device_id\n               << \", err \" << e.what() << \"; use device 0 as default\";\n  }\n}\n\nCudaMemoryManager::~CudaMemoryManager() = default;\n\nStatus CudaMemoryManager::Init() {\n  static std::once_flag flag;\n  if (gpu_id_ >= MAX_CUDA_DEVICE_NUMBER) {\n    return {STATUS_RANGE, \"gpu id is out of range\"};\n  }\n\n  std::call_once(flag, []() {\n    kCudaMemoryPool.MakeFunc([](int gpuid) -> std::shared_ptr<CudaMemoryPool> {\n      std::string memorypool_name = \"cuda-\" + std::to_string(gpuid);\n      auto pool = std::make_shared<CudaMemoryPool>(std::to_string(gpuid));\n      if (pool->Init() != STATUS_OK) {\n        return nullptr;\n      }\n\n      pool->SetName(memorypool_name);\n      return pool;\n    });\n  });\n\n  mem_pool_ = kCudaMemoryPool.Get(gpu_id_);\n  if (mem_pool_ == nullptr) {\n    const auto *err_msg = \"Get cuda memory pool failed.\";\n    MBLOG_ERROR << err_msg;\n    return {STATUS_NOMEM, err_msg};\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DeviceMemory> CudaMemoryManager::MakeDeviceMemory(\n    const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n    size_t size) {\n  return std::make_shared<CudaMemory>(device, shared_from_this(), mem_ptr,\n                                      size);\n}\n\nstd::shared_ptr<void> CudaMemoryManager::AllocSharedPtr(size_t size,\n                                                        uint32_t mem_flags) {\n  if (mem_pool_ == nullptr) {\n    MBLOG_ERROR << \"cuda memory is not init.\";\n    return nullptr;\n  }\n\n  return mem_pool_->AllocSharedPtr(size);\n}\n\nvoid *CudaMemoryManager::Malloc(size_t size, uint32_t mem_flags) {\n  if (mem_pool_ == nullptr) {\n    MBLOG_ERROR << \"cuda memory is not init.\";\n    return nullptr;\n  }\n\n  return mem_pool_->MemAlloc(size);\n};\n\nvoid CudaMemoryManager::Free(void *mem_ptr, uint32_t mem_flags) {\n  if (mem_pool_ == 
nullptr) {\n    MBLOG_ERROR << \"cuda memory is not init.\";\n    return;\n  }\n\n  mem_pool_->MemFree(mem_ptr);\n}\n\nStatus CudaMemoryManager::Copy(void *dest, size_t dest_size,\n                               const void *src_buffer, size_t src_size,\n                               DeviceMemoryCopyKind kind) {\n  if (dest == nullptr || src_buffer == nullptr) {\n    MBLOG_ERROR << \"Cuda copy src \" << src_buffer << \" to dest \" << dest\n                << \"failed\";\n    return STATUS_INVALID;\n  }\n\n  if (dest_size < src_size) {\n    MBLOG_ERROR << \"Cuda memcpy failed, dest size < src size\";\n    return STATUS_RANGE;\n  }\n\n  auto cuda_ret = cudaSetDevice(gpu_id_);\n  if (cudaSuccess != cuda_ret) {\n    MBLOG_ERROR << \"Bind device \" << gpu_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  cudaMemcpyKind cuda_copy_kind;\n  GetCudaMemcpyKind(kind, cuda_copy_kind);\n  cuda_ret = cudaMemcpy(dest, src_buffer, src_size, cuda_copy_kind);\n  if (cudaSuccess != cuda_ret) {\n    MBLOG_ERROR << \"Cuda memcpy failed, ret \" << cuda_ret << \", src size \"\n                << src_size << \", cuda cpy kind \" << cuda_copy_kind;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus CudaMemoryManager::GetDeviceMemUsage(size_t *free, size_t *total) const {\n  auto cuda_ret = cudaSetDevice(gpu_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Bind gpu device \" << device_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  size_t t_free;\n  size_t t_total;\n  cuda_ret = cudaMemGetInfo(&t_free, &t_total);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Get gpu \" << device_id_ << \" mem info failed, cuda ret\"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  if (free != nullptr) {\n    *free = t_free;\n  }\n\n  if (total != nullptr) {\n    *total = t_total;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus 
CudaMemoryManager::DeviceMemoryCopy(\n    const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, DeviceMemoryCopyKind copy_kind) {\n  cudaMemcpyKind cuda_copy_kind;\n  GetCudaMemcpyKind(copy_kind, cuda_copy_kind);\n  std::shared_ptr<CudaStream> cuda_stream_ptr;\n  auto ret = SetupCudaStream(src_memory, dest_memory, cuda_stream_ptr);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Set up cuda stream failed, using sync mem copy\";\n  }\n\n  cudaStream_t cuda_stream =\n      cuda_stream_ptr == nullptr ? nullptr : cuda_stream_ptr->Get();\n  auto dest_device = dest_memory->GetDevice();\n  auto src_device = src_memory->GetDevice();\n  auto *dest_ptr = dest_memory->GetPtr<uint8_t>().get() + dest_offset;\n  const auto *src_ptr = src_memory->GetConstPtr<uint8_t>().get() + src_offset;\n  ret = CudaMemcpyAsync(dest_ptr, src_ptr, src_size, dest_device, src_device,\n                        cuda_copy_kind, cuda_stream);\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  if (cuda_stream_ptr != nullptr) {\n    if (dest_memory->IsHost()) {\n      cuda_stream_ptr->Sync();\n    } else {\n      // When async operation complete, the reference of memory will be\n      // released\n      cuda_stream_ptr->Bind({src_memory, dest_memory});\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus CudaMemoryManager::CudaMemcpyAsync(\n    uint8_t *dest_ptr, const uint8_t *src_ptr, size_t src_size,\n    const std::shared_ptr<Device> &dest_device,\n    const std::shared_ptr<Device> &src_device, cudaMemcpyKind cuda_copy_kind,\n    cudaStream_t cuda_stream) {\n  cudaError_t cuda_ret;\n  auto dest_dev_id = atoi(dest_device->GetDeviceID().c_str());\n  auto src_dev_id = atoi(src_device->GetDeviceID().c_str());\n  if (cuda_copy_kind == cudaMemcpyKind::cudaMemcpyDeviceToDevice &&\n      dest_device != src_device) {\n    TryEnablePeerAccess(src_dev_id, dest_dev_id);\n    
cudaSetDevice(dest_dev_id);\n    cuda_ret = cudaMemcpyPeerAsync(dest_ptr, dest_dev_id, src_ptr, src_dev_id,\n                                   src_size, cuda_stream);\n    if (cudaSuccess != cuda_ret) {\n      MBLOG_ERROR << \"cudaMemcpyAsync between gpu \" << src_dev_id << \" and gpu \"\n                  << dest_dev_id << \" failed, try transfer in host, cuda ret \"\n                  << cuda_ret << \", size \" << src_size << \", copy kind \"\n                  << cuda_copy_kind << \", stream \" << cuda_stream\n                  << \", src_ptr \" << (void *)src_ptr << \", dest_ptr \"\n                  << (void *)dest_ptr;\n      return STATUS_NOTSUPPORT;\n    }\n  } else {\n    auto gpu_id = dest_dev_id;\n    if (cuda_copy_kind == cudaMemcpyKind::cudaMemcpyDeviceToHost) {\n      gpu_id = src_dev_id;\n    }\n\n    cudaSetDevice(gpu_id);\n    cuda_ret = cudaMemcpyAsync(dest_ptr, src_ptr, src_size, cuda_copy_kind,\n                               cuda_stream);\n    if (cudaSuccess != cuda_ret) {\n      MBLOG_ERROR << \"cudaMemcpyAsync failed, err code \" << cuda_ret\n                  << \", size \" << src_size << \", copy kind \" << cuda_copy_kind\n                  << \", stream \" << cuda_stream << \", src \" << (void *)src_ptr\n                  << \", dest_ptr \" << (void *)dest_ptr << \", gpu_id \" << gpu_id;\n      return STATUS_FAULT;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid CudaMemoryManager::GetCudaMemcpyKind(DeviceMemoryCopyKind copy_kind,\n                                          cudaMemcpyKind &cuda_copy_kind) {\n  cuda_copy_kind = mem_copy_kind_map_[copy_kind];\n}\n\nvoid CudaMemoryManager::TryEnablePeerAccess(int32_t src_gpu_id,\n                                            int32_t dest_gpu_id) {\n  cudaSetDevice(src_gpu_id);\n  cudaDeviceEnablePeerAccess(dest_gpu_id, 0);\n  cudaSetDevice(dest_gpu_id);\n  cudaDeviceEnablePeerAccess(src_gpu_id, 0);\n}\n\nStatus CudaMemoryManager::SetupCudaStream(\n    const std::shared_ptr<const 
DeviceMemory> &src_memory,\n    const std::shared_ptr<DeviceMemory> &dest_memory,\n    std::shared_ptr<CudaStream> &cuda_stream_ptr) {\n  if (src_memory->IsHost()) {\n    cuda_stream_ptr = nullptr;\n  } else {\n    cuda_stream_ptr =\n        std::static_pointer_cast<const CudaMemory>(src_memory)->GetBindStream();\n  }\n\n  if (!dest_memory->IsHost()) {\n    auto dest_cuda_memory = std::dynamic_pointer_cast<CudaMemory>(dest_memory);\n    auto ret = dest_cuda_memory->BindStream(cuda_stream_ptr);\n    if (ret == STATUS_BUSY && cuda_stream_ptr != nullptr) {\n      // Case: Two memory has different stream, we choose to sync source\n      cuda_stream_ptr->Sync();\n    }\n\n    cuda_stream_ptr = dest_cuda_memory->GetBindStream();\n  }\n\n  return STATUS_SUCCESS;\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/core/device_cuda.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/cuda/device_cuda.h\"\n\n#include <cuda_runtime.h>\n#include <stdio.h>\n\n#include <thread>\n\n#include \"device_stream.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nCuda::Cuda(const std::shared_ptr<DeviceMemoryManager> &mem_mgr)\n    : Device(mem_mgr) {}\n\nCuda::~Cuda() = default;\n\nstd::string Cuda::GetType() const { return DEVICE_TYPE; }\n\nStatus Cuda::DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                           size_t count) {\n  if (0 == count) {\n    return STATUS_OK;\n  }\n\n  for (size_t i = 0; i < count; ++i) {\n    auto status = fun(i);\n    if ((status != STATUS_OK) && (status != STATUS_CONTINUE)) {\n      MBLOG_WARN << \"executor func failed: \" << status;\n      return status;\n    }\n  }\n\n  return STATUS_OK;\n};\n\nbool Cuda::NeedResourceNice() { return true; }\n\nCudaFactory::CudaFactory() = default;\nCudaFactory::~CudaFactory() = default;\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>> CudaFactory::DeviceProbe() {\n  std::map<std::string, std::shared_ptr<DeviceDesc>> return_map;\n  std::vector<std::string> device_list = GetDeviceList();\n  cudaDeviceProp prop;\n  for (auto &device : device_list) {\n    auto cuda_ret = cudaGetDeviceProperties(&prop, std::stoi(device));\n    if (cudaSuccess != cuda_ret) {\n      
MBLOG_WARN << \"Get device \" << device << \" properties failed, cuda_ret \"\n                 << cuda_ret;\n      continue;\n    }\n\n    auto device_desc = std::make_shared<CudaDesc>();\n    device_desc->SetDeviceDesc(\"This is a cuda device description.\");\n    device_desc->SetDeviceId(device);\n    device_desc->SetDeviceMemory(GetBytesReadable(prop.totalGlobalMem));\n    device_desc->SetDeviceType(\"cuda\");\n    return_map.insert(std::make_pair(device, device_desc));\n  }\n  return return_map;\n}\n\nstd::string CudaFactory::GetDeviceFactoryType() { return DEVICE_TYPE; }\n\nstd::vector<std::string> CudaFactory::GetDeviceList() {\n  std::vector<std::string> device_list;\n  int count;\n  auto cuda_ret = cudaGetDeviceCount(&count);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"count device failed, cuda ret \" << cuda_ret;\n    return device_list;\n  }\n\n  for (int i = 0; i < count; i++) {\n    device_list.push_back(std::to_string(i));\n  }\n\n  return device_list;\n}\n\nstd::shared_ptr<Device> CudaFactory::CreateDevice(\n    const std::string &device_id) {\n  auto mem_mgr = std::make_shared<CudaMemoryManager>(device_id);\n  auto status = mem_mgr->Init();\n  if (!status) {\n    StatusError = status;\n    return nullptr;\n  }\n  return std::make_shared<Cuda>(mem_mgr);\n}\n\nCudaFlowUnit::CudaFlowUnit() = default;\n\nCudaFlowUnit::~CudaFlowUnit() = default;\n\nStatus CudaFlowUnit::Process(std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto cuda_ret = cudaSetDevice(dev_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Set cuda device \" << dev_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return STATUS_FAULT;\n  }\n\n  auto stream = GetDevSyncStream<CudaStream, CudaMemory>(data_ctx);\n  modelbox::Status status;\n  if (stream == nullptr) {\n    return {modelbox::STATUS_NOTFOUND, \"get sync stream failed.\"};\n  }\n\n  auto process_status = CudaProcess(data_ctx, stream->Get());\n  if (process_status != 
modelbox::STATUS_OK &&\n      process_status != modelbox::STATUS_CONTINUE) {\n    return process_status;\n  }\n\n  status = SetDevStream<CudaStream, CudaMemory>(data_ctx, stream);\n  if (!status) {\n    return status;\n  }\n\n  status = HoldMemory<CudaStream>(data_ctx, stream);\n  if (!status) {\n    return status;\n  }\n\n  return process_status;\n}\n\nCudaDesc::CudaDesc() = default;\n\nCudaDesc::~CudaDesc() = default;\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cuda/core/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <cuda_runtime.h>\n#include <modelbox/base/timer.h>\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\n#define CUDA_DEVICE_SCHEDULE_FLAG \"CUDA_DEVICE_SCHEDULE_FLAG\"\n\nstatic std::map<std::string, unsigned int> device_flags_map = {\n    {\"cudaDeviceScheduleAuto\", cudaDeviceScheduleAuto},\n    {\"cudaDeviceScheduleSpin\", cudaDeviceScheduleSpin},\n    {\"cudaDeviceScheduleYield\", cudaDeviceScheduleYield},\n    {\"cudaDeviceScheduleBlockingSync\", cudaDeviceScheduleBlockingSync},\n    {\"cudaDeviceMapHost\", cudaDeviceMapHost}};\n\nstd::shared_ptr<modelbox::Timer> kDeviceTimer;\n\nmodelbox::Timer *GetTimer() { return kDeviceTimer.get(); }\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::CudaFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_DEVICE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetName(modelbox::DEVICE_DRIVER_NAME);\n  desc->SetDescription(modelbox::DEVICE_DRIVER_DESCRIPTION);\n}\n\nmodelbox::Status DriverInit() {\n  if (kDeviceTimer != nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  kDeviceTimer = 
std::make_shared<modelbox::Timer>();\n  kDeviceTimer->SetName(\"Cuda-Timer\");\n  kDeviceTimer->Start();\n\n  auto *env_flag = getenv(CUDA_DEVICE_SCHEDULE_FLAG);\n  std::string cudaDeviceScheduleFlag;\n  if (env_flag != nullptr) {\n    cudaDeviceScheduleFlag = std::string(env_flag);\n  }\n\n  unsigned int flag = cudaDeviceScheduleAuto;\n  if (device_flags_map.find(cudaDeviceScheduleFlag) != device_flags_map.end()) {\n    flag = device_flags_map[cudaDeviceScheduleFlag];\n  }\n\n  auto cuda_ret = cudaSetDeviceFlags(flag);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_WARN << \"set cuda device flags \" << flag << \" failed, cuda ret \"\n               << cuda_ret;\n    return modelbox::STATUS_OK;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  if (kDeviceTimer == nullptr) {\n    return;\n  }\n\n  // Driver Fini.\n  kDeviceTimer->Stop();\n  kDeviceTimer = nullptr;\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/core/include/modelbox/device/cuda/cuda_memory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CUDA_MEMORY_H_\n#define MODELBOX_CUDA_MEMORY_H_\n\n#include <cuda_runtime.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/memory_pool.h>\n#include <modelbox/base/slab.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <atomic>\n\nextern modelbox::Timer *GetTimer();\n\nnamespace modelbox {\nvoid CudaReleaseMemoryAsync(void *mem_list_ptr);\n\nclass CudaMemory;\nclass CudaMemoryManager;\n\nclass CudaStream {\n  friend class CudaMemory;\n\n public:\n  CudaStream(const CudaStream &stream) = delete;\n  CudaStream(const CudaStream &&stream) = delete;\n  CudaStream &operator=(const CudaStream &stream) = delete;\n  CudaStream &operator=(const CudaStream &&stream) = delete;\n\n  CudaStream(cudaStream_t stream, int32_t device_id);\n\n  virtual ~CudaStream() = default;\n\n  inline bool IsInDevice(const std::string &device_id) const {\n    auto device_id_num = atoi(device_id.c_str());\n    return IsInDevice(device_id_num);\n  }\n\n  inline bool IsInDevice(int32_t device_id) const {\n    return device_id == device_id_;\n  }\n\n  inline cudaStream_t Get() const { return stream_; }\n\n  Status Sync() const;\n\n  Status Bind(std::vector<std::shared_ptr<const DeviceMemory>> mem_list) const;\n\n private:\n  cudaStream_t stream_;\n  int32_t device_id_;\n};\n\nclass 
CudaStreamPool {\n public:\n  /**\n   * @brief Cuda stream pool\n   * @param device_id cuda device id\n   */\n  CudaStreamPool(const std::string &device_id);\n\n  virtual ~CudaStreamPool();\n  /**\n   * @brief Allocate cuda stream associated with device\n   * @return Cuda stream or nullptr\n   */\n  std::shared_ptr<CudaStream> Alloc();\n\n  /**\n   * @brief Release cuda stream\n   * @param stream Cuda stream to free\n   */\n  Status Free(const CudaStream *stream);\n\n  /**\n   * @brief Get allocated stream count\n   * @return Allocated stream count\n   */\n  inline size_t GetAllocatedStreamCount() const {\n    return allocate_count_.load();\n  }\n\n  /**\n   * @brief Release stream worker\n   */\n  void ReleaseStreamWorker();\n\n private:\n  std::atomic<size_t> allocate_count_{0};\n  int32_t device_id_;\n\n  std::shared_ptr<std::thread> release_stream_thread_;\n  std::atomic_bool is_running_{false};\n  BlockingQueue<cudaStream_t> release_stream_queue_;\n};\n\nclass CudaMemory : public DeviceMemory {\n  friend class CudaStream;\n\n public:\n  /**\n   * @brief Cuda memory\n   * @param device pointer to device\n   * @param mem_mgr pointer to memory manager\n   * @param device_mem_ptr cuda device memory pointer\n   * @param size device memory size\n   */\n  CudaMemory(const std::shared_ptr<Device> &device,\n             const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n             const std::shared_ptr<void> &device_mem_ptr, size_t size);\n\n  ~CudaMemory() override;\n  /**\n   * @brief Get bind cuda stream\n   * @return Cuda stream\n   */\n  inline std::shared_ptr<CudaStream> GetBindStream() const {\n    return cuda_stream_ptr_;\n  }\n\n  /**\n   * @brief Bind cuda stream\n   * @param stream_ptr Cuda stream\n   *        if null\n   *          new stream will be created\n   *        else\n   *          set stream when return success\n   *          has one different stream when return busy\n   */\n  Status BindStream(const std::shared_ptr<CudaStream> &stream_ptr = 
nullptr);\n\n  /**\n   * @brief Detach cuda stream\n   * @return detach result\n   */\n  Status DetachStream();\n\n protected:\n  Status CopyExtraMetaTo(std::shared_ptr<DeviceMemory> &device_mem) override;\n\n  Status CombineExtraMeta(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) override;\n\n private:\n  std::shared_ptr<CudaStream> cuda_stream_ptr_;\n};\n\nclass CudaMemoryPool : public MemoryPoolBase {\n public:\n  CudaMemoryPool(const std::string &device_id);\n\n  ~CudaMemoryPool() override;\n\n  Status Init();\n\n  void *MemAlloc(size_t size) override;\n\n  void MemFree(void *ptr) override;\n\n  virtual void OnTimer();\n\n private:\n  int32_t gpu_id_{0};\n  std::shared_ptr<TimerTask> flush_timer_;\n};\n\nclass CudaMemoryManager : public DeviceMemoryManager {\n public:\n  /**\n   * @brief Cuda memory manager\n   * @param device_id device id\n   */\n  CudaMemoryManager(const std::string &device_id);\n\n  ~CudaMemoryManager() override;\n\n  /**\n   * @brief Init memory manager\n   * @return init result\n   */\n  Status Init();\n\n  /**\n   * @brief Create a specified memory container\n   * @param device pointer to device\n   * @param mem_ptr shared pointer to memory\n   * @param size memory size\n   * @return Empty memory container\n   */\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override;\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory in shared ptr\n   */\n  std::shared_ptr<void> AllocSharedPtr(size_t size,\n                                       uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory.\n   */\n  void *Malloc(size_t 
size, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, free memory\n   * @param mem_ptr Memory to free\n   * @param mem_flags Flags of device memory\n   */\n  void Free(void *mem_ptr, uint32_t mem_flags = 0) override;\n\n  /**\n   * @brief Implement by specified device, copy data from src to dest\n   * @param dest dest buffer to write\n   * @param dest_size dest buffer size\n   * @param src_buffer src buffer to read\n   * @param src_size read data size\n   * @param kind data copy kind\n   * @return Status\n   */\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override;\n\n  /**\n   * @brief Copy memory between current device and host\n   * @param dest_memory Destination memory\n   * @param dest_offset Destination memory offset\n   * @param src_memory Source memory\n   * @param src_offset Source offset\n   * @param src_size Source memory size\n   * @param copy_kind copy memory mode\n   * @return copy result\n   */\n  Status DeviceMemoryCopy(\n      const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n      const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n      size_t src_size,\n      DeviceMemoryCopyKind copy_kind = DeviceMemoryCopyKind::FromHost) override;\n\n  /**\n   * @brief Get device memory info\n   * @return Status\n   */\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override;\n\n  /**\n   * @brief Alloc a new cuda stream\n   * @return pointer to cuda stream\n   */\n  inline std::shared_ptr<CudaStream> AllocStream() {\n    return stream_pool_.Alloc();\n  };\n\n private:\n  /**\n   * @brief Get matched cudaMemcpyKind\n   * @param copy_kind Device memory copy kind\n   * @param cuda_copy_kind Matched cudaMemcpyKind\n   */\n  void GetCudaMemcpyKind(DeviceMemoryCopyKind copy_kind,\n                         cudaMemcpyKind &cuda_copy_kind);\n\n  /**\n   * @brief Prepare cuda stream 
according to copy kind\n   * @param src_memory Source device memory in copy operation\n   * @param dest_memory Destination device memory in copy operation\n   * @param cuda_stream Cuda stream to use in cuda copy api\n   * @return Status\n   */\n  Status SetupCudaStream(const std::shared_ptr<const DeviceMemory> &src_memory,\n                         const std::shared_ptr<DeviceMemory> &dest_memory,\n                         std::shared_ptr<CudaStream> &cuda_stream_ptr);\n\n  void TryEnablePeerAccess(int32_t src_gpu_id, int32_t dest_gpu_id);\n\n  Status CudaMemcpyAsync(uint8_t *dest_ptr, const uint8_t *src_ptr,\n                         size_t src_size,\n                         const std::shared_ptr<Device> &dest_device,\n                         const std::shared_ptr<Device> &src_device,\n                         cudaMemcpyKind cuda_copy_kind,\n                         cudaStream_t cuda_stream);\n\n  CudaStreamPool stream_pool_;\n  std::shared_ptr<CudaMemoryPool> mem_pool_;\n  std::map<DeviceMemoryCopyKind, cudaMemcpyKind> mem_copy_kind_map_;\n  int32_t gpu_id_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CUDA_MEMORY_H_"
  },
  {
    "path": "src/drivers/devices/cuda/core/include/modelbox/device/cuda/device_cuda.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_CUDA_H_\n#define MODELBOX_DEVICE_CUDA_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n#include <modelbox/device/cuda/cuda_memory.h>\n#include <modelbox/flow.h>\n\n#include <list>\n\n#define GET_CUDA_API_ERROR(api, err_code, err_str)      \\\n  const char *err_name = NULL;                          \\\n  cuGetErrorName(err_code, &err_name);                  \\\n  std::ostringstream error_log;                         \\\n  error_log << #api << \", return code : \" << (err_code) \\\n            << \", error : \" << err_name;                \\\n  auto err_str = error_log.str();\n\n#define CUDA_API_CALL(api)                                      \\\n  do {                                                          \\\n    CUresult err_code = api;                                    \\\n    if (err_code != CUDA_SUCCESS) {                             \\\n      GET_CUDA_API_ERROR(api, err_code, err_str);               \\\n      throw NVDECException::MakeNVDECException(                 \\\n          err_str, err_code, __FUNCTION__, __FILE__, __LINE__); \\\n    }                                                           \\\n  } while (0)\n\n// This is a no-exception version of the above MACRO CUDA_API_CALL(api)\n#define 
CHECK_CUDA_API_RETURN(api)                           \\\n  do {                                                       \\\n    CUresult err_code = api;                                 \\\n    if (err_code != CUDA_SUCCESS) {                          \\\n      GET_CUDA_API_ERROR(api, err_code, err_str);            \\\n      MBLOG_ERROR << \"Failed to call CUDA API: \" << err_str; \\\n      return {modelbox::STATUS_FAULT, err_str};              \\\n    }                                                        \\\n  } while (0)\n\nnamespace modelbox {\n\nconstexpr const char *DEVICE_TYPE = \"cuda\";\nconstexpr const char *DEVICE_DRIVER_NAME = \"device-cuda\";\nconstexpr const char *DEVICE_DRIVER_DESCRIPTION = \"A gpu device driver\";\n\nclass Cuda : public Device {\n public:\n  Cuda(const std::shared_ptr<DeviceMemoryManager> &mem_mgr);\n  ~Cuda() override;\n  std::string GetType() const override;\n\n  Status DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                       size_t count) override;\n\n  bool NeedResourceNice() override;\n};\n\nclass CudaFactory : public DeviceFactory {\n public:\n  CudaFactory();\n  ~CudaFactory() override;\n\n  std::map<std::string, std::shared_ptr<DeviceDesc>> DeviceProbe() override;\n  std::string GetDeviceFactoryType() override;\n  std::vector<std::string> GetDeviceList() override;\n  std::shared_ptr<Device> CreateDevice(const std::string &device_id) override;\n};\n\nclass CudaDesc : public DeviceDesc {\n public:\n  CudaDesc();\n  ~CudaDesc() override;\n};\n\nclass CudaFlowUnit : public FlowUnit {\n public:\n  CudaFlowUnit();\n  ~CudaFlowUnit() override;\n\n  virtual Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                             cudaStream_t stream) = 0;\n\n  Status Process(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_CUDA_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/core/libmodelbox-device-cuda.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox/device/cuda\n\nName: libmodelbox-device-cuda\nDescription: modelbox cuda device SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox-device-cuda\nCflags: -I${includedir}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-cuda-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${CUDA_INCLUDE_DIRS})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${CUDA_CUDART_LIBRARY})\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"color_convert\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c *.cu)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nelse()\n    list(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES opencv_imgproc)\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_NORMALIZE_INCLUDE})\n\n# for supress c++ compile wanring\nset(MODELBOX_UNIT_SHARED libmodelbox_unit_${UNIT_DEVICE}_${UNIT_NAME}_shared)\nset(MODELBOX_UNIT_SHARED_OUTPUT_NAME libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_color_convert_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    OUTPUT_NAME 
${MODELBOX_UNIT_SHARED_OUTPUT_NAME}\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\nfind_cuda_helper_libs(nppial)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppc_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppicc_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_COLOR_TRANSPOSE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLOR_TRANSPOSE_CUDA_SHARED ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLOR_TRANSPOSE_CUDA_SHARED ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_COLOR_TRANSPOSE_CUDA_SHARED ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES 
${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/color_transpose.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"color_transpose.h\"\n\n#include <unordered_map>\n\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"color_transpose_cu.h\"\n\nconst std::set<std::string> SupportPixFormat = {\"bgr\", \"rgb\", \"gray\"};\n\nNppStatus RGBToGRAY(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream) {\n  const int nStepInput = COLOR_CHANNEL_COUNT * size.width;\n  const int nStepOutput = GRAY_CHANNEL_COUNT * size.width;\n\n  return nppiRGBToGray_8u_C3C1R(input_data, nStepInput, output_data,\n                                nStepOutput, size);\n}\n\nNppStatus BGRToGRAY(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream) {\n  const int nStepInput = COLOR_CHANNEL_COUNT * size.width;\n  const int nStepOutput = GRAY_CHANNEL_COUNT * size.width;\n\n  const Npp32f aCoefs[COLOR_CHANNEL_COUNT] = {0.114F, 0.587F, 0.299F};\n  return nppiColorToGray_8u_C3C1R(input_data, nStepInput, output_data,\n                                  nStepOutput, size, aCoefs);\n}\n\ntypedef NppStatus (*pColorTranspose)(NppiSize &, const uint8_t *, uint8_t *,\n                                     cudaStream_t);\nconst std::unordered_map<std::string, pColorTranspose> FunctionTable = {\n    {\"rgb_to_bgr\", RGBToBGR},   {\"bgr_to_rgb\", 
BGRToRGB},\n    {\"rgb_to_gray\", RGBToGRAY}, {\"bgr_to_gray\", BGRToGRAY},\n    {\"gray_to_rgb\", GRAYToRGB}, {\"gray_to_bgr\", GRAYToBGR},\n};\n\nNppStatus ColorTransposeFunction(const std::string &source_color,\n                                 const std::string &target_color,\n                                 NppiSize &size, const uint8_t *input,\n                                 uint8_t *output, cudaStream_t stream) {\n  std::string key = source_color + \"_\" + \"to\" + \"_\" + target_color;\n  auto iter = FunctionTable.find(key);\n  if (iter == FunctionTable.end()) {\n    MBLOG_WARN << \"can not find transpose function for \" << key;\n    return NPP_ERROR;\n  }\n\n  return iter->second(size, input, output, stream);\n}\n\nmodelbox::Status ColorTransposeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"out_pix_fmt\")) {\n    MBLOG_ERROR << \"config must has out_pix_fmt\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  out_pix_fmt_ = opts->GetString(\"out_pix_fmt\", \"\");\n  if (SupportPixFormat.find(out_pix_fmt_) == SupportPixFormat.end()) {\n    MBLOG_ERROR << \"Invalid config out_pix_fmt = \" << out_pix_fmt_;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool IsColor(const std::string &type) {\n  return type == \"rgb\" || type == \"bgr\" || type == \"ycbcr\";\n}\n\nstd::size_t NumberOfChannels(const std::string &type) {\n  return IsColor(type) ? 
COLOR_CHANNEL_COUNT : GRAY_CHANNEL_COUNT;\n}\n\nmodelbox::Status GetParm(const std::shared_ptr<modelbox::Buffer> &buffer,\n                         std::vector<size_t> &shape, std::string &input_layout,\n                         modelbox::ModelBoxDataType &type,\n                         std::string &in_pix_fmt) {\n  if (!buffer->Get(\"shape\", shape)) {\n    MBLOG_ERROR << \"can not get shape from buffer\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (shape.size() != GRAY_CHANNEL_COUNT &&\n      shape.size() != COLOR_CHANNEL_COUNT) {\n    MBLOG_ERROR << \"unsupport image shape: \" << shape.size();\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!buffer->Get(\"layout\", input_layout)) {\n    MBLOG_ERROR << \"can not get layout from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (input_layout != \"hwc\") {\n    MBLOG_ERROR << \"unsupport layout: \" << input_layout << \" support hwc\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!buffer->Get(\"type\", type)) {\n    MBLOG_ERROR << \"can not get type from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (type != modelbox::ModelBoxDataType::MODELBOX_UINT8) {\n    MBLOG_ERROR << \"unsupport type: \" << type\n                << \" support modelbox::ModelBoxDataType::MODELBOX_UINT8\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!buffer->Get(\"pix_fmt\", in_pix_fmt)) {\n    MBLOG_ERROR << \"can not get pix_fmt from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (SupportPixFormat.find(in_pix_fmt) == SupportPixFormat.end()) {\n    MBLOG_ERROR << \"Invalid config in_pix_fmt = \" << in_pix_fmt;\n    return modelbox::STATUS_INVALID;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status GetAndCheckParm(\n    const std::shared_ptr<modelbox::BufferList> &input,\n    std::vector<size_t> &shape, std::string &input_layout,\n    modelbox::ModelBoxDataType &type, std::string &in_pix_fmt) {\n  std::vector<size_t> tmp_shape;\n  std::string tmp_input_layout;\n  
modelbox::ModelBoxDataType tmp_type = modelbox::MODELBOX_TYPE_INVALID;\n  std::string tmp_in_pix_fmt;\n\n  for (auto &buffer : *input) {\n    if (buffer == *input->begin()) {\n      if (!GetParm(buffer, shape, input_layout, type, in_pix_fmt)) {\n        return modelbox::STATUS_INVALID;\n      }\n    }\n\n    if (!GetParm(buffer, tmp_shape, tmp_input_layout, tmp_type,\n                 tmp_in_pix_fmt)) {\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_shape != shape) {\n      MBLOG_ERROR << \"all image must has same shape.\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_input_layout != input_layout) {\n      MBLOG_ERROR << \"all image must has same layout.\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_type != type) {\n      MBLOG_ERROR << \"all image must has same type.\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_in_pix_fmt != in_pix_fmt) {\n      MBLOG_ERROR << \"all image must has same pix_fmt.\";\n      return modelbox::STATUS_INVALID;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\n/* run when processing data */\nmodelbox::Status ColorTransposeFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto input = data_ctx->Input(\"in_image\");\n  auto output = data_ctx->Output(\"out_image\");\n\n  std::vector<size_t> shape;\n  std::string input_layout;\n  modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n  std::string in_pix_fmt;\n\n  auto status = GetAndCheckParm(input, shape, input_layout, type, in_pix_fmt);\n  if (!status) {\n    return status;\n  }\n\n  if (in_pix_fmt == out_pix_fmt_) {\n    MBLOG_INFO << \"out pix_fmt is same with in pix_fmt.\";\n    for (unsigned int i = 0; i < input->Size(); ++i) {\n      output->PushBack(input->At(i));\n    }\n\n    return modelbox::STATUS_OK;\n  }\n\n  size_t H = shape[0];\n  size_t W = shape[1];\n  size_t output_C = NumberOfChannels(out_pix_fmt_);\n  std::vector<size_t> 
shapes(input->Size(),\n                             H * W * output_C * GetDataTypeSize(type));\n  output->Build(shapes);\n\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"sync stream  \" << stream << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (unsigned int i = 0; i < input->Size(); ++i) {\n    NppiSize size;\n    size.height = H;\n    size.width = W;\n\n    const uint8_t *input_data = (const uint8_t *)(input->At(i)->ConstData());\n    uint8_t *output_data = (uint8_t *)(output->At(i)->MutableData());\n\n    auto npp_status = ColorTransposeFunction(in_pix_fmt, out_pix_fmt_, size,\n                                             input_data, output_data, stream);\n    if (NPP_SUCCESS != npp_status) {\n      MBLOG_WARN << \"ColorTranspose npp return failed, status: \" << npp_status;\n      status = modelbox::STATUS_FAULT;\n      break;\n    }\n\n    output->At(i)->CopyMeta(input->At(i));\n    output->At(i)->Set(\"pix_fmt\", out_pix_fmt_);\n    output->At(i)->Set(\"channel\", output_C);\n    output->At(i)->Set(\"shape\", std::vector<size_t>({H, W, output_C}));\n  }\n\n  return status;\n}\n\nMODELBOX_FLOWUNIT(ColorTransposeFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_image\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n\n  std::map<std::string, std::string> pix_fmt_list;\n\n  for (const auto &item : SupportPixFormat) {\n    pix_fmt_list[item] = item;\n  }\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"out_pix_fmt\", \"list\", true, \"\",\n      \"the colour transpose output pixel format\", pix_fmt_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  
desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/color_transpose.cu",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"color_transpose_cu.h\"\n\n__global__ void TransposeRGBToBGR(const uint8_t *rgb_input, uint8_t *bgr_output,\n                                  unsigned int images_size) {\n  unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n  if (idx >= images_size) {\n    return;\n  }\n\n  const uint8_t *in = &rgb_input[idx * COLOR_CHANNEL_COUNT];\n  uint8_t *out = &bgr_output[idx * COLOR_CHANNEL_COUNT];\n\n  out[0] = in[2];\n  out[1] = in[1];\n  out[2] = in[0];\n}\n\n__global__ void TransposeGrayToRGB(const uint8_t *gray_input,\n                                   uint8_t *rgb_output,\n                                   unsigned int images_size) {\n  unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;\n  if (idx >= images_size) {\n    return;\n  }\n\n  const uint8_t in = gray_input[idx];\n  uint8_t *out = &rgb_output[idx * COLOR_CHANNEL_COUNT];\n\n  out[0] = in;\n  out[1] = in;\n  out[2] = in;\n}\n\nauto TransposeBGRToRGB = TransposeRGBToBGR;\n\nconstexpr int MAX_CUDA_BLOCK_THD_SIZE = 1024;\n\nNppStatus RGBToBGR(NppiSize &size, const uint8_t *input_data,\n                   uint8_t *output_data, cudaStream_t stream) {\n  // For CUDA kernel\n  const unsigned int total_size = size.height * size.width;\n  const unsigned int block = total_size < MAX_CUDA_BLOCK_THD_SIZE\n                                 ? 
total_size\n                                 : MAX_CUDA_BLOCK_THD_SIZE;\n  const unsigned int grid = (total_size + block - 1) / block;\n\n  // RGB -> BGR\n  TransposeRGBToBGR<<<grid, block, 0, stream>>>(input_data, output_data,\n                                                total_size);\n  return NPP_SUCCESS;\n}\n\nNppStatus BGRToRGB(NppiSize &size, const uint8_t *input_data,\n                   uint8_t *output_data, cudaStream_t stream) {\n  // For CUDA kernel\n  const unsigned int total_size = size.height * size.width;\n  const unsigned int block = total_size < MAX_CUDA_BLOCK_THD_SIZE\n                                 ? total_size\n                                 : MAX_CUDA_BLOCK_THD_SIZE;\n  const unsigned int grid = (total_size + block - 1) / block;\n\n  // BGR -> RGB\n  TransposeBGRToRGB<<<grid, block, 0, stream>>>(input_data, output_data,\n                                                total_size);\n  return NPP_SUCCESS;\n}\n\nNppStatus GRAYToRGB(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream) {\n  const unsigned int total_size = size.height * size.width;\n  const unsigned int block = total_size < MAX_CUDA_BLOCK_THD_SIZE\n                                 ? total_size\n                                 : MAX_CUDA_BLOCK_THD_SIZE;\n  const unsigned int grid = (total_size + block - 1) / block;\n\n  TransposeGrayToRGB<<<grid, block, 0, stream>>>(input_data, output_data,\n                                                 total_size);\n  return NPP_SUCCESS;\n}\n\nNppStatus GRAYToBGR(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream) {\n  const unsigned int total_size = size.height * size.width;\n  const unsigned int block = total_size < MAX_CUDA_BLOCK_THD_SIZE\n                                 ? 
total_size\n                                 : MAX_CUDA_BLOCK_THD_SIZE;\n  const unsigned int grid = (total_size + block - 1) / block;\n\n  TransposeGrayToRGB<<<grid, block, 0, stream>>>(input_data, output_data,\n                                                 total_size);\n  return NPP_SUCCESS;\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/color_transpose.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NORMALIZE_H_\n#define MODELBOX_FLOWUNIT_NORMALIZE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"color_convert\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: Convert image color space between rgb, bgr, gray .\\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: This flowunit support: 'rgb' to 'bgr', 'bgr' to 'rgb', \"\n    \"'rgb' to 'gray', 'bgr' to 'gray', 'gray' to 'bgr', 'gray' to 'rgb'. \";\n\nclass ColorTransposeFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  ColorTransposeFlowUnit() = default;\n  ~ColorTransposeFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override { return modelbox::STATUS_OK; }\n\n  /* run when processing data */\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  }\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  }\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  std::string out_pix_fmt_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_NORMALIZE_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/color_transpose_cu.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <npp.h>\n#include <stdint.h>\n#include \"cuda_runtime.h\"\n\nconstexpr int COLOR_CHANNEL_COUNT = 3;\nconstexpr int GRAY_CHANNEL_COUNT = 1;\n\nNppStatus RGBToBGR(NppiSize &size, const uint8_t *input_data,\n                   uint8_t *output_data, cudaStream_t stream);\n\nNppStatus BGRToRGB(NppiSize &size, const uint8_t *input_data,\n                   uint8_t *output_data, cudaStream_t stream);\n\nNppStatus GRAYToRGB(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream);\n\nNppStatus GRAYToBGR(NppiSize &size, const uint8_t *input_data,\n                    uint8_t *output_data, cudaStream_t stream);"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/color_transpose/color_transpose_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <cuda_runtime.h>\n#include <opencv2/imgproc/types_c.h>\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass ColorTransposeFlowUnitTest : public testing::Test {\n public:\n  ColorTransposeFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    driver_flow_->Init(false);\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> 
driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> ColorTransposeFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus ColorTransposeFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"copy\", {\"input\"}, {\"output\"});\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      auto input = op_ctx->Input(\"input\");\n      auto output = op_ctx->Output(\"output\");\n      for (size_t i = 0; i < input->Size(); ++i) {\n        output->PushBack(input->At(i));\n      }\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n  return STATUS_OK;\n}\n\nTEST_F(ColorTransposeFlowUnitTest, ColorTransposeTest) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input1[type=input]   \n          color_transpose_gray[type=flowunit, flowunit=color_convert, device=cuda deviceid=0, label=\"<in_image> | <out_image>\", out_pix_fmt=\"gray\"]\n          color_transpose_rgb[type=flowunit, flowunit=color_convert, device=cuda deviceid=0, label=\"<in_image> | <out_image>\", out_pix_fmt=\"rgb\"]\n          color_transpose_bgr[type=flowunit, flowunit=color_convert, device=cuda deviceid=0, label=\"<in_image> | <out_image>\", out_pix_fmt=\"bgr\"]\n          copy_gray[type=flowunit, flowunit=copy, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n          copy_rgb[type=flowunit, flowunit=copy, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n          copy_bgr[type=flowunit, flowunit=copy, 
device=cpu, deviceid=0, label=\"<input> | <output>\"]\n          output_gray[type=output]\n          output_rgb[type=output]\n          output_bgr[type=output]         \n\n          input1 -> color_transpose_gray:in_image\n          input1 -> color_transpose_rgb:in_image\n          input1 -> color_transpose_bgr:in_image\n          color_transpose_gray:out_image -> copy_gray:input\n          color_transpose_rgb:out_image -> copy_rgb:input\n          color_transpose_bgr:out_image -> copy_bgr:input\n          copy_gray:output -> output_gray\n          copy_rgb:output -> output_rgb\n          copy_bgr:output -> output_bgr\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"ColorTransposeTest\", toml_content, -1);\n  auto flow = driver_flow->GetFlow();\n\n  {\n    std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n    cv::Mat bgr_img;\n    cv::Mat gray_img;\n    cv::Mat rgb_img;\n    bgr_img = cv::imread(gimg_path);\n\n    cv::cvtColor(bgr_img, rgb_img, CV_BGR2RGB);\n    cv::cvtColor(rgb_img, gray_img, CV_RGB2GRAY);\n\n    auto ext_data = flow->CreateExternalDataMap();\n    GTEST_ASSERT_NE(ext_data, nullptr);\n\n    std::vector<std::string> pix_fmt_list({\"bgr\", \"rgb\", \"gray\"});\n    std::vector<cv::Mat> img_list({bgr_img, rgb_img, gray_img});\n    std::vector<std::string> output_name(\n        {\"output_bgr\", \"output_rgb\", \"output_gray\"});\n    for (size_t i = 0; i < pix_fmt_list.size(); ++i) {\n      // TODO don't skip GRAY\n      if (i == 2) {\n        break;\n      }\n\n      auto color_bl = ext_data->CreateBufferList();\n      size_t img_size = img_list[i].total() * img_list[i].elemSize();\n      color_bl->BuildFromHost({img_size}, img_list[i].data, img_size);\n      // HWC\n      color_bl->Set(\n          \"shape\",\n          std::vector<size_t>({static_cast<size_t>(img_list[i].rows),\n                               static_cast<size_t>(img_list[i].cols),\n                
               static_cast<size_t>(img_list[i].channels())}));\n      color_bl->Set(\"layout\", std::string(\"hwc\"));\n      color_bl->Set(\"type\", ModelBoxDataType::MODELBOX_UINT8);\n      color_bl->Set(\"pix_fmt\", pix_fmt_list[i]);\n\n      auto status = ext_data->Send(\"input1\", color_bl);\n      EXPECT_EQ(status, STATUS_OK);\n\n      OutputBufferList map_buffer_list;\n\n      status = ext_data->Recv(map_buffer_list);\n      EXPECT_EQ(status, STATUS_OK);\n\n      auto host_device = color_bl->GetDevice();\n\n      for (size_t j = 0; j < output_name.size(); j++) {\n        auto buffer_list = map_buffer_list[output_name[j]];\n        EXPECT_EQ(buffer_list->Size(), 1);\n        EXPECT_EQ(buffer_list->GetBytes(),\n                  img_list[j].total() * img_list[j].elemSize());\n        std::string out_pix_fmt;\n        buffer_list->At(0)->Get(\"pix_fmt\", out_pix_fmt);\n        EXPECT_EQ(out_pix_fmt, pix_fmt_list[j]);\n        auto* opencv_data = (uint8_t*)img_list[j].data;\n\n        auto* out_data = (uint8_t*)(buffer_list->ConstBufferData(0));\n        for (size_t k = 0; k < buffer_list->GetBytes(); ++k) {\n          // TODO don't skip GRAY\n          if (j == 2) {\n            break;\n          }\n\n          EXPECT_EQ(*(out_data + k), *(opencv_data + k));\n        }\n      }\n    }\n\n    auto status = ext_data->Shutdown();\n    EXPECT_EQ(status, STATUS_OK);\n  }\n\n  flow->Wait(1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"dlengine_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT DLENGINE_FOUND) \n    message(STATUS \"Not found dlengine, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.dlengine.cuda.inference.onnx.in ${TEST_WORKING_DATA_DIR}/dlengine_cuda/modelbox.test.dlengine.cuda.inference.onnx.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\ninclude_directories(${DLENGINE_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED 
${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${DLENGINE_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE 
${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/dlengine_cuda_inference_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_cuda_inference_flowunit.h\"\n\nconstexpr const char *BACKEND_TYPE = \"PBJAFgZcNjg=\";\n\nDLEngineCUDAInferenceFlowUnit::DLEngineCUDAInferenceFlowUnit()\n    : inference_(std::make_shared<DLEngineInference>()) {}\n\nDLEngineCUDAInferenceFlowUnit::~DLEngineCUDAInferenceFlowUnit() = default;\n\nmodelbox::Status DLEngineCUDAInferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  if (!config->Contain(\"config.model_type\")) {\n    config->SetProperty(\"config.model_type\", \"onnx\");\n  }\n\n  // fix backend on gpu\n  config->SetProperty(\"config.backend_type\", BACKEND_TYPE);\n\n  return inference_->Init(config, GetFlowUnitDesc(), GetBindDevice()->GetType(),\n                          dev_id_);\n}\n\nmodelbox::Status DLEngineCUDAInferenceFlowUnit::Close() {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineCUDAInferenceFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto ret = cudaStreamSynchronize(stream);\n  if (ret != cudaSuccess) {\n    MBLOG_ERROR << \"cuda stream sync failed, err \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return inference_->Infer(data_ctx);\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nDLEngineCUDAInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const 
std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  return std::make_shared<DLEngineCUDAInferenceFlowUnit>();\n}\n\nstd::string DLEngineCUDAInferenceFlowUnitFactory::GetFlowUnitFactoryType() {\n  return FLOWUNIT_TYPE;\n}\n\nstd::string DLEngineCUDAInferenceFlowUnitFactory::GetVirtualType() {\n  return INFERENCE_TYPE;\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/dlengine_cuda_inference_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n\n#include \"dlengine_inference_flowunit.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\n\nclass DLEngineCUDAInferenceFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  DLEngineCUDAInferenceFlowUnit();\n\n  ~DLEngineCUDAInferenceFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &config) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  std::shared_ptr<DLEngineInference> inference_;\n};\n\nclass DLEngineCUDAInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::string GetVirtualType() override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_DLENGINE_CPU_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/dlengine_cuda_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_inference_flowunit_test.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nclass DLEngineCUDAInferenceFlowUnitTest : public testing::Test {\n protected:\n  void SetUp() override {\n    test_impl_ = std::make_shared<DLEngineInferenceFlowUnitTest>(\"cuda\");\n  }\n\n  void TearDown() override { test_impl_ = nullptr; }\n\n  std::shared_ptr<DLEngineInferenceFlowUnitTest> test_impl_;\n};\n\nTEST_F(DLEngineCUDAInferenceFlowUnitTest, OnnxRunUnit) {\n  auto ret = test_impl_->SetUp(\"dlengine_inference_onnx\");\n  ASSERT_EQ(ret, modelbox::STATUS_OK);\n  test_impl_->Run(\"dlengine_cuda_onnx_RunUnit\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/flowunit_desc.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_cuda_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_DESC = \"A dlengine cuda inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  return std::make_shared<DLEngineCUDAInferenceFlowUnitFactory>();\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n}\n\nmodelbox::Status DriverInit() { return modelbox::STATUS_OK; }\n\nvoid DriverFini() {}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/dlengine/test_toml/modelbox.test.dlengine.cuda.inference.onnx.in",
    "content": "[base]\nname = \"dlengine_inference_onnx\"\ndevice = \"cuda\"\nversion = \"1.0.0\"\ndescription = \"an dlengine cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/test_model/test_dynamic.onnx\"\ntype = \"inference\"\nvirtual_type = \"dlengine\"\n\n[config]\nmodel_type = \"onnx\"\nprecision = \"FP16\" # FP32/FP16/INT8\n\n[input]\n[input.input1]\nname = \"in1\"\nmin_shape = \"1x3x16x16\"\nopt_shape = \"4x3x16x16\"\nmax_shape = \"8x3x16x16\"\n\n[input.input2]\nname = \"in2\"\nmin_shape = \"1x3x16x16\"\nopt_shape = \"4x3x16x16\"\nmax_shape = \"8x3x16x16\"\n\n[output]\n[output.output1]\nname = \"out\"\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"image_rotate\")\n\nproject(modelbox-unit-${UNIT_NAME}-${UNIT_DEVICE})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc * .cu)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_IMAGE_ROTATE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED modelbox_unit_${UNIT_DEVICE}_${UNIT_NAME}_shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_ROTATE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_ROTATE_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_ROTATE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_ROTATE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_ROTATE_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_ROTATE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/image_rotate.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_rotate.h\"\n\n#include \"image_rotate_cu.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nmodelbox::Status ImageRotateGpuFlowUnit::RotateOneImage(\n    std::shared_ptr<modelbox::Buffer> input_buffer,\n    std::shared_ptr<modelbox::Buffer> output_buffer, int32_t rotate_angle,\n    int32_t width, int32_t height) {\n  auto cuda_ret = cudaSetDevice(dev_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Set cuda device \" << dev_id_ << \" failed, cuda ret \"\n                << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::shared_ptr<modelbox::CudaStream> stream;\n  if (GetStream(input_buffer, output_buffer, stream) != modelbox::STATUS_OK) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  output_buffer->Build(input_buffer->GetBytes());\n  auto *output_data = static_cast<u_char *>(output_buffer->MutableData());\n\n  auto ret =\n      ClockWiseRotateGPU((u_char *)input_buffer->ConstData(), output_data,\n                         height, width, rotate_angle, stream->Get());\n  if (ret != 0) {\n    MBLOG_ERROR << \"gpu rotate image failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  stream->Bind({input_buffer->GetDeviceMemory()});\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ImageRotateGpuFlowUnit::GetStream(\n    const std::shared_ptr<modelbox::Buffer> 
&input_buffer,\n    const std::shared_ptr<modelbox::Buffer> &output_buffer,\n    std::shared_ptr<modelbox::CudaStream> &stream) {\n  auto input_cuda_mem = std::dynamic_pointer_cast<modelbox::CudaMemory>(\n      input_buffer->GetDeviceMemory());\n  auto in_stream = input_cuda_mem->GetBindStream();\n  // bind same stream\n  auto output_cuda_mem = std::dynamic_pointer_cast<modelbox::CudaMemory>(\n      output_buffer->GetDeviceMemory());\n  auto status = output_cuda_mem->BindStream(in_stream);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"bind stream failed, \" + status.WrapErrormsgs();\n    MBLOG_WARN << err_msg;\n    return status;\n  }\n\n  stream = output_cuda_mem->GetBindStream();\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ImageRotateGpuFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({\"in_image\"});\n  desc.AddFlowUnitOutput({\"out_image\"});\n\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"rotate_angle\", \"int\", false, \"0\", \"the image rotate image\"));\n\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/image_rotate.cu",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_rotate_cu.h\"\n#include \"math.h\"\n\n#define GPU_BLOCK_SIZE_X 16\n#define GPU_BLOCK_SIZE_Y 16\n\n#define GET_GRID_SIZE(gridSizeX, gridSizeY, height, width) \\\n  gridSizeX = ceil((width) / (GPU_BLOCK_SIZE_X * 1.0));    \\\n  gridSizeY = ceil((height) / (GPU_BLOCK_SIZE_Y * 1.0));\n\nint32_t ClockWiseRotateGPU(const u_char *srcData, u_char *dstData,\n                           int32_t height, int32_t width, int32_t rotateAngle,\n                           cudaStream_t stream) {\n  if (srcData == nullptr) {\n    return -1;\n  }\n\n  cudaMemset(dstData, 0, height * width * 3 * sizeof(u_char));\n\n  dim3 blockSize(GPU_BLOCK_SIZE_X, GPU_BLOCK_SIZE_Y);\n  int32_t gridSizeX, gridSizeY;\n  GET_GRID_SIZE(gridSizeX, gridSizeY, height, width);\n  dim3 gridSize(gridSizeX, gridSizeY);\n\n  RotateImg_u8c3r<<<gridSize, blockSize, 0, stream>>>(srcData, dstData, width,\n                                                      height, rotateAngle);\n  return 0;\n}\n\n__global__ void RotateImg_u8c3r(const u_char *srcData, u_char *dstData,\n                                int32_t width, int32_t height,\n                                int32_t rotateAngle) {\n  const long tidX = blockIdx.x * blockDim.x + threadIdx.x;\n  const long tidY = blockIdx.y * blockDim.y + threadIdx.y;\n\n  if ((tidX >= width) || (tidY >= 
height)) {\n    return;\n  }\n\n  if (rotateAngle == 90) {\n    long rotateX = height - 1 - tidY;\n    long rotateY = tidX;\n\n    dstData[(rotateY * height + rotateX) * 3] =\n        srcData[(tidY * width + tidX) * 3];\n    dstData[(rotateY * height + rotateX) * 3 + 1] =\n        srcData[(tidY * width + tidX) * 3 + 1];\n    dstData[(rotateY * height + rotateX) * 3 + 2] =\n        srcData[(tidY * width + tidX) * 3 + 2];\n  } else if (rotateAngle == 180) {\n    long rotateX = width - 1 - tidX;\n    long rotateY = height - 1 - tidY;\n\n    dstData[(rotateY * width + rotateX) * 3] =\n        srcData[(tidY * width + tidX) * 3];\n    dstData[(rotateY * width + rotateX) * 3 + 1] =\n        srcData[(tidY * width + tidX) * 3 + 1];\n    dstData[(rotateY * width + rotateX) * 3 + 2] =\n        srcData[(tidY * width + tidX) * 3 + 2];\n  } else if (rotateAngle == 270) {\n    long rotateX = tidY;\n    long rotateY = width - 1 - tidX;\n\n    dstData[(rotateY * height + rotateX) * 3] =\n        srcData[(tidY * width + tidX) * 3];\n    dstData[(rotateY * height + rotateX) * 3 + 1] =\n        srcData[(tidY * width + tidX) * 3 + 1];\n    dstData[(rotateY * height + rotateX) * 3 + 2] =\n        srcData[(tidY * width + tidX) * 3 + 2];\n  }\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/image_rotate.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_ROTATE_CUDA_H_\n#define MODELBOX_FLOWUNIT_IMAGE_ROTATE_CUDA_H_\n\n#include \"image_rotate_base.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"image_rotate\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: An OpenCV rotate flowunit on cuda. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: rotate_angle,  Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nclass ImageRotateGpuFlowUnit : public ImageRotateFlowUnitBase {\n public:\n  modelbox::Status RotateOneImage(\n      std::shared_ptr<modelbox::Buffer> input_buffer,\n      std::shared_ptr<modelbox::Buffer> output_buffer, int32_t rotate_angle,\n      int32_t width, int32_t height) override;\n\n private:\n  modelbox::Status GetStream(\n      const std::shared_ptr<modelbox::Buffer> &input_buffer,\n      const std::shared_ptr<modelbox::Buffer> &output_buffer,\n      std::shared_ptr<modelbox::CudaStream> &stream);\n};\n\n#endif  // MODELBOX_FLOWUNIT_IMAGE_ROTATE_CUDA_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/image_rotate_cu.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef IMAGE_ROTATE_CU_H_\n#define IMAGE_ROTATE_CU_H_\n\n#include <stdint.h>\n\n#include \"cuda_runtime.h\"\n\nint32_t ClockWiseRotateGPU(const u_char *srcData, u_char *dstData,\n                           int32_t height, int32_t width, int32_t rotateAngle,\n                           cudaStream_t stream);\n\n__global__ void RotateImg_u8c3r(const u_char *srcData, u_char *dstData,\n                                int32_t width, int32_t height,\n                                int32_t rotateAngle);\n\n__global__ void RotateImg_u8p3(const u_char *srcData, u_char *dstData,\n                               int width, int height, int32_t rotateAngle);\n\n#endif"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/image_rotate/image_rotate_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <cuda_runtime.h>\n\n#include <opencv2/opencv.hpp>\n\n#include \"image_rotate_test_base.h\"\n\nnamespace modelbox {\n\nTEST_F(ImageRotateFlowUnitTest, CudaRotateTest) {\n  int count = 0;\n  cudaGetDeviceCount(&count);\n  if (count <= 0) {\n    MBLOG_INFO << \"no cuda device, skip test suit\";\n    GTEST_SKIP();\n  }\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_0_1_rotate[type=flowunit, flowunit=test_0_1_rotate, device=cpu, deviceid=0, label=\"<out_1>\"]\n          image_rotate[type=flowunit, flowunit=image_rotate, device=cuda, deviceid=0, label=\"<in_encoded_image> | <out_image>\", batch_size=3]\n          test_1_0_rotate[type=flowunit, flowunit=test_1_0_rotate, device=cpu, deviceid=0, label=\"<in_1>\",batch_size=3]                                \n          test_0_1_rotate:out_1 -> image_rotate:in_image \n          test_0_1_rotate:out_1 -> test_1_0_rotate:in_origin                                                                      \n          image_rotate:out_image -> test_1_0_rotate:in_rotate                                                                      \n        }'''\n 
   format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"CudaRotateTest\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  for (auto rotate_angle : test_rotate_angle_) {\n    std::string expected_file_path = std::string(TEST_ASSETS) + \"/rotate_\" +\n                                     std::to_string(rotate_angle) + \".jpg\";\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string rotate_result_file_path = std::string(TEST_DATA_DIR) +\n                                          \"/rotate_result_\" +\n                                          std::to_string(rotate_angle) + \".jpg\";\n    cv::Mat rotate_result_img = cv::imread(rotate_result_file_path);\n\n    int result_data_size =\n        rotate_result_img.total() * rotate_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    EXPECT_EQ(result_data_size, expected_data_size);\n\n    auto cmp_ret =\n        memcmp(rotate_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(cmp_ret, 0);\n\n    auto rmret = remove(rotate_result_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"mean\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_MEAN_INCLUDE})\n\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_MEAN_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\nfind_cuda_helper_libs(nppial)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppial_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MEAN_LIBRARY})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_MEAN_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_MEAN_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MEAN_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MEAN_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MEAN_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/mean.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"mean.h\"\n\n#include \"nppi_arithmetic_and_logical_operations.h\"\n\nbool CheckRoiValid(const ImageRect &roi) {\n  if ((0 > roi.width) || (PIXEL_THRESHOLD < roi.width) || (0 > roi.height) ||\n      (PIXEL_THRESHOLD < roi.height) || (0 > roi.x) ||\n      (PIXEL_THRESHOLD < roi.x) || (0 > roi.y) || (PIXEL_THRESHOLD < roi.y)) {\n    return false;\n  }\n\n  return true;\n}\n\nint32_t Mean_PLANAR_32f_P3R(const float *pSrcPlanarData, int width, int height,\n                            const ImageRect &srcRoi, const ImageMean_32f &mean,\n                            cudaStream_t stream) {\n  NppStatus status = NPP_ERROR;\n\n  if (nullptr == pSrcPlanarData) {\n    MBLOG_ERROR << \"Parma is Null.\";\n    return static_cast<int32_t>(status);\n  }\n\n  if ((0 > width) || (PIXEL_THRESHOLD < width)) {\n    MBLOG_ERROR << \"image width is invalid.\";\n    return static_cast<int32_t>(status);\n  }\n\n  if ((0 > height) || (PIXEL_THRESHOLD < height)) {\n    MBLOG_ERROR << \"image height is invalid.\";\n    return static_cast<int32_t>(status);\n  }\n\n  if ((0.0 > mean.channel_0) || (255.0 < mean.channel_1) ||\n      (0.0 > mean.channel_2) || (255.0 < mean.channel_0) ||\n      (0.0 > mean.channel_1) || (255.0 < mean.channel_2)) {\n    MBLOG_ERROR << \"mean value is invalid.\";\n    return 
static_cast<int32_t>(status);\n  }\n\n  if (!CheckRoiValid(srcRoi)) {\n    return static_cast<int32_t>(status);\n  }\n\n  NppiSize oSizeROI;\n  oSizeROI.width = srcRoi.width;\n  oSizeROI.height = srcRoi.height;\n\n  status = nppiSubC_32f_C1R((Npp32f *)pSrcPlanarData, width * sizeof(float),\n                            mean.channel_0, (Npp32f *)pSrcPlanarData,\n                            width * sizeof(float), oSizeROI);\n  if (NPP_SUCCESS != status) {\n    return static_cast<int32_t>(status);\n  }\n\n  status = nppiSubC_32f_C1R((Npp32f *)pSrcPlanarData + height * width,\n                            width * sizeof(float), mean.channel_1,\n                            (Npp32f *)pSrcPlanarData + height * width,\n                            width * sizeof(float), oSizeROI);\n  if (NPP_SUCCESS != status) {\n    return static_cast<int32_t>(status);\n  }\n\n  status = nppiSubC_32f_C1R((Npp32f *)pSrcPlanarData + height * width * 2,\n                            width * sizeof(float), mean.channel_2,\n                            (Npp32f *)pSrcPlanarData + height * width * 2,\n                            width * sizeof(float), oSizeROI);\n\n  return static_cast<int32_t>(status);\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/mean.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_MEAN_H_\n#define MODELBOX_MEAN_H_\n\n#include <cuda_runtime.h>\n#include <nppdefs.h>\n\n#include <iostream>\n\n#include \"modelbox/base/log.h\"\n\n#define PIXEL_THRESHOLD 4096\n\n/**\n * 2D Rectangle\n * This struct contains position and size information of a rectangle in\n * two space.\n * The rectangle's position is usually signified by the coordinate of its\n * upper-left corner.\n */\ntypedef struct {\n  int x;     /**<  x-coordinate of upper left corner (lowest memory address). */\n  int y;     /**<  y-coordinate of upper left corner (lowest memory address). */\n  int width; /**<  Rectangle width. */\n  int height; /**<  Rectangle height. */\n} ImageRect;\n\ntypedef struct {\n  float channel_0;\n  float channel_1;\n  float channel_2;\n} ImageMean_32f;\n\nbool CheckRoiValid(const ImageRect &roi);\n\nint32_t Mean_PLANAR_32f_P3R(const float *pSrcPlanarData, int width, int height,\n                            const ImageRect &srcRoi, const ImageMean_32f &mean,\n                            cudaStream_t stream);\n\n#endif  // MODELBOX_MEAN_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/mean_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"mean_flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nMeanFlowUnit::MeanFlowUnit() = default;\nMeanFlowUnit::~MeanFlowUnit() = default;\n\nmodelbox::Status MeanFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"mean\")) {\n    MBLOG_ERROR << \"mean flow unit does not contain mean param\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto input_params = opts->GetDoubles(\"mean\");\n  if (input_params.size() != CHANNEL_NUM) {\n    MBLOG_ERROR << \"mean param error\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  params_.means_.assign(input_params.begin(), input_params.end());\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MeanFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status MeanFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  cudaStreamSynchronize(stream);\n  const auto input_bufs = data_ctx->Input(\"in_data\");\n  if (input_bufs->Size() == 0) {\n    MBLOG_ERROR << \"mean flowunit in_data invalied\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_data\");\n  if (!BuildOutputBufferList(input_bufs, output_bufs)) {\n    MBLOG_ERROR << \"build output BufferList failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  
for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    auto input_buf = input_bufs->At(i);\n    int32_t width = 0;\n    int32_t height = 0;\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    if (!CheckBufferValid(input_buf, width, height, type)) {\n      MBLOG_FATAL << \"mean flowunit input_buf invalied\";\n      continue;\n    }\n\n    auto out_buff = output_bufs->At(i);\n    out_buff->CopyMeta(input_buf);\n    out_buff->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n    auto *out_data = static_cast<float *>(out_buff->MutableData());\n\n    if (type == modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n      auto *in_data_f32 =\n          static_cast<float *>(const_cast<void *>(input_buf->ConstData()));\n      if (in_data_f32 == nullptr) {\n        MBLOG_ERROR << \"mean flowunit data is nullptr\";\n        continue;\n      }\n\n      cudaMemcpy(out_data, in_data_f32, input_buf->GetBytes(),\n                 cudaMemcpyDeviceToDevice);\n    } else {\n      auto *in_data_uint8 =\n          static_cast<uint8_t *>(const_cast<void *>(input_buf->ConstData()));\n      if (in_data_uint8 == nullptr) {\n        MBLOG_ERROR << \"mean flowunit data is nullptr\";\n        continue;\n      }\n\n      std::vector<uint8_t> host_data_uint8(input_buf->GetBytes());\n      cudaMemcpy(host_data_uint8.data(), in_data_uint8, input_buf->GetBytes(),\n                 cudaMemcpyDeviceToHost);\n      std::vector<float> host_data_f32(input_buf->GetBytes());\n      for (size_t i = 0; i < input_buf->GetBytes(); i++) {\n        host_data_f32[i] = host_data_uint8[i];\n      }\n\n      cudaMemcpy(out_data, host_data_f32.data(),\n                 input_buf->GetBytes() * sizeof(float), cudaMemcpyHostToDevice);\n    }\n\n    int32_t ret = MeanOperator(out_data, width, height);\n    if (ret < 0) {\n      MBLOG_ERROR << \"mean flowunit process failed\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool 
MeanFlowUnit::CheckBufferValid(\n    const std::shared_ptr<modelbox::Buffer> &buffer, int32_t &width,\n    int32_t &height, modelbox::ModelBoxDataType &type) {\n  std::vector<size_t> shape;\n  if (!buffer->Get(\"shape\", shape)) {\n    MBLOG_ERROR << \"mean flowunit can not get shape from meta\";\n    return false;\n  }\n\n  if (shape.size() != SHAPE_SIZE) {\n    MBLOG_ERROR << \"mean flowunit only support hwc data\";\n    return false;\n  }\n\n  if (shape[2] != CHANNEL_NUM) {\n    MBLOG_ERROR << \"mean flowunit only support hwc and C is \" << CHANNEL_NUM;\n    return false;\n  }\n\n  height = shape[0];\n  width = shape[1];\n\n  if (!buffer->Get(\"type\", type)) {\n    MBLOG_ERROR << \"mean flowunit can not get input type from meta\";\n    return false;\n  }\n\n  return true;\n}\n\nbool MeanFlowUnit::MeanOperator(const float *data, int32_t width,\n                                int32_t height) {\n  /* sub the mean value of BGR channels */\n  ImageRect roi;\n  roi.x = roi.y = 0;\n  roi.width = width;\n  roi.height = height;\n\n  ImageMean_32f mean;\n  mean.channel_0 = params_.means_[0];\n  mean.channel_1 = params_.means_[1];\n  mean.channel_2 = params_.means_[2];\n\n  return Mean_PLANAR_32f_P3R(data, width, height, roi, mean, nullptr);\n}\n\nMODELBOX_FLOWUNIT(MeanFlowUnit, desc) {\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetFlowUnitName(\"mean\");\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_data\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_data\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"mean\", \"string\", true, \"\", \"the mean param\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  
desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/mean_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_MEAN_H_\n#define MODELBOX_FLOWUNIT_MEAN_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/device/cuda/device_cuda.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <mean_flowunit_base.h>\n\n#include \"mean.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"mean\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: The operator is used to subtract the mean for tensor data, \"\n    \"for example the image(RGB/BGR), shape(W, H, C), subtract the \"\n    \"corresponding value for different channels. \\n\"\n    \"\\t@Port parameter: The input port and the output buffer type are tensor. 
\\n\"\n    \"\\t  The tensor type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: \";\n\nclass MeanFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  MeanFlowUnit();\n  ~MeanFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  bool CheckBufferValid(const std::shared_ptr<modelbox::Buffer> &buffer,\n                        int32_t &width, int32_t &height,\n                        modelbox::ModelBoxDataType &type);\n  bool MeanOperator(const float *data, int32_t width, int32_t height);\n\n  MeanParams params_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_MEAN_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mean/mean_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n#include <cuda_runtime.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass MeanGpuFlowUnitTest : public testing::Test {\n public:\n  MeanGpuFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  }\n\n  void TearDown() override { driver_flow_->Clear(); };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nStatus MeanGpuFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    
desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_mean_0\");\n    desc_flowunit.SetDescription(\"The test input data, 0 inputs 1 output\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_mean_0.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_mean_0\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n              }\n\n              auto buffer_list = ext_data->CreateBufferList();\n              buffer_list->Build({10 * sizeof(int)});\n              auto* data = (int*)buffer_list->MutableData();\n              for (size_t i = 0; i < 10; i++) {\n                data[i] = i;\n              }\n\n              auto status = ext_data->Send(buffer_list);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n 
       .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_mean_0 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_mean_0 \"\n                          << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              auto output_buf_1 = op_ctx->Output(\"Out_1\");\n              std::vector<size_t> data_1_shape = {5 * 4 * 3 * sizeof(uint8_t)};\n              output_buf_1->Build(data_1_shape);\n              auto* dev_data_1 =\n                  static_cast<uint8_t*>(output_buf_1->At(0)->MutableData());\n              for (size_t i = 0; i < 3; ++i) {\n                for (size_t j = 0; j < 5; j++) {\n                  for (size_t k = 0; k < 4; k++) {\n                    dev_data_1[i * 20 + j * 4 + k] = static_cast<uint8_t>(100);\n                  }\n                }\n              }\n\n              std::vector<size_t> shape{4, 5, 3};\n              output_buf_1->Set(\"shape\", shape);\n              output_buf_1->Set(\"type\", ModelBoxDataType::MODELBOX_UINT8);\n\n              MBLOG_DEBUG << \"test_mean_0 gen data, 0\"\n                          << output_buf_1->GetBytes();\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_mean_0\", \"cpu\", desc_flowunit,\n     
                           std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_mean_1\");\n    desc_flowunit.SetDescription(\"The test output data, 1 input 0 outputs\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_mean_1.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_mean_1\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_mean_1 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_mean_1 \"\n                          << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& 
op_ctx) {\n              auto input_bufs = op_ctx->Input(\"In_1\");\n              EXPECT_EQ(input_bufs->Size(), 1);\n              for (size_t i = 0; i < input_bufs->Size(); ++i) {\n                auto input_buf = input_bufs->At(i);\n                std::vector<size_t> shape;\n                input_buf->Get(\"shape\", shape);\n                size_t width = shape[1];\n                size_t height = shape[0];\n                EXPECT_EQ(width, 5);\n                EXPECT_EQ(height, 4);\n\n                const auto* const in_data =\n                    static_cast<const float*>(input_buf->ConstData());\n                for (size_t c = 0; c < 3; c++) {\n                  for (size_t j = 0; j < width; j++) {\n                    for (size_t k = 0; k < height; k++) {\n                      float data = in_data[c * width * height + j * height + k];\n                      if (c == 0) {\n                        EXPECT_NEAR(data, 100, 0.0001);\n                      } else if (c == 1) {\n                        EXPECT_NEAR(data, 90, 0.0001);\n                      } else {\n                        EXPECT_NEAR(data, 80, 0.0001);\n                      }\n                    }\n                  }\n                }\n              }\n\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_mean_1\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DriverFlowTest> MeanGpuFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(MeanGpuFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n            
                 R\"([graph]\n    graphconf = '''digraph demo {\n          test_mean_0[type=flowunit, flowunit=test_mean_0, device=cpu,deviceid=0, label=\"<Out_1>\"] \n          mean[type=flowunit, flowunit=mean, device=cuda, deviceid=0, label=\"<in_data> | <out_data>\", mean=\"0.0,10.0,20.0\"]\n          test_mean_1[type=flowunit, flowunit=test_mean_1, device=cpu, deviceid=0, label=\"<In_1>\"] \n\n          test_mean_0:Out_1 -> mean:in_data\n          mean:out_data -> test_mean_1:In_1\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10.2)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"mindspore-lite-inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\nif (NOT MINDSPORE_LITE_FOUND) \n    message(STATUS \"Not found mindspore-lite, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\nset(CMAKE_CXX_STANDARD 17)\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MINDSPORE_LITE_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    
SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_compile_options(${MODELBOX_UNIT_SHARED} PUBLIC -fvisibility=hidden)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore.cuda.inference.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_cuda_infer_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.mindspore.cuda.inference.encrypt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_mindspore_cuda_infer_test_en.toml @ONLY)\n\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cuda-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CUDA_SOURCES 
${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MINDSPORE_LITE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"mindspore_cuda_inference_flowunit.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"mindspore_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A mindspore cuda inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<MindSporeInferenceCudaFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetGlobal(true);\n  return;\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/mindspore_cuda_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mindspore_cuda_inference_flowunit.h\"\n\nMindSporeInferenceCudaFlowUnit::MindSporeInferenceCudaFlowUnit() = default;\n\nMindSporeInferenceCudaFlowUnit::~MindSporeInferenceCudaFlowUnit() = default;\n\nmodelbox::Status MindSporeInferenceCudaFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto context = std::make_shared<mindspore::Context>();\n  auto &device_list = context->MutableDeviceInfo();\n  auto gpu_device_info = std::make_shared<mindspore::GPUDeviceInfo>();\n  gpu_device_info->SetDeviceID(dev_id_);\n  device_list.push_back(gpu_device_info);\n  auto cpu_device_info = std::make_shared<mindspore::CPUDeviceInfo>();\n  device_list.push_back(cpu_device_info);\n\n  infer_ = std::make_shared<MindSporeInference>(GetBindDevice(), context);\n  return infer_->Open(opts, this->GetFlowUnitDesc());\n}\n\nmodelbox::Status MindSporeInferenceCudaFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"sync stream  \" << stream << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return infer_->Infer(data_ctx);\n}\n\nmodelbox::Status MindSporeInferenceCudaFlowUnit::Close() {\n  infer_ = nullptr;\n  return 
modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nMindSporeInferenceCudaFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  auto inference_flowunit = std::make_shared<MindSporeInferenceCudaFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/mindspore_cuda_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CUDA_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CUDA_H_\n\n#include <modelbox/flowunit.h>\n\n#include \"mindspore_inference.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\n\nclass MindSporeInferenceCudaFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  MindSporeInferenceCudaFlowUnit();\n  virtual ~MindSporeInferenceCudaFlowUnit();\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  std::shared_ptr<MindSporeInference> infer_;\n};\n\nclass MindSporeInferenceCudaFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  MindSporeInferenceCudaFlowUnitFactory() = default;\n  virtual ~MindSporeInferenceCudaFlowUnitFactory() = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type);\n\n  std::string GetFlowUnitFactoryType() { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() { return INFERENCE_TYPE; };\n\n  
std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\n  FlowUnitProbe() {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_CUDA_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/mindspore_cuda_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mindspore_inference_flowunit_test.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass InferenceMindSporeCudaFlowUnitTest : public testing::Test {\n public:\n  InferenceMindSporeCudaFlowUnitTest()\n      : mindspore_flow_(std::make_shared<InferenceMindSporeFlowUnitTest>()) {}\n\n protected:\n  virtual void SetUp() {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = mindspore_flow_->Init();\n    EXPECT_EQ(ret, STATUS_OK);\n\n    const std::string src_file =\n        test_assets + \"/mindspore_inference/\" + test_model_file;\n    const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n    mindspore_inference_path = test_data_dir + \"/mindspore_inference\";\n    mkdir(mindspore_inference_path.c_str(), 0700);\n    dest_model_file = mindspore_inference_path + \"/\" + test_model_file;\n    dest_toml_file = 
mindspore_inference_path + \"/\" + test_toml_file;\n    CopyFile(src_file, dest_model_file, true);\n    CopyFile(src_toml, dest_toml_file, true);\n    const std::string src_file_en =\n        test_assets + \"/mindspore_inference/\" + test_model_file_en;\n    const std::string src_toml_en = test_data_dir + \"/\" + test_toml_file_en;\n    dest_model_file_en = mindspore_inference_path + \"/\" + test_model_file_en;\n    dest_toml_file_en = mindspore_inference_path + \"/\" + test_toml_file_en;\n    CopyFile(src_file_en, dest_model_file_en, true);\n    CopyFile(src_toml_en, dest_toml_file_en, true);\n  }\n\n  virtual void TearDown() {\n    remove(dest_model_file.c_str());\n    remove(dest_toml_file.c_str());\n    remove(dest_model_file_en.c_str());\n    remove(dest_toml_file_en.c_str());\n    remove(mindspore_inference_path.c_str());\n\n    mindspore_flow_ = nullptr;\n  };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_model_file = \"tensor_add.mindir\",\n                    test_toml_file = \"virtual_mindspore_infer_test.toml\",\n                    test_model_file_en = \"tensor_add_en.mindir\",\n                    test_toml_file_en = \"virtual_mindspore_infer_test_en.toml\";\n\n  std::string mindspore_inference_path, dest_model_file, dest_toml_file,\n      dest_model_file_en, dest_toml_file_en;\n\n  std::shared_ptr<InferenceMindSporeFlowUnitTest> mindspore_flow_;\n};\n\nTEST_F(InferenceMindSporeCudaFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0] 
            \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference, device=cuda, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:x_\n          prepare_ms_infer_data:out2 -> mindspore_inference:y_\n          mindspore_inference:output0-> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceMindSporeCudaFlowUnitTest, RunUnitEncrypt) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          prepare_ms_infer_data[type=flowunit, flowunit=prepare_ms_infer_data, device=cpu, deviceid=0]             \n          mindspore_inference[type=flowunit, flowunit=mindspore_inference_encrypt, device=cuda, deviceid=0, batch_size=2]\n          check_ms_infer_result[type=flowunit, flowunit=check_ms_infer_result, device=cpu, deviceid=0, batch_size=2]  \n                                  \n          prepare_ms_infer_data:out1 -> mindspore_inference:x_\n          prepare_ms_infer_data:out2 -> mindspore_inference:y_\n          mindspore_inference:output0 -> check_ms_infer_result:in\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mindspore_flow_->Run(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore.cuda.inference.encrypt.in",
    "content": "[base]\nname = \"mindspore_inference_encrypt\"\ndevice = \"cuda\"\nversion = \"1.0.0\"\ndescription = \"an mindspore cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add_en.mindir\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.0\"\nrootkey = \"F7Gx2mcbsZyPKrjh1jmlt+nty6LHCjMlpEB3gfoVBJBu07FZKeFegokUOaLgMEOJsE1PLwSi74+xELWyfSp8sBGI6ituru9SDWDGF82jiRwK\"\npasswd = \"NKEIuLCjW9UiZAtKa54PMPvWwdDV50kXePvixFJ8iKCW1QxEHswasfEHJ3NW79XG6aUlIk+Jdds+N5U+uIj4Bw==\"\n\n[input]\n[input.input1]\nname = \"x_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[input.input2]\nname = \"y_\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"output0\"\ntype = \"float\"\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/mindspore_lite_inference/test_toml/modelbox.test.mindspore.cuda.inference.in",
    "content": "[base]\nname = \"mindspore_inference\"\ndevice = \"cuda\"\nversion = \"1.0.0\"\ndescription = \"an mindspore cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/mindspore_inference/tensor_add.mindir\"\ntype = \"inference\"\nvirtual_type = \"mindspore\"\n\n[config]\ninput_format = \"NCHW\"\n\n[input]\n[input.input1]\nname = \"x_\"\n\n[input.input2]\nname = \"y_\"\n\n[output]\n[output.output1]\nname = \"output0\"\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"normalize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_NORMALIZE_INCLUDE})\n\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\nfind_cuda_helper_libs(nppial)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppial_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/normalize.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"normalize.h\"\n\n#include \"nppi_arithmetic_and_logical_operations.h\"\n\nbool CheckRoiValid(const ImageRect &roi) {\n  if ((0 > roi.width) || (PIXEL_THRESHOLD < roi.width) || (0 > roi.height) ||\n      (PIXEL_THRESHOLD < roi.height) || (0 > roi.x) ||\n      (PIXEL_THRESHOLD < roi.x) || (0 > roi.y) || (PIXEL_THRESHOLD < roi.y)) {\n    return false;\n  }\n\n  return true;\n}\n\nint32_t Scale_32f_C1IR(float *imageData, int width, ImageRect &rect,\n                       float ratio) {\n  NppStatus status = NPP_ERROR;\n\n  if (nullptr == imageData) {\n    MBLOG_ERROR << \"Parma is Null.\";\n    return static_cast<int32_t>(status);\n  }\n\n  if ((0 > width) || (PIXEL_THRESHOLD < width)) {\n    MBLOG_ERROR << \"image width is invalid.\";\n    return static_cast<int32_t>(status);\n  }\n\n  if (!CheckRoiValid(rect)) {\n    return static_cast<int32_t>(status);\n  }\n\n  float *startPos = imageData + rect.y * width + rect.x;\n\n  NppiSize oSizeROI;\n  oSizeROI.height = rect.height;\n  oSizeROI.width = rect.width;\n\n  status = nppiMulC_32f_C1IR(ratio, (Npp32f *)startPos, width * sizeof(float),\n                             oSizeROI);\n  if (NPP_SUCCESS != status) {\n    MBLOG_ERROR << \"Scale_32f_C1R. Fail to scale. ratio:\" << ratio;\n  }\n\n  return static_cast<int32_t>(status);\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/normalize.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_NORMALIZE_H_\n#define MODELBOX_NORMALIZE_H_\n\n#include <cuda_runtime.h>\n#include <nppdefs.h>\n\n#include <iostream>\n\n#include \"modelbox/base/log.h\"\n\n#define PIXEL_THRESHOLD 4096\n\n/**\n * 2D Rectangle\n * This struct contains position and size information of a rectangle in\n * two space.\n * The rectangle's position is usually signified by the coordinate of its\n * upper-left corner.\n */\ntypedef struct {\n  int x;     /**<  x-coordinate of upper left corner (lowest memory address). */\n  int y;     /**<  y-coordinate of upper left corner (lowest memory address). */\n  int width; /**<  Rectangle width. */\n  int height; /**<  Rectangle height. */\n} ImageRect;\n\nbool CheckRoiValid(const ImageRect &roi);\n\nint32_t Scale_32f_C1IR(float *imageData, int width, ImageRect &rect,\n                       float ratio);\n\n#endif  // MODELBOX_NORMALIZE_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/normalize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"normalize_flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nNormalizeFlowUnit::NormalizeFlowUnit() = default;\nNormalizeFlowUnit::~NormalizeFlowUnit() = default;\n\nmodelbox::Status NormalizeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"standard_deviation_inverse\")) {\n    MBLOG_ERROR << \"normalize flow unit does not contain normalize param\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto input_params = opts->GetDoubles(\"standard_deviation_inverse\");\n  if (input_params.size() != CHANNEL_NUM) {\n    MBLOG_ERROR << \"normalize param error\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  params_.normalizes_.assign(input_params.begin(), input_params.end());\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NormalizeFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status NormalizeFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  cudaStreamSynchronize(stream);\n  const auto input_bufs = data_ctx->Input(\"in_data\");\n  if (input_bufs->Size() == 0) {\n    MBLOG_ERROR << \"normalize flowunit input invalied\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_data\");\n  if (!BuildOutputBufferList(input_bufs, output_bufs)) 
{\n    MBLOG_ERROR << \"build output BufferList failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    auto input_buf = input_bufs->At(i);\n    int32_t width = 0;\n    int32_t height = 0;\n    modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n    if (!CheckBufferValid(input_buf, width, height, type)) {\n      MBLOG_FATAL << \"normalize flowunit input_buf invalied\";\n      continue;\n    }\n\n    auto out_buff = output_bufs->At(i);\n    out_buff->CopyMeta(input_buf);\n    out_buff->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n    auto *out_data = static_cast<float *>(out_buff->MutableData());\n\n    if (type == modelbox::ModelBoxDataType::MODELBOX_FLOAT) {\n      auto *in_data_f32 =\n          static_cast<float *>(const_cast<void *>(input_buf->ConstData()));\n      if (in_data_f32 == nullptr) {\n        MBLOG_ERROR << \"normalize flowunit data is nullptr\";\n        continue;\n      }\n\n      cudaMemcpy(out_data, in_data_f32, input_buf->GetBytes(),\n                 cudaMemcpyDeviceToDevice);\n    } else {\n      auto *in_data_uint8 =\n          static_cast<uint8_t *>(const_cast<void *>(input_buf->ConstData()));\n      if (in_data_uint8 == nullptr) {\n        MBLOG_ERROR << \"normalize flowunit data is nullptr\";\n        continue;\n      }\n\n      std::vector<uint8_t> host_data_uint8(input_buf->GetBytes());\n      cudaMemcpy(host_data_uint8.data(), in_data_uint8, input_buf->GetBytes(),\n                 cudaMemcpyDeviceToHost);\n      std::vector<float> host_data_f32(input_buf->GetBytes());\n      for (size_t i = 0; i < input_buf->GetBytes(); i++) {\n        host_data_f32[i] = host_data_uint8[i];\n      }\n\n      cudaMemcpy(out_data, host_data_f32.data(),\n                 input_buf->GetBytes() * sizeof(float), cudaMemcpyHostToDevice);\n    }\n\n    int32_t ret = NormalizeOperator(out_data, width, height);\n    if (ret < 0) {\n      MBLOG_ERROR << \"normalize FlowUnit 
process failed\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool NormalizeFlowUnit::CheckBufferValid(\n    const std::shared_ptr<modelbox::Buffer> &buffer, int32_t &width,\n    int32_t &height, modelbox::ModelBoxDataType &type) {\n  std::vector<size_t> shape;\n  if (!buffer->Get(\"shape\", shape)) {\n    MBLOG_ERROR << \"mean flowunit can not get shape from meta\";\n    return false;\n  }\n\n  if (shape.size() != SHAPE_SIZE) {\n    MBLOG_ERROR << \"mean flowunit only support hwc data\";\n    return false;\n  }\n\n  if (shape[2] != CHANNEL_NUM) {\n    MBLOG_ERROR << \"mean flowunit only support hwc and C is \" << CHANNEL_NUM;\n    return false;\n  }\n\n  height = shape[0];\n  width = shape[1];\n\n  if (!buffer->Get(\"type\", type)) {\n    MBLOG_ERROR << \"mean flowunit can not get input type from meta\";\n    return false;\n  }\n\n  return true;\n}\n\nbool NormalizeFlowUnit::NormalizeOperator(float *data, int32_t width,\n                                          int32_t height) {\n  ImageRect roi;\n  roi.x = roi.y = 0;\n  roi.width = width;\n  roi.height = height;\n\n  int32_t ret = Scale_32f_C1IR(data, width, roi, params_.normalizes_[0]);\n  if (ret < 0) {\n    MBLOG_ERROR << \"normalize FlowUnit process channel_0 failed\";\n    return ret;\n  }\n\n  ret =\n      Scale_32f_C1IR(data + width * height, width, roi, params_.normalizes_[1]);\n  if (ret < 0) {\n    MBLOG_ERROR << \"normalize FlowUnit process channel_1 failed\";\n    return ret;\n  }\n\n  ret = Scale_32f_C1IR(data + width * height * 2, width, roi,\n                       params_.normalizes_[2]);\n  if (ret < 0) {\n    MBLOG_ERROR << \"normalize FlowUnit process channel_2 failed\";\n  }\n\n  return ret;\n}\n\nMODELBOX_FLOWUNIT(NormalizeFlowUnit, desc) {\n  desc.SetFlowUnitName(\"normalize\");\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_data\", FLOWUNIT_TYPE));\n  
desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_data\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"standard_deviation_inverse\", \"string\", true, \"\", \"the normalize param\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/normalize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NORMALIZE_H_\n#define MODELBOX_FLOWUNIT_NORMALIZE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <normalize.h>\n#include <normalize_flowunit_base.h>\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_NAME = \"normalize\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: The operator is used to normalize for tensor data, \"\n    \"for example the image(RGB/BGR). \\n\"\n    \"\\t@Port parameter: The input port and the output buffer type are tensor. 
\\n\"\n    \"\\t  The tensor type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: \";\n\nclass NormalizeFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  NormalizeFlowUnit();\n  ~NormalizeFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  bool CheckBufferValid(const std::shared_ptr<modelbox::Buffer> &buffer,\n                        int32_t &width, int32_t &height,\n                        modelbox::ModelBoxDataType &type);\n  bool NormalizeOperator(float *data, int32_t width, int32_t height);\n\n  NormalizeParams params_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_NORMALIZE_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize/normalize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n#include <cuda_runtime.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass NormalizeGpuFlowUnitTest : public testing::Test {\n public:\n  NormalizeGpuFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  }\n\n  void TearDown() override { driver_flow_->Clear(); };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nStatus NormalizeGpuFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc 
desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_normalize_0\");\n    desc_flowunit.SetDescription(\"The test input data, 0 inputs 1 output\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit = std::string(TEST_DRIVER_DIR) +\n                                     \"/libmodelbox-unit-cpu-test_normalize_0.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_normalize_0\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              modelbox::Status ret = modelbox::STATUS_FAULT;\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n                return ret;\n              }\n\n              auto buffer_list = ext_data->CreateBufferList();\n              buffer_list->Build({10 * sizeof(int)});\n              auto* data = (int*)buffer_list->MutableData();\n              for (size_t i = 0; i < 10; i++) {\n                data[i] = i;\n              }\n\n              auto status = ext_data->Send(buffer_list);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                
MBLOG_ERROR << \"external data close failed:\" << status;\n                return status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_0 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_0 \"\n                          << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              auto output_buf_1 = op_ctx->Output(\"Out_1\");\n              std::vector<size_t> data_1_shape = {5 * 4 * 3 * sizeof(uint8_t)};\n              output_buf_1->Build(data_1_shape);\n              auto* dev_data_1 =\n                  static_cast<uint8_t*>(output_buf_1->At(0)->MutableData());\n              for (size_t i = 0; i < 3; ++i) {\n                for (size_t j = 0; j < 5; j++) {\n                  for (size_t k = 0; k < 4; k++) {\n                    dev_data_1[i * 20 + j * 4 + k] = static_cast<uint8_t>(255);\n                  }\n                }\n              }\n\n              std::vector<size_t> shape{4, 5, 3};\n              output_buf_1->Set(\"shape\", shape);\n              output_buf_1->Set(\"type\", ModelBoxDataType::MODELBOX_UINT8);\n\n              MBLOG_DEBUG << \"test_normalize_0 gen data, 0\"\n                          << output_buf_1->GetBytes();\n\n              return modelbox::STATUS_OK;\n            }));\n\n    
EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_normalize_0\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_normalize_1\");\n    desc_flowunit.SetDescription(\"The test output data, 1 input 0 outputs\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit = std::string(TEST_DRIVER_DIR) +\n                                     \"/libmodelbox-unit-cpu-test_normalize_1.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_normalize_1\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_1 \"\n                          << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_DEBUG << \"test_normalize_1 \"\n    
                      << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              auto input_bufs = op_ctx->Input(\"In_1\");\n              EXPECT_EQ(input_bufs->Size(), 1);\n              for (size_t i = 0; i < input_bufs->Size(); ++i) {\n                auto input_buf = input_bufs->At(i);\n                std::vector<size_t> shape;\n                input_buf->Get(\"shape\", shape);\n                size_t width = shape[1];\n                size_t height = shape[0];\n                EXPECT_EQ(width, 5);\n                EXPECT_EQ(height, 4);\n\n                const auto* const in_data =\n                    static_cast<const float*>(input_buf->ConstData());\n                for (size_t c = 0; c < 3; c++) {\n                  for (size_t j = 0; j < width; j++) {\n                    for (size_t k = 0; k < height; k++) {\n                      float data = in_data[c * width * height + j * height + k];\n                      if (c == 0) {\n                        EXPECT_NEAR(data, 1, 0.0001);\n                      } else if (c == 1) {\n                        EXPECT_NEAR(data, 255, 0.0001);\n                      } else {\n                        EXPECT_NEAR(data, 255, 0.0001);\n                      }\n                    }\n                  }\n                }\n              }\n\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_normalize_1\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return 
STATUS_OK;\n}\n\nstd::shared_ptr<DriverFlowTest> NormalizeGpuFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(NormalizeGpuFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_normalize_0[type=flowunit, flowunit=test_normalize_0, device=cpu,deviceid=0, label=\"<Out_1>\"] \n          normalize[type=flowunit, flowunit=normalize, device=cuda, deviceid=0, label=\"<in_data> | <out_data>\", standard_deviation_inverse=\"0.003921568627451,1,1\"]\n          test_normalize_1[type=flowunit, flowunit=test_normalize_1, device=cpu, deviceid=0, label=\"<In_1>\"] \n\n          test_normalize_0:Out_1 -> normalize:in_data\n          normalize:out_data -> test_normalize_1:In_1\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"InitUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"image_preprocess\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c *.cu)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_NORMALIZE_INCLUDE})\n\n# for supress c++ compile wanring\nset(MODELBOX_UNIT_SHARED libmodelbox_unit_${UNIT_DEVICE}_${UNIT_NAME}_shared)\nset(MODELBOX_UNIT_SHARED_OUTPUT_NAME libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    OUTPUT_NAME ${MODELBOX_UNIT_SHARED_OUTPUT_NAME}\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n    DEFINE_SYMBOL \"\"\n)\n\nfind_cuda_helper_libs(nppial)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppial_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\nadd_dependencies(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_NORMALIZE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n    COMPONENT cuda-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n\ninstall(DIRECTORY ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR} \n    COMPONENT cuda-device-flowunit-devel\n    )\n\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NORMALIZE_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/normalize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"normalize_flowunit.h\"\n\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"normalize_flowunit_cu.h\"\n\nNormalizeFlowUnitV2::NormalizeFlowUnitV2() = default;\nNormalizeFlowUnitV2::~NormalizeFlowUnitV2() = default;\n\nconstexpr int COLOR_CHANNEL_COUNT = 3;\nconstexpr int GRAY_CHANNEL_COUNT = 1;\n\nmodelbox::Status NormalizeFlowUnitV2::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (!opts->Contain(\"output_layout\")) {\n    MBLOG_ERROR << \"config must has output_layout\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  output_layout_ = opts->GetString(\"output_layout\", \"\");\n  if (output_layout_ != \"chw\" && output_layout_ != \"hwc\") {\n    MBLOG_ERROR << \"Invalid config output_layout = \" << output_layout_;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  std::vector<float> default_mean{0, 0, 0};\n  mean_ = opts->GetFloats(\"mean\", default_mean);\n\n  std::vector<float> default_std{1, 1, 1};\n  std_ = opts->GetFloats(\"standard_deviation_inverse\", default_std);\n\n  auto device = GetBindDevice();\n  mean_buffer_ = std::make_shared<modelbox::Buffer>(device);\n  mean_buffer_->BuildFromHost(mean_.data(), mean_.size() * sizeof(float));\n\n  std_buffer_ = std::make_shared<modelbox::Buffer>(device);\n  std_buffer_->BuildFromHost(std_.data(), std_.size() * 
sizeof(float));\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status GetParm(const std::shared_ptr<modelbox::Buffer> &buffer,\n                         std::vector<size_t> &shape, std::string &input_layout,\n                         modelbox::ModelBoxDataType &type) {\n  if (!buffer->Get(\"shape\", shape)) {\n    MBLOG_ERROR << \"can not get shape from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (shape.size() != 1 && shape.size() != 3) {\n    MBLOG_ERROR << \"unsupport image shape: \" << shape.size();\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!buffer->Get(\"layout\", input_layout)) {\n    MBLOG_ERROR << \"can not get layout from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (input_layout != \"chw\" && input_layout != \"hwc\") {\n    MBLOG_ERROR << \"unsupport layout: \" << input_layout\n                << \" support chw or hwc\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (!buffer->Get(\"type\", type)) {\n    MBLOG_ERROR << \"can not get type from buffer\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (type != modelbox::ModelBoxDataType::MODELBOX_UINT8) {\n    MBLOG_ERROR << \"unsupport type: \" << type\n                << \" support modelbox::ModelBoxDataType::MODELBOX_UINT8\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status GetAndCheckParm(\n    const std::shared_ptr<modelbox::BufferList> &input,\n    std::vector<size_t> &shape, std::string &input_layout,\n    modelbox::ModelBoxDataType &type) {\n  std::vector<size_t> tmp_shape;\n  std::string tmp_input_layout;\n  modelbox::ModelBoxDataType tmp_type = modelbox::MODELBOX_TYPE_INVALID;\n\n  for (auto &buffer : *input) {\n    if (buffer == *input->begin()) {\n      if (!GetParm(buffer, shape, input_layout, type)) {\n        return modelbox::STATUS_INVALID;\n      }\n    }\n\n    if (!GetParm(buffer, tmp_shape, tmp_input_layout, tmp_type)) {\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_shape 
!= shape) {\n      MBLOG_ERROR << \"all image must has same shape.\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_input_layout != input_layout) {\n      MBLOG_ERROR << \"all image must has same layout.\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (tmp_type != type) {\n      MBLOG_ERROR << \"all image must has same type.\";\n      return modelbox::STATUS_INVALID;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\n/* run when processing data */\nmodelbox::Status NormalizeFlowUnitV2::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto input = data_ctx->Input(\"in_image\");\n  auto output = data_ctx->Output(\"out_data\");\n\n  std::vector<size_t> shape;\n  std::string input_layout;\n  modelbox::ModelBoxDataType type = modelbox::MODELBOX_TYPE_INVALID;\n\n  auto status = GetAndCheckParm(input, shape, input_layout, type);\n  if (!status) {\n    return status;\n  }\n\n  int H = 0;\n  int W = 0;\n  int C = 0;\n  if (input_layout == \"hwc\") {\n    H = shape[0];\n    W = shape[1];\n    C = shape[2];\n  } else {\n    MBLOG_ERROR << \"only support hwc, but input layout is \" << input_layout;\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (C != GRAY_CHANNEL_COUNT && C != COLOR_CHANNEL_COUNT) {\n    MBLOG_ERROR << \"invalid image channels: \" << C << \" support 1 or 3\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  const auto *data = input->ConstData();\n  // TODO sizeof arg used config\n  output->Build(std::vector<size_t>(input->Size(), H * W * C * sizeof(float)));\n  std::vector<size_t> output_shape;\n  if (output_layout_ == \"hwc\") {\n    Normalize((uint8_t *)data, input->Size(), H, W, C,\n              (const float *)mean_buffer_->ConstData(),\n              (const float *)std_buffer_->ConstData(),\n              (float *)output->MutableData(), stream);\n    output_shape = {(size_t)H, (size_t)W, (size_t)C};\n  } else {\n    NormalizeAndCHW((uint8_t *)data, input->Size(), H, W, C,\n           
         (const float *)mean_buffer_->ConstData(),\n                    (const float *)std_buffer_->ConstData(),\n                    (float *)output->MutableData(), stream);\n    output_shape = {(size_t)C, (size_t)H, (size_t)W};\n  }\n\n  output->CopyMeta(input);\n  output->Set(\"layout\", output_layout_);\n  output->Set(\"shape\", output_shape);\n  output->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(NormalizeFlowUnitV2, desc) {\n  desc.SetFlowUnitName(\"image_preprocess\");\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(\n      modelbox::FlowUnitInput(\"in_image\", modelbox::DEVICE_TYPE));\n  desc.AddFlowUnitOutput(\n      modelbox::FlowUnitOutput(\"out_data\", modelbox::DEVICE_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"output_layout\", \"string\", true, \"\", \"the normalize output layout\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"mean\", \"string\", false, \"\",\n                                                  \"the normalize mean\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"standard_deviation_inverse\", \"string\", false, \"\", \"the normalize std\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/normalize_flowunit.cu",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"normalize_flowunit_cu.h\"\n\n__global__ void NormalizeAndCHWKernel(const uint8_t *input, int H, int W, int C,\n                                      const float *normalize_mean,\n                                      const float *normalize_std,\n                                      float *output) {\n  const int n = blockIdx.x;\n  const int stride = H * W * C;\n\n  const uint8_t *in = input + n * stride;\n  float *out = output + n * stride;\n  int c = 0;\n  int h = 0;\n  int w = 0;\n\n  for (c = 0; c < C; ++c) {\n    for (h = threadIdx.y; h < H; h += blockDim.y) {\n      for (w = threadIdx.x; w < W; w += blockDim.x) {\n        out[w + c * H * W + h * W] =\n            static_cast<float>((static_cast<float>(in[c + h * W * C + w * C]) -\n                                normalize_mean[c]) *\n                               normalize_std[c]);\n      }\n    }\n  }\n}\n\n__global__ void NormalizeKernel(const uint8_t *input, int H, int W, int C,\n                                const float *normalize_mean,\n                                const float *normalize_std, float *output) {\n  const int n = blockIdx.x;\n  const int stride = H * W * C;\n\n  const uint8_t *in = input + n * stride;\n  float *out = output + n * stride;\n\n  int c = 0;\n  int h = 0;\n  int w = 0;\n\n  for (c = 0; c < C; ++c) {\n    for 
(h = threadIdx.y; h < H; h += blockDim.y) {\n      for (w = threadIdx.x; w < W; w += blockDim.x) {\n        out[c + h * W * C + w * C] =\n            static_cast<float>((static_cast<float>(in[c + h * W * C + w * C]) -\n                                normalize_mean[c]) *\n                               normalize_std[c]);\n      }\n    }\n  }\n}\n\nvoid NormalizeAndCHW(const uint8_t *input, int N, int H, int W, int C,\n                     const float *normalize_mean, const float *normalize_std,\n                     float *output, cudaStream_t stream) {\n  constexpr int BLOCK_X = 32;\n  constexpr int BLOCK_Y = 32;\n\n  NormalizeAndCHWKernel<<<N, dim3(BLOCK_X, BLOCK_Y), 0, stream>>>(\n      input, H, W, C, normalize_mean, normalize_std, output);\n  return;\n}\n\nvoid Normalize(const uint8_t *input, int N, int H, int W, int C,\n               const float *normalize_mean, const float *normalize_std,\n               float *output, cudaStream_t stream) {\n  constexpr int BLOCK_X = 32;\n  constexpr int BLOCK_Y = 32;\n\n  NormalizeKernel<<<N, dim3(BLOCK_X, BLOCK_Y), 0, stream>>>(\n      input, H, W, C, normalize_mean, normalize_std, output);\n  return;\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/normalize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_NORMALIZE_H_\n#define MODELBOX_FLOWUNIT_NORMALIZE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"image_preprocess\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"A cuda normalize flowunit, the operator is used to normalize for the \"\n    \"image(RGB/BGR).\";\n\n\nclass NormalizeFlowUnitV2 : public modelbox::CudaFlowUnit {\n public:\n  NormalizeFlowUnitV2();\n  ~NormalizeFlowUnitV2() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override { return modelbox::STATUS_OK; }\n\n  /* run when processing data */\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  }\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  }\n\n  modelbox::Status DataGroupPre(\n      
std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  std::string output_layout_;\n  std::string output_dtype_;\n  std::vector<float> mean_;\n  std::vector<float> std_;\n\n  std::shared_ptr<modelbox::Buffer> mean_buffer_;\n  std::shared_ptr<modelbox::Buffer> std_buffer_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_NORMALIZE_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/normalize_flowunit_cu.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <stdint.h>\n#include \"cuda_runtime.h\"\n\nvoid NormalizeAndCHW(const uint8_t *in_batch, int N, int H, int W, int C,\n                     const float *mean, const float *inv_std, float *out_batch,\n                     cudaStream_t stream);\n\nvoid Normalize(const uint8_t *in_batch, int N, int H, int W, int C,\n               const float *mean, const float *inv_std, float *out_batch,\n               cudaStream_t stream);"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/normalize_v2/normalize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n#include <cuda_runtime.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass NormalizeV2FlowUnitTest : public testing::Test {\n public:\n  NormalizeV2FlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> NormalizeV2FlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus NormalizeV2FlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    
MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"copy\");\n    desc_flowunit.SetDescription(\"just copy data flowunit on CPU\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-copy.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"copy\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"input\"));\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"output\"));\n    mock_flowunit_desc->SetFlowType(modelbox::NORMAL);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              auto input = op_ctx->Input(\"input\");\n              auto output = op_ctx->Output(\"output\");\n\n              for 
(size_t i = 0; i < input->Size(); ++i) {\n                output->PushBack(input->At(i));\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"copy\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(NormalizeV2FlowUnitTest, NormalizeV2Test) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input1[type=input]   \n          normalize_v2[type=flowunit, flowunit=image_preprocess, device=cuda, deviceid=0, label=\"<in_image> | <out_data>\", output_layout=\"hwc\", mean=\"0.0, 0.0, 0.0\", standard_deviation_inverse=\"1.0, 1.0, 1.0\"]\n          normalize_v2_chw[type=flowunit, flowunit=image_preprocess, device=cuda, deviceid=0, label=\"<in_image> | <out_data>\", output_layout=\"chw\", mean=\"0.0, 0.0, 0.0\", standard_deviation_inverse=\"1.0, 1.0, 1.0\"]\n          copy[type=flowunit, flowunit=copy, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n          copy_chw[type=flowunit, flowunit=copy, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n          output_hwc[type=output]\n          output_chw[type=output]      \n\n          input1 -> normalize_v2:in_image\n          input1 -> normalize_v2_chw:in_image\n          normalize_v2:out_data -> copy:input\n          normalize_v2_chw:out_data -> copy_chw:input\n          copy:output -> output_hwc\n          copy_chw:output -> output_chw\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"InitUnit\", 
toml_content, -1);\n  auto flow = driver_flow->GetFlow();\n\n  auto ext_data = flow->CreateExternalDataMap();\n\n  {\n    std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n    cv::Mat bgr_img = cv::imread(gimg_path);\n\n    cv::Mat bgr_img_float;\n    cv::Mat bgr_img_float_chw;\n    bgr_img.convertTo(bgr_img_float, CV_32FC3);\n    bgr_img.convertTo(bgr_img_float_chw, CV_32FC3);\n\n    int height = bgr_img_float.rows;\n    int width = bgr_img_float.cols;\n    int channel = bgr_img_float.channels();\n\n    for (int c = 0; c < channel; ++c) {\n      for (int h = 0; h < height; ++h) {\n        for (int w = 0; w < width; ++w) {\n          int dstIdx = c * height * width + h * width + w;\n          int srcIdx = h * width * channel + w * channel + c;\n          *((float*)((void*)bgr_img_float_chw.data) + dstIdx) =\n              *(((float*)((void*)bgr_img_float.data)) + srcIdx);\n        }\n      }\n    }\n\n    std::vector<std::string> output_name({\"output_hwc\", \"output_chw\"});\n    std::vector<cv::Mat> opencv_out_check({bgr_img_float, bgr_img_float_chw});\n\n    auto color_bl = ext_data->CreateBufferList();\n    size_t img_size = bgr_img.total() * bgr_img.elemSize();\n    color_bl->BuildFromHost({img_size}, bgr_img.data, img_size);\n    // HWC\n    color_bl->Set(\"shape\", std::vector<size_t>(\n                               {static_cast<size_t>(bgr_img.rows),\n                                static_cast<size_t>(bgr_img.cols),\n                                static_cast<size_t>(bgr_img.channels())}));\n    color_bl->Set(\"layout\", std::string(\"hwc\"));\n    color_bl->Set(\"type\", ModelBoxDataType::MODELBOX_UINT8);\n    color_bl->Set(\"pix_fmt\", \"bgr\");\n\n    auto status = ext_data->Send(\"input1\", color_bl);\n    EXPECT_EQ(status, STATUS_OK);\n\n    OutputBufferList map_buffer_list;\n\n    status = ext_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n\n    for (size_t j = 0; j < output_name.size(); j++) {\n      auto 
buffer_list = map_buffer_list[output_name[j]];\n      EXPECT_EQ(buffer_list->Size(), 1);\n      EXPECT_EQ(buffer_list->GetBytes(),\n                opencv_out_check[j].total() * opencv_out_check[j].elemSize());\n      ModelBoxDataType type = MODELBOX_TYPE_INVALID;\n      buffer_list->At(0)->Get(\"type\", type);\n      EXPECT_EQ(type, ModelBoxDataType::MODELBOX_FLOAT);\n      auto* opencv_data = (float*)opencv_out_check[j].data;\n      const auto* out_data = (float*)(buffer_list->ConstBufferData(0));\n      size_t count = buffer_list->GetBytes() / sizeof(float);\n      for (size_t k = 0; k < count; ++k) {\n        EXPECT_TRUE(*(out_data + k) - *(opencv_data + k) < 0.00000001);\n        EXPECT_TRUE(*(opencv_data + k) - *(out_data + k) < 0.00000001);\n      }\n    }\n  }\n\n  MBLOG_INFO << \"Send Shutdown\";\n  auto status = ext_data->Shutdown();\n  EXPECT_EQ(status, STATUS_OK);\n\n  flow->Wait(1000);\n}  // namespace modelbox\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_crop/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"crop\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_NPPI_CROP_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nfind_cuda_helper_libs(nppidei) \ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppidei_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_IMAGE_PROCESS_LIBRARY})\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cuda-device-flowunit)\n\nset(LIBMODELBOX_FLOWUNIT_NPPI_CROP_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NPPI_CROP_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NPPI_CROP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_NPPI_CROP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_crop/nppi_crop_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"nppi_crop_flowunit.h\"\n\n#include <npp.h>\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nNppiCropFlowUnit::NppiCropFlowUnit() = default;\nNppiCropFlowUnit::~NppiCropFlowUnit() = default;\n\nstd::vector<std::string> kNppiCropMethod = {\"u8c3r\"};\n\nmodelbox::Status NppiCropFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiCropFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto input_img_bufs = data_ctx->Input(\"in_image\");\n  if (input_img_bufs->Size() <= 0) {\n    auto errMsg =\n        \"input images size is \" + std::to_string(input_img_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto input_box_bufs = data_ctx->Input(\"in_region\");\n  if (input_box_bufs->Size() <= 0) {\n    auto errMsg =\n        \"in_region roi box batch is \" + std::to_string(input_box_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  if (input_img_bufs->Size() != input_box_bufs->Size()) {\n    auto errMsg = \"in_image batch is not match in_region batch. 
in_image is \" +\n                  std::to_string(input_img_bufs->Size()) + \",in_region is \" +\n                  std::to_string(input_box_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  auto output_bufs = data_ctx->Output(\"out_image\");\n\n  std::vector<size_t> shape_vector;\n  int32_t channel = RGB_CHANNLES;\n  for (size_t i = 0; i < input_img_bufs->Size(); ++i) {\n    const auto *buff = input_box_bufs->ConstBufferData(i);\n    if (buff == nullptr) {\n      MBLOG_WARN << \"input buffer \" << i << \" is invalid.\";\n      continue;\n    }\n    const auto *bbox = static_cast<const imageprocess::RoiBox *>(buff);\n    if (bbox == nullptr) {\n      MBLOG_WARN << \"buffer is not box, buffer index: \" << i;\n      continue;\n    }\n\n    MBLOG_DEBUG << \"crop bbox : \" << bbox->x << \" \" << bbox->y << \" \" << bbox->w\n                << \" \" << bbox->h;\n    shape_vector.push_back((bbox->w) * (bbox->h) * channel * sizeof(u_char));\n  }\n\n  output_bufs->Build(shape_vector);\n\n  output_bufs->CopyMeta(input_img_bufs);\n\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"sync stream  \" << stream << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < input_img_bufs->Size(); ++i) {\n    auto ret = ProcessOneImage(input_img_bufs, input_box_bufs, output_bufs, i);\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"nppi crop image failed, index is \" << i;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiCropFlowUnit::ProcessOneImage(\n    std::shared_ptr<modelbox::BufferList> &input_img_buffer_list,\n    std::shared_ptr<modelbox::BufferList> &input_box_buffer_list,\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list, int index) {\n  ImageSize src_size;\n  std::string pix_fmt;\n\n  bool exists = false;\n\n  exists = 
input_img_buffer_list->At(index)->Get(\"height\", src_size.height);\n  if (!exists) {\n    MBLOG_ERROR << \"meta don't have key height\";\n    return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key height\"};\n  }\n\n  exists = input_img_buffer_list->At(index)->Get(\"width\", src_size.width);\n  if (!exists) {\n    MBLOG_ERROR << \"meta don't have key width\";\n    return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key width\"};\n  }\n\n  exists = input_img_buffer_list->At(index)->Get(\"pix_fmt\", pix_fmt);\n  if (!exists &&\n      !input_img_buffer_list->At(index)->Get(\"channel\", src_size.channel)) {\n    MBLOG_ERROR << \"meta don't have key pix_fmt or channel\";\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"meta don't have key pix_fmt or channel\"};\n  }\n\n  if (exists && pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n    MBLOG_ERROR << \"unsupport pix format.\";\n    return {modelbox::STATUS_NOTSUPPORT, \"unsupport pix format.\"};\n  }\n\n  src_size.channel = RGB_CHANNLES;\n\n  MBLOG_DEBUG << \"Input image width \" << src_size.width << \" height \"\n              << src_size.height << \" channel \" << src_size.channel;\n\n  const auto *bbox = static_cast<const imageprocess::RoiBox *>(\n      input_box_buffer_list->ConstBufferData(index));\n  if (bbox == nullptr) {\n    MBLOG_ERROR << \"input data at \" << index << \" is invalid.\";\n    return modelbox::STATUS_NODATA;\n  }\n\n  if (!imageprocess::CheckRoiBoxVaild(bbox, src_size.width, src_size.height)) {\n    return {modelbox::STATUS_FAULT, \"roi box param is invalid!\"};\n  }\n\n  imageprocess::RoiBox dst_size;\n  dst_size.w = bbox->w;\n  dst_size.h = bbox->h;\n  dst_size.x = bbox->x;\n  dst_size.y = bbox->y;\n\n  const auto *input_data = static_cast<const u_char *>(\n      input_img_buffer_list->ConstBufferData(index));\n\n  auto *output_data =\n      static_cast<u_char *>(output_buffer_list->MutableBufferData(index));\n\n  modelbox::Status ret = modelbox::STATUS_OK;\n  ret = 
NppiCrop_u8_c3r(input_data, src_size, output_data, dst_size);\n  if (ret != modelbox::STATUS_OK) {\n    return ret;\n  }\n\n  auto output_buffer = output_buffer_list->At(index);\n  output_buffer->Set(\"width\", dst_size.w);\n  output_buffer->Set(\"height\", dst_size.h);\n  output_buffer->Set(\"width_stride\", dst_size.w * 3);\n  output_buffer->Set(\"height_stride\", dst_size.h);\n  output_buffer->Set(\"channel\", src_size.channel);\n  output_buffer->Set(\"pix_fmt\", pix_fmt);\n  output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  output_buffer->Set(\n      \"shape\", std::vector<size_t>{(size_t)dst_size.h, (size_t)dst_size.w, 3});\n  output_buffer->Set(\"layout\", std::string(\"hwc\"));\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiCropFlowUnit::NppiCrop_u8_c3r(const u_char *p_src_data,\n                                                   ImageSize src_size,\n                                                   u_char *p_dst_data,\n                                                   imageprocess::RoiBox dst_size) {\n  const Npp8u *p_src = p_src_data;\n\n  p_src =\n      p_src + (dst_size.y * src_size.width + dst_size.x) * sizeof(u_char) * 3;\n\n  Npp8u *p_dst = p_dst_data;\n\n  NppiSize dst_npp_size;\n  dst_npp_size.width = dst_size.w;\n  dst_npp_size.height = dst_size.h;\n\n  NppStatus status =\n      nppiCopy_8u_C3R(p_src, src_size.width * sizeof(u_char) * 3, p_dst,\n                      dst_size.w * sizeof(u_char) * 3, dst_npp_size);\n  if (NPP_SUCCESS != status) {\n    MBLOG_ERROR << \"nppi error code \" << status;\n    std::string errMsg = \"cuda Crop failed, error code \" +\n                         std::to_string(status) +\n                         \", src image size: \" + std::to_string(src_size.width) +\n                         \" x \" + std::to_string(src_size.height);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_NODATA, errMsg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(NppiCropFlowUnit, 
desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_image\", \"cuda\"));\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_region\", \"cpu\"));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\", \"cuda\"));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_crop/nppi_crop_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NPPI_CROP_GPU_H_\n#define MODELBOX_FLOWUNIT_NPPI_CROP_GPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <nppi_data_exchange_and_initialization.h>\n\n#include <typeinfo>\n\n#include \"image_process.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"crop\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A crop flowunit on cuda device. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t  The other input port 'in_region' buffer type is rectangle, the memory \"\n    \"arrangement is [x,y,w,h].\\n\"\n    \"\\t  it contain the following meta fields: \\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': \"\n    \"[rgb, bgr], 'layout': [hwc]. 
One image can only be cropped with one \"\n    \"rectangle and output one crop image.\";\nconst int RGB_CHANNLES = 3;\n\ntypedef struct {\n  int32_t width;\n  int32_t height;\n  int32_t channel;\n} ImageSize;\nclass NppiCropFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  NppiCropFlowUnit();\n  ~NppiCropFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override { return modelbox::STATUS_OK; };\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  modelbox::Status NppiCrop_u8_c3r(const u_char *p_src_data, ImageSize src_size,\n                                   u_char *p_dst_data, imageprocess::RoiBox dst_size);\n\n  modelbox::Status ProcessOneImage(\n      std::shared_ptr<modelbox::BufferList> &input_img_buffer_list,\n      std::shared_ptr<modelbox::BufferList> &input_box_buffer_list,\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list, int index);\n};\n\n#endif  // MODELBOX_FLOWUNIT_NPPI_CROP_GPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_crop/nppi_crop_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <cuda_runtime.h>\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass NppiCropFlowUnitTest : public testing::Test {\n public:\n  NppiCropFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> NppiCropFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus NppiCropFlowUnitTest::AddMockFlowUnit() {\n  typedef struct RoiBox {\n    int32_t x, y, width, height;\n  } RoiBox;\n\n  {\n    
auto mock_desc = GenerateFlowunitDesc(\"c3r_test_0_1_nppi_crop\", {},\n                                          {\"Out_img\", \"Out_box\"});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n    auto open_func = [=](const std::shared_ptr<Configuration>& opts,\n                         const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n      std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n      mock_flowunit_wp = mock_flowunit;\n      auto spt = mock_flowunit_wp.lock();\n      auto ext_data = spt->CreateExternalData();\n      if (!ext_data) {\n        MBLOG_ERROR << \"can not get external data.\";\n      }\n\n      auto buffer_list = ext_data->CreateBufferList();\n      buffer_list->Build({10 * sizeof(int)});\n      auto* data = (int*)buffer_list->MutableData();\n      for (size_t i = 0; i < 10; i++) {\n        data[i] = i;\n      }\n\n      auto status = ext_data->Send(buffer_list);\n      if (!status) {\n        MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n        return status;\n      }\n\n      status = ext_data->Close();\n      if (!status) {\n        MBLOG_ERROR << \"external data close failed:\" << status;\n        return status;\n      }\n\n      return modelbox::STATUS_OK;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      MBLOG_INFO << \"test_0_1_nppi_crop process\";\n\n      auto output_img_bufs = data_ctx->Output(\"Out_img\");\n\n      uint32_t batch_size = 10;\n\n      std::string img_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n      cv::Mat img_data = cv::imread(img_path);\n      MBLOG_INFO << \"image col \" << img_data.cols << \"  row \" << img_data.rows\n                 << \" channel \" << img_data.channels();\n      std::vector<size_t> img_shape_vector(\n          batch_size, img_data.total() * img_data.elemSize());\n\n      
output_img_bufs->Build(img_shape_vector);\n\n      for (size_t i = 0; i < batch_size; ++i) {\n        std::string img_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n        cv::Mat img_data = cv::imread(img_path);\n        int32_t cols = img_data.cols;\n        int32_t rows = img_data.rows;\n        int32_t channels = img_data.channels();\n        output_img_bufs->At(i)->Set(\"width\", cols);\n        output_img_bufs->At(i)->Set(\"height\", rows);\n        output_img_bufs->At(i)->Set(\"channel\", channels);\n        auto* output_img_data =\n            static_cast<uchar*>(output_img_bufs->MutableBufferData(i));\n        memcpy_s(output_img_data, output_img_bufs->At(i)->GetBytes(),\n                 img_data.data, img_data.total() * img_data.elemSize());\n      }\n\n      auto output_box_bufs = data_ctx->Output(\"Out_box\");\n\n      std::vector<size_t> box_shape_vector(batch_size, sizeof(RoiBox));\n\n      output_box_bufs->Build(box_shape_vector);\n\n      for (size_t i = 0; i < 5; ++i) {\n        auto* output_box1_data = output_box_bufs->MutableBufferData(2 * i);\n        std::shared_ptr<RoiBox> bbox1 = std::make_shared<RoiBox>();\n        bbox1->width = 100;\n        bbox1->height = 110;\n        bbox1->x = 30;\n        bbox1->y = 100;\n        memcpy_s(output_box1_data, sizeof(RoiBox), bbox1.get(), sizeof(RoiBox));\n\n        auto* output_box2_data = output_box_bufs->MutableBufferData(2 * i + 1);\n        std::shared_ptr<RoiBox> bbox2 = std::make_shared<RoiBox>();\n        bbox2->width = 50;\n        bbox2->height = 90;\n        bbox2->x = 60;\n        bbox2->y = 130;\n        memcpy_s(output_box2_data, sizeof(RoiBox), bbox2.get(), sizeof(RoiBox));\n      }\n      MBLOG_INFO << \"nppi_test_0_1 gen data finish\";\n\n      return modelbox::STATUS_OK;\n    };\n    auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterOpenFunc(open_func);\n    mock_funcitons->RegisterProcessFunc(process_func);\n    
driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"c3r_test_1_0_nppi_crop\", {\"In_img\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(16);\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& op_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) -> Status {\n      MBLOG_INFO << \"c3r_test_1_0_nppi_crop process\";\n\n      auto input_buf = op_ctx->Input(\"In_img\");\n      if (input_buf->Size() <= 0) {\n        auto errMsg =\n            \"input images size is \" + std::to_string(input_buf->Size());\n        MBLOG_ERROR << errMsg;\n      }\n\n      for (size_t i = 0; i < input_buf->Size(); ++i) {\n        int32_t width = 0;\n        int32_t height = 0;\n        int32_t channels = 0;\n\n        bool exists = false;\n\n        exists = input_buf->At(i)->Get(\"width\", width);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key width\";\n        }\n        exists = input_buf->At(i)->Get(\"height\", height);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key height\";\n        }\n        exists = input_buf->At(i)->Get(\"channel\", channels);\n        if (!exists) {\n          MBLOG_ERROR << \"meta don't have key channel\";\n        }\n\n        const auto* input_data =\n            static_cast<const uchar*>(input_buf->ConstBufferData(i));\n\n        cv::Mat img_data(cv::Size(width, height), CV_8UC3);\n        memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                 input_data, input_buf->At(i)->GetBytes());\n        std::string name =\n            std::string(TEST_DATA_DIR) + \"/test\" + std::to_string(i) + \".jpg\";\n        cv::imwrite(name, img_data);\n      }\n\n      MBLOG_INFO << \"c3r_test_1_0_nppi_crop process data finish\";\n\n      return modelbox::STATUS_STOP;\n    };\n    auto mock_funcitons = 
std::make_shared<MockFunctionCollection>();\n    mock_funcitons->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_funcitons->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n  return STATUS_OK;\n}\n\nTEST_F(NppiCropFlowUnitTest, TestC3r) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          c3r_test_0_1_nppi_crop[type=flowunit, flowunit=c3r_test_0_1_nppi_crop,\n          device=cpu, deviceid=0, label=\"<Out_img> | <Out_box>\",batch_size=10]\n          nppi_crop[type=flowunit, flowunit=crop, label=\"<in_image> | <in_region> | <out_image>\",batch_size=10]\n          c3r_test_1_0_nppi_crop[type=flowunit, flowunit=c3r_test_1_0_nppi_crop,\n          device=cpu, deviceid=0, label=\"<In_img>\",batch_size=10]\n\n          c3r_test_0_1_nppi_crop:Out_img -> nppi_crop:in_image\n          c3r_test_0_1_nppi_crop:Out_box -> nppi_crop:in_region\n          nppi_crop:out_image -> c3r_test_1_0_nppi_crop:In_img\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"TestC3r\", toml_content, 3 * 1000);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto& elem : filePath) {\n    MBLOG_INFO << \"filePath: \" << elem;\n  }\n\n  for (size_t i = 0; i < 5; i++) {\n    for (size_t j = 0; j < 2; j++) {\n      std::string expected_file_path = std::string(TEST_ASSETS) +\n                                       \"/crop_result_\" + std::to_string(j) +\n                                       \".jpg\";\n      cv::Mat expected_img = cv::imread(expected_file_path);\n\n      std::string crop_result_file_path = std::string(TEST_DATA_DIR) + \"/test\" +\n    
                                      std::to_string(2 * i + j) + \".jpg\";\n      cv::Mat crop_result_img = cv::imread(crop_result_file_path);\n\n      int result_data_size =\n          crop_result_img.total() * crop_result_img.elemSize();\n      int expected_data_size = expected_img.total() * expected_img.elemSize();\n      EXPECT_EQ(result_data_size, expected_data_size);\n\n      int ret =\n          memcmp(crop_result_img.data, expected_img.data, result_data_size);\n      EXPECT_EQ(ret, 0);\n\n      auto rmret = remove(crop_result_file_path.c_str());\n      EXPECT_EQ(rmret, 0);\n    }\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_resize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"resize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\nfind_cuda_helper_libs(nppig) \ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppig_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${OPENCV_CORE_LIBRARY} \n        ${OPENCV_IMGPROC_LIBRARY} \n        ${CUDA_nppig_LIBRARY}\n        ${OPENCV_IMGCODECS_LIBRARY})\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cuda-device-flowunit)\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_resize/resize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"resize_flowunit.h\"\n\n#include <npp.h>\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nNppiResizeFlowUnit::NppiResizeFlowUnit() = default;\nNppiResizeFlowUnit::~NppiResizeFlowUnit() = default;\n\nstd::map<std::string, NppiInterpolationMode> kNppiResizeInterpolation = {\n    {\"inter_nn\", NPPI_INTER_NN},           {\"inter_linear\", NPPI_INTER_LINEAR},\n    {\"inter_cubic\", NPPI_INTER_CUBIC},     {\"inter_super\", NPPI_INTER_SUPER},\n    {\"inter_lanczos\", NPPI_INTER_LANCZOS},\n};\n\nstd::vector<std::string> kNppiResizeMethod = {\"u8c3r\", \"u8p3\"};\n\nmodelbox::Status NppiResizeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  dest_width_ = opts->GetUint32(\"width\", 0);\n  if (dest_width_ == 0) {\n    dest_width_ = opts->GetUint32(\"image_width\", 0);\n  }\n\n  dest_height_ = opts->GetUint32(\"height\", 0);\n  if (dest_height_ == 0) {\n    dest_height_ = opts->GetUint32(\"image_height\", 0);\n  }\n  if (dest_width_ <= 0 || dest_height_ <= 0) {\n    const auto *errMsg = \"resize width or height is not configured or invalid.\";\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  interpolation_ = opts->GetString(\"interpolation\", \"inter_linear\");\n  if (kNppiResizeInterpolation.find(interpolation_) ==\n      
kNppiResizeInterpolation.end()) {\n    auto errMsg =\n        \"resize interpolation is invalid, configure is :\" + interpolation_;\n    MBLOG_ERROR << errMsg;\n    std::string valid_interpolation;\n    for (const auto &iter : kNppiResizeInterpolation) {\n      if (valid_interpolation.length() > 0) {\n        valid_interpolation += \", \";\n      }\n      valid_interpolation += iter.first;\n    }\n    MBLOG_ERROR << \"Valid interpolation is: \" << valid_interpolation;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  MBLOG_DEBUG << \"resize dest width \" << dest_width_ << \", resize dest height \"\n              << dest_height_ << \", resize interpolation \" << interpolation_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiResizeFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto input_bufs = data_ctx->Input(\"in_image\");\n  auto output_bufs = data_ctx->Output(\"out_image\");\n\n  if (input_bufs->Size() <= 0) {\n    auto errMsg = \"input images size is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_FAULT, errMsg};\n  }\n\n  size_t channel = RGB_CHANNLES;\n  std::vector<size_t> sub_shape{dest_width_, dest_height_, channel};\n  std::vector<size_t> tensor_shape(\n      input_bufs->Size(), modelbox::Volume(sub_shape) * sizeof(u_char));\n  output_bufs->Build(tensor_shape);\n\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"sync stream  \" << stream << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  output_bufs->CopyMeta(input_bufs);\n  for (size_t i = 0; i < input_bufs->Size(); ++i) {\n    auto ret = ProcessOneImage(input_bufs, output_bufs, i);\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"nppi resize image failed, index is \" << i;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status 
NppiResizeFlowUnit::ProcessOneImage(\n    std::shared_ptr<modelbox::BufferList> &input_buffer_list,\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list, int index) {\n  ImageSize srcResize;\n  std::string pix_fmt;\n  bool exists = false;\n  exists = input_buffer_list->At(index)->Get(\"height\", srcResize.height);\n  if (!exists) {\n    MBLOG_ERROR << \"meta don't have key height\";\n    return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key height\"};\n  }\n\n  exists = input_buffer_list->At(index)->Get(\"width\", srcResize.width);\n  if (!exists) {\n    MBLOG_ERROR << \"meta don't have key width\";\n    return {modelbox::STATUS_NOTSUPPORT, \"meta don't have key width\"};\n  }\n\n  exists = input_buffer_list->At(index)->Get(\"pix_fmt\", pix_fmt);\n  if (!exists &&\n      !input_buffer_list->At(index)->Get(\"channel\", srcResize.channel)) {\n    MBLOG_ERROR << \"meta don't have key pix_fmt or channel\";\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"meta don't have key pix_fmt or channel\"};\n  }\n\n  if (exists && pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n    MBLOG_ERROR << \"unsupport pix format.\";\n    return {modelbox::STATUS_NOTSUPPORT, \"unsupport pix format.\"};\n  }\n\n  srcResize.channel = RGB_CHANNLES;\n  MBLOG_DEBUG << \"get \" << srcResize.width << \" rows \" << srcResize.height\n              << \" channel \" << srcResize.channel;\n\n  ImageSize dstResize;\n  dstResize.height = dest_height_;\n  dstResize.width = dest_width_;\n  dstResize.channel = srcResize.channel;\n\n  const auto *input_data =\n      static_cast<const u_char *>(input_buffer_list->ConstBufferData(index));\n  auto *output_data =\n      static_cast<u_char *>(output_buffer_list->MutableBufferData(index));\n\n  // resize image\n  auto nppiMethod = GetNppiResizeInterpolation(interpolation_);\n\n  modelbox::Status ret = modelbox::STATUS_OK;\n  ret = NppiResize_u8_c3r(input_data, srcResize, output_data, dstResize,\n                          nppiMethod);\n  if (ret 
!= modelbox::STATUS_OK) {\n    return ret;\n  }\n\n  // output resize image\n  auto output_buffer = output_buffer_list->At(index);\n  output_buffer->Set(\"width\", dstResize.width);\n  output_buffer->Set(\"height\", dstResize.height);\n  output_buffer->Set(\"width_stride\", dstResize.width * 3);\n  output_buffer->Set(\"height_stride\", dstResize.height);\n  output_buffer->Set(\"channel\", srcResize.channel);\n  output_buffer->Set(\"pix_fmt\", pix_fmt);\n  output_buffer->Set(\"layout\", \"hwc\");\n  output_buffer->Set(\n      \"shape\", std::vector<size_t>({static_cast<size_t>(dstResize.height),\n                                    static_cast<size_t>(dstResize.width),\n                                    static_cast<size_t>(srcResize.channel)}));\n  output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiResizeFlowUnit::NppiResize_u8_P3(\n    const u_char *pSrcPlanarData, ImageSize srcSize, u_char *pDstPlanarData,\n    ImageSize dstSize, NppiInterpolationMode method) {\n  const Npp8u *pSrc[3];\n  pSrc[0] = pSrcPlanarData;\n  pSrc[1] = pSrcPlanarData + srcSize.width * srcSize.height;\n  pSrc[2] = pSrcPlanarData + srcSize.width * srcSize.height * 2;\n\n  NppiRect oSrcRectROI;\n  oSrcRectROI.x = 0;\n  oSrcRectROI.y = 0;\n  oSrcRectROI.width = srcSize.width;\n  oSrcRectROI.height = srcSize.height;\n\n  Npp8u *pDst[3];\n  pDst[0] = pDstPlanarData;\n  pDst[1] = pDstPlanarData + dstSize.width * dstSize.height;\n  pDst[2] = pDstPlanarData + dstSize.width * dstSize.height * 2;\n\n  NppiRect oDstRectROI;\n  oDstRectROI.x = 0;\n  oDstRectROI.y = 0;\n  oDstRectROI.width = dstSize.width;\n  oDstRectROI.height = dstSize.height;\n\n  NppiSize srcNppSize;\n  srcNppSize.width = srcSize.width;\n  srcNppSize.height = srcSize.height;\n\n  NppiSize dstNppSize;\n  dstNppSize.width = dstSize.width;\n  dstNppSize.height = dstSize.height;\n\n  NppStatus status = nppiResize_8u_P3R(\n      pSrc, srcSize.width * 
sizeof(u_char), srcNppSize, oSrcRectROI, pDst,\n      dest_width_ * sizeof(u_char), dstNppSize, oDstRectROI, method);\n  if (NPP_SUCCESS != status) {\n    MBLOG_ERROR << \"npp error code \" << status;\n    std::string errMsg = \"Nppi resize failed, error code \" +\n                         std::to_string(status) +\n                         \", src image size: \" + std::to_string(srcSize.width) +\n                         \" x \" + std::to_string(srcSize.height);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_NODATA, errMsg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NppiResizeFlowUnit::NppiResize_u8_c3r(\n    const u_char *pSrcPlanarData, ImageSize srcSize, u_char *pDstPlanarData,\n    ImageSize dstSize, NppiInterpolationMode method) {\n  const Npp8u *pSrc = pSrcPlanarData;\n\n  NppiRect oSrcRectROI;\n  oSrcRectROI.x = 0;\n  oSrcRectROI.y = 0;\n  oSrcRectROI.width = srcSize.width;\n  oSrcRectROI.height = srcSize.height;\n\n  Npp8u *pDst = pDstPlanarData;\n\n  NppiRect oDstRectROI;\n  oDstRectROI.x = 0;\n  oDstRectROI.y = 0;\n  oDstRectROI.width = dstSize.width;\n  oDstRectROI.height = dstSize.height;\n\n  NppiSize srcNppSize;\n  srcNppSize.width = srcSize.width;\n  srcNppSize.height = srcSize.height;\n\n  NppiSize dstNppSize;\n  dstNppSize.width = dstSize.width;\n  dstNppSize.height = dstSize.height;\n\n  NppStatus status = nppiResize_8u_C3R(\n      pSrc, srcSize.width * sizeof(u_char) * 3, srcNppSize, oSrcRectROI, pDst,\n      dest_width_ * sizeof(u_char) * 3, dstNppSize, oDstRectROI, method);\n  if (NPP_SUCCESS != status) {\n    MBLOG_ERROR << \"nppi error code \" << status;\n    std::string errMsg = \"cuda resize failed, error code \" +\n                         std::to_string(status) +\n                         \", src image size: \" + std::to_string(srcSize.width) +\n                         \" x \" + std::to_string(srcSize.height);\n    MBLOG_ERROR << errMsg;\n    return {modelbox::STATUS_NODATA, errMsg};\n  }\n  return 
modelbox::STATUS_OK;\n}\n\nNppiInterpolationMode NppiResizeFlowUnit::GetNppiResizeInterpolation(\n    std::string resizeType) {\n  transform(resizeType.begin(), resizeType.end(), resizeType.begin(),\n            ::tolower);\n\n  if (kNppiResizeInterpolation.find(resizeType) ==\n      kNppiResizeInterpolation.end()) {\n    MBLOG_WARN << \"cuda resize not support method \\\"\" << resizeType << \"\\\"\";\n    MBLOG_WARN << \"using default method \\\"inter_linear\\\"\";\n    return NPPI_INTER_LINEAR;\n  }\n\n  return kNppiResizeInterpolation[resizeType];\n}\n\nMODELBOX_FLOWUNIT(NppiResizeFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_image\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_width\", \"int\", true,\n                                                  \"640\", \"the resize width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", true,\n                                                  \"480\", \"the resize height\"));\n  std::map<std::string, std::string> interpolation_list;\n\n  for (auto &item : kNppiResizeInterpolation) {\n    interpolation_list[item.first] = item.first;\n  }\n\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"interpolation\", \"list\", true, \"inter_linear\",\n                               \"the resize interpolation\", interpolation_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_resize/resize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_NPPI_RESIZE_GPU_H_\n#define MODELBOX_FLOWUNIT_NPPI_RESIZE_GPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <nppi_geometry_transforms.h>\n#include \"modelbox/device/cuda/device_cuda.h\"\n\n#include <typeinfo>\n\nconstexpr const char *FLOWUNIT_NAME = \"resize\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A resize flowunit on cuda device. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: 'pix_fmt': \"\n    \"[rgb_packed,bgr_packed], 'layout': [hwc]. \";\nconst int RGB_CHANNLES = 3;\n\ntypedef struct {\n  int32_t width;  /**<  Rectangle width. */\n  int32_t height; /**<  Rectangle height. */\n  int32_t channel;\n} ImageSize;\n\nclass NppiResizeFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  NppiResizeFlowUnit();\n  ~NppiResizeFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override { return modelbox::STATUS_OK; };\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status 
NppiResize_u8_P3(const u_char *pSrcPlanarData,\n                                  ImageSize srcSize, u_char *pDstPlanarData,\n                                  ImageSize dstSize,\n                                  NppiInterpolationMode method);\n\n  modelbox::Status NppiResize_u8_c3r(const u_char *pSrcPlanarData,\n                                   ImageSize srcSize, u_char *pDstPlanarData,\n                                   ImageSize dstSize,\n                                   NppiInterpolationMode method);\n\n  NppiInterpolationMode GetNppiResizeInterpolation(std::string resizeType);\n\n  modelbox::Status ProcessOneImage(\n      std::shared_ptr<modelbox::BufferList> &input_buffer_list,\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list, int index);\n\n  size_t dest_width_{0};\n  size_t dest_height_{0};\n  std::string interpolation_{\"inter_linear\"};\n};\n\n#endif  // MODELBOX_FLOWUNIT_NPPI_RESIZE_GPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nppi_resize/resize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n#include <cuda_runtime.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass NppiResizeFlowUnitTest : public testing::Test {\n public:\n  NppiResizeFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> NppiResizeFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus NppiResizeFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc 
desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"p3_test_0_1_resize\");\n    desc_flowunit.SetDescription(\"the test in 0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-p3_test_0_1_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"p3_test_0_1_resize\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n              }\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              auto output_buf = ext_data->CreateBufferList();\n              modelbox::TensorList output_tensor_list(output_buf);\n              output_tensor_list.BuildFromHost<uchar>(\n                  {1, {gimg_path.size() + 1}}, (void*)gimg_path.data(),\n                  gimg_path.size() + 1);\n\n              auto status = ext_data->Send(output_buf);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n              }\n\n              status = ext_data->Close();\n   
           if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"p3_test_0_1_resize \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"p3_test_0_1_resize \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"p3_test_0_1_resize process\";\n              auto output_buf = data_ctx->Output(\"Out_1\");\n              auto external = data_ctx->External();\n              std::string gimg_path =\n                  std::string((char*)(*external)[0]->ConstData());\n\n              cv::Mat gimg_data = cv::imread(gimg_path);\n\n              MBLOG_INFO << \"gimage col \" << gimg_data.cols << \"  grow \"\n                         << gimg_data.rows\n                         << \" gchannel:\" << gimg_data.channels();\n\n              size_t gcols = gimg_data.cols;\n              size_t grows = gimg_data.rows;\n              size_t gchannels = gimg_data.channels();\n\n              uint32_t batch_size = 5;\n              std::vector<size_t> shape_vector(\n                  batch_size,\n                  modelbox::Volume({grows, gcols, gchannels}) * sizeof(uchar));\n              output_buf->Build(shape_vector);\n              for (size_t i = 0; i 
< batch_size; ++i) {\n                std::string img_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n                cv::Mat img_data = cv::imread(img_path);\n                std::vector<cv::Mat> vecChannels;\n                cv::split(img_data, vecChannels);\n                cv::Mat mergeImg;\n                cv::merge(vecChannels, mergeImg);\n                MBLOG_DEBUG << \"image col \" << img_data.cols << \"  row \"\n                            << img_data.rows\n                            << \" channel:\" << img_data.channels();\n\n                int32_t cols = img_data.cols;\n                int32_t rows = img_data.rows;\n                int32_t channels = img_data.channels();\n\n                output_buf->At(i)->Set(\"width\", cols);\n                output_buf->At(i)->Set(\"height\", rows);\n                output_buf->At(i)->Set(\"channel\", channels);\n\n                auto* output_data =\n                    static_cast<uchar*>(output_buf->MutableBufferData(i));\n                for (int32_t j = 0; j < channels; j++) {\n                  cv::Mat tmpMat = vecChannels.at(j);\n                  memcpy_s(\n                      output_data + (tmpMat.total() * tmpMat.elemSize() * j),\n                      output_buf->At(i)->GetBytes() / channels, tmpMat.data,\n                      tmpMat.total() * tmpMat.elemSize());\n                }\n              }\n\n              MBLOG_INFO << \"test_0_1 gen data finish\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"p3_test_0_1_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    
desc_flowunit.SetName(\"p3_test_1_0_resize\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-p3_test_1_0_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"p3_test_1_0_resize\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"p3_test_1_0_resize \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"p3_test_1_0_resize \"\n                         << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"p3_test_1_0_resize process\";\n              auto 
input_buf = op_ctx->Input(\"In_1\");\n              int32_t cols = 0;\n              int32_t rows = 0;\n              int32_t channels = 0;\n              for (size_t i = 0; i < input_buf->Size(); i++) {\n                input_buf->At(i)->Get(\"width\", cols);\n                input_buf->At(i)->Get(\"height\", rows);\n                input_buf->At(i)->Get(\"channel\", channels);\n                MBLOG_INFO << \"image col \" << cols << \"  row \" << rows\n                           << \" channel:\" << channels;\n                const auto* input_data =\n                    static_cast<const uchar*>(input_buf->ConstBufferData(i));\n\n                cv::Mat img_data(cv::Size(cols, rows), CV_8UC3);\n\n                std::vector<cv::Mat> vecChannelsDest;\n                for (int32_t j = 0; j < channels; j++) {\n                  cv::Mat tmp(cv::Size(cols, rows), CV_8UC1);\n                  memcpy_s(tmp.data, cols * rows, input_data + cols * rows * j,\n                           input_buf->At(i)->GetBytes() / channels);\n                  vecChannelsDest.push_back(tmp);\n                }\n                cv::merge(vecChannelsDest, img_data);\n\n                std::string name = std::string(TEST_DATA_DIR) + \"/test\" +\n                                   std::to_string(i) + \".jpg\";\n                cv::imwrite(name, img_data);\n              }\n              MBLOG_DEBUG << \"p3_test_1_0_resize process data finish\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"p3_test_1_0_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    
desc_flowunit.SetName(\"c3r_test_0_1_resize\");\n    desc_flowunit.SetDescription(\"the test in 0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-c3r_test_0_1_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"c3r_test_0_1_resize\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n              }\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              auto output_buf = ext_data->CreateBufferList();\n              modelbox::TensorList output_tensor_list(output_buf);\n              output_tensor_list.BuildFromHost<uchar>(\n                  {1, {gimg_path.size() + 1}}, (void*)gimg_path.data(),\n                  gimg_path.size() + 1);\n\n              auto status = ext_data->Send(output_buf);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n  
            }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto output_bufs = data_ctx->Output(\"Out_1\");\n              auto external = data_ctx->External();\n              std::string gimg_path =\n                  std::string((char*)(*external)[0]->ConstData());\n\n              cv::Mat gimg_data = cv::imread(gimg_path);\n\n              MBLOG_INFO << \"gimage col \" << gimg_data.cols << \"  grow \"\n                         << gimg_data.rows\n                         << \" gchannel:\" << gimg_data.channels();\n\n              long unsigned int gcols = gimg_data.cols;\n              long unsigned int grows = gimg_data.rows;\n              long unsigned int gchannels = gimg_data.channels();\n\n              uint32_t batch_size = 5;\n              std::vector<size_t> shape;\n              for (size_t i = 0; i < batch_size; i++) {\n                shape.push_back(grows * gcols * gchannels * sizeof(uchar));\n              }\n\n              output_bufs->Build(shape);\n\n              for (size_t i = 0; i < 5; ++i) {\n                const std::string& img_path = gimg_path;\n                cv::Mat img_data = 
cv::imread(img_path);\n                MBLOG_INFO << \"image col \" << img_data.cols << \"  row \"\n                           << img_data.rows\n                           << \" channel:\" << img_data.channels();\n\n                int32_t cols = img_data.cols;\n                int32_t rows = img_data.rows;\n                int32_t channels = img_data.channels();\n\n                output_bufs->At(i)->Set(\"width\", cols);\n                output_bufs->At(i)->Set(\"height\", rows);\n                output_bufs->At(i)->Set(\"channel\", channels);\n\n                auto* output_data = output_bufs->At(i)->MutableData();\n                memcpy_s(output_data, output_bufs->At(i)->GetBytes(),\n                         img_data.data, img_data.total() * img_data.elemSize());\n              }\n              MBLOG_INFO << \"test_0_1 gen data finish\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"c3r_test_0_1_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"c3r_test_1_0_resize\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) +\n        \"/libmodelbox-unit-cpu-c3r_test_1_0_resize.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"c3r_test_1_0_resize\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n  
  mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"c3r_test_1_0_resize process\";\n              auto input = op_ctx->Input(\"In_1\");\n\n              for (size_t i = 0; i < input->Size(); i++) {\n                int32_t cols = 0;\n                int32_t rows = 0;\n                int32_t channels = 0;\n                input->At(i)->Get(\"width\", cols);\n                input->At(i)->Get(\"height\", rows);\n                input->At(i)->Get(\"channel\", channels);\n                const auto* input_data = input->At(i)->ConstData();\n\n                cv::Mat img_data(cv::Size(cols, rows), CV_8UC3);\n                memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                         input_data, 
input->At(i)->GetBytes());\n\n                std::string name = std::string(TEST_DATA_DIR) + \"/test\" +\n                                   std::to_string(i) + \".jpg\";\n                cv::imwrite(name, img_data);\n              }\n\n              MBLOG_INFO << \"c3r_test_1_0_resize process data finish\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"c3r_test_1_0_resize\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}\n\nTEST_F(NppiResizeFlowUnitTest, TestC3r) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          c3r_test_0_1_resize[type=flowunit, flowunit=c3r_test_0_1_resize,\n          device=cpu, deviceid=0, label=\"<Out_1>\",batch_size=5]\n\n          nppi_resize[type=flowunit, flowunit=resize, device=cuda,\n          deviceid=0, label=\"<in_image> | <out_image>\", width=128, height=128,\n          method=\"u8c3r\", batch_size=5]\n\n          c3r_test_1_0_resize[type=flowunit, flowunit=c3r_test_1_0_resize,\n          device=cpu, deviceid=0, label=\"<In_1>\",batch_size=5]\n\n          c3r_test_0_1_resize:Out_1 -> nppi_resize:in_image\n          nppi_resize:out_image -> c3r_test_1_0_resize:In_1\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"TestC3r\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto& elem : filePath) {\n    MBLOG_INFO << \"filePath: \" << 
elem;\n  }\n\n  for (size_t i = 0; i < 5; ++i) {\n    std::string expected_file_path =\n        std::string(TEST_ASSETS) + \"/nppi_resize_128x128_result.jpg\";\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string resize_result_file_path =\n        std::string(TEST_DATA_DIR) + \"/test\" + std::to_string(i) + \".jpg\";\n    cv::Mat resize_result_img = cv::imread(resize_result_file_path);\n\n    int result_data_size =\n        resize_result_img.total() * resize_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    EXPECT_EQ(result_data_size, expected_data_size);\n\n    int ret =\n        memcmp(resize_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(ret, 0);\n\n    auto rmret = remove(resize_result_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nv_image_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif (NOT OPENCV_FOUND) \n    message(STATUS \"Not found opencv, disable cuda image decoder flowunit\")\n    return()\nendif()\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"image_decoder\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nfind_cuda_helper_libs(nppidei)\nfind_cuda_helper_libs(nvjpeg) 
\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${OpenCV_LIBS})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppidei_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nvjpeg_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${OPENCV_CORE_LIBRARY} \n        ${OPENCV_IMGPROC_LIBRARY} \n        ${CUDA_nppidei_LIBRARY}\n        ${OPENCV_IMGCODECS_LIBRARY})\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cuda-device-flowunit)\n\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES 
${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nv_image_decoder/nv_image_decoder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"nv_image_decoder.h\"\n\n#include <cuda_runtime_api.h>\n#include <npp.h>\n#include <nppi_data_exchange_and_initialization.h>\n\n#include <opencv2/opencv.hpp>\n\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nstd::map<std::string, nvjpegOutputFormat_t> kNvImgPixelFormat{\n    {\"bgr\", NVJPEG_OUTPUT_BGR}, {\"rgb\", NVJPEG_OUTPUT_RGB}};\n\nstd::map<ImageType, std::vector<uint8_t>> kImgStreamFormat{\n    {IMAGE_TYPE_JPEG, {0xff, 0xd8}},\n    {IMAGE_TYPE_PNG, {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a}},\n    {IMAGE_TYPE_BMP, {0x42, 0x4d}}};\n\nNvImageDecoderFlowUnit::NvImageDecoderFlowUnit() = default;\nNvImageDecoderFlowUnit::~NvImageDecoderFlowUnit() = default;\n\nmodelbox::Status NvImageDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  pixel_format_ = opts->GetString(\"pix_fmt\", \"bgr\");\n  if (kNvImgPixelFormat.find(pixel_format_) == kNvImgPixelFormat.end()) {\n    auto errMsg = \"pixel_format is invalid, configure is :\" + pixel_format_;\n    MBLOG_ERROR << errMsg;\n    std::string valid_format;\n    for (const auto &iter : kNvImgPixelFormat) {\n      if (valid_format.length() > 0) {\n        valid_format += \", \";\n      }\n      valid_format += iter.first;\n    }\n    MBLOG_ERROR << \"Valid pixel_format 
is: \" << valid_format;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n  MBLOG_DEBUG << \"pixel_format \" << pixel_format_;\n\n  nvjpegStatus_t ret = NVJPEG_STATUS_SUCCESS;\n  ret = nvjpegCreate(NVJPEG_BACKEND_DEFAULT, nullptr, &handle_);\n  if (ret != NVJPEG_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"nvjpegCreateSimple failed, ret \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NvImageDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"process image decode\";\n\n  // get input\n  auto input_bufs = data_ctx->Input(\"in_encoded_image\");\n  auto output_bufs = data_ctx->Output(\"out_image\");\n\n  if (input_bufs->Size() <= 0) {\n    auto err_msg =\n        \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  // each thread instantiated nvjpegJpegState_t\n  nvjpegJpegState_t jpeg_handle{nullptr};\n  auto jpeg_ret = nvjpegJpegStateCreate(handle_, &jpeg_handle);\n  if (jpeg_ret != NVJPEG_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"nvjpegJpegStateCreate failed, ret \" << jpeg_ret;\n    return modelbox::STATUS_FAULT;\n  }\n  Defer {\n    jpeg_ret = nvjpegJpegStateDestroy(jpeg_handle);\n    if (jpeg_ret != NVJPEG_STATUS_SUCCESS) {\n      MBLOG_ERROR << \"nvjpegJpegStateDestroy failed, ret \" << jpeg_ret;\n    }\n  };\n\n  // image decode\n  for (auto &buffer : *input_bufs) {\n    auto output_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n    const auto *input_data = static_cast<const uint8_t *>(buffer->ConstData());\n    bool decode_ret = false;\n    if (buffer->GetBytes() != 0) {\n      if (CheckImageType(input_data) == IMAGE_TYPE_JPEG) {\n        decode_ret = DecodeJpeg(buffer, output_buffer, jpeg_handle);\n      }\n\n      if (!decode_ret) {\n        decode_ret = DecodeOthers(buffer, output_buffer);\n      }\n    }\n\n    if (!decode_ret) {\n      
output_buffer->SetError(\"ImageDecoder.DecodeFailed\",\n                              \"NvImageDecoder decode failed.\");\n    }\n    output_bufs->PushBack(output_buffer);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status NvImageDecoderFlowUnit::Close() {\n  if (handle_ != nullptr) {\n    auto des_ret = nvjpegDestroy(handle_);\n    if (des_ret != NVJPEG_STATUS_SUCCESS) {\n      MBLOG_ERROR << \"nvjpegDestroy failed, ret \" << des_ret;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n};\n\nImageType NvImageDecoderFlowUnit::CheckImageType(const uint8_t *input_data) {\n  for (auto &format_value : kImgStreamFormat) {\n    int ret = memcmp(input_data, format_value.second.data(),\n                     format_value.second.size());\n    if (ret == 0) {\n      return format_value.first;\n    }\n  }\n\n  return IMAGE_TYPE_OHTER;\n}\n\nbool NvImageDecoderFlowUnit::DecodeJpeg(\n    const std::shared_ptr<modelbox::Buffer> &input_buffer,\n    std::shared_ptr<modelbox::Buffer> &output_buffer,\n    nvjpegJpegState_t &jpeg_handle) {\n  int n_component = 0;\n  nvjpegChromaSubsampling_t subsampling;\n  int widths[NVJPEG_MAX_COMPONENT];\n  int heights[NVJPEG_MAX_COMPONENT];\n\n  const auto *input_data =\n      static_cast<const uint8_t *>(input_buffer->ConstData());\n  auto ret = nvjpegGetImageInfo(handle_, input_data, input_buffer->GetBytes(),\n                                &n_component, &subsampling, widths, heights);\n  if (ret != NVJPEG_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"get input encode image info failed, ret \" << ret;\n    return false;\n  }\n  MBLOG_DEBUG << \"widths: \" << widths[0] << \" \" << widths[1] << \" \" << widths[2]\n              << \" \" << widths[3];\n  MBLOG_DEBUG << \"heights \" << heights[0] << \" \" << heights[1] << \" \"\n              << heights[2] << \" \" << heights[3];\n\n  // build planner buffer\n  auto planner_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  auto modelbox_ret = 
planner_buffer->Build(\n      ((size_t)widths[0] * (size_t)heights[0] * (size_t)n_component));\n  if (modelbox_ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"build planner buffer failed, ret \" << modelbox_ret;\n    return false;\n  }\n  auto *planner_data = static_cast<uint8_t *>(planner_buffer->MutableData());\n  auto cuda_mem = std::dynamic_pointer_cast<modelbox::CudaMemory>(\n      planner_buffer->GetDeviceMemory());\n  cuda_mem->BindStream();\n  auto stream = cuda_mem->GetBindStream();\n\n  nvjpegImage_t imgdesc = {{planner_data, planner_data + widths[0] * heights[0],\n                            planner_data + widths[0] * heights[0] * 2,\n                            planner_data + widths[0] * heights[0] * 3},\n                           {(uint32_t)widths[0], (uint32_t)widths[0],\n                            (uint32_t)widths[0], (uint32_t)widths[0]}};\n\n  ret = nvjpegDecode(handle_, jpeg_handle, input_data, input_buffer->GetBytes(),\n                     kNvImgPixelFormat[pixel_format_], &imgdesc, stream->Get());\n  cudaStreamSynchronize(stream->Get());\n  if (ret != NVJPEG_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"nvjpegDecode failed, ret \" << ret;\n    return false;\n  }\n\n  // build output buffer\n  output_buffer->Build(((size_t)widths[0] * (size_t)heights[0] * (size_t)n_component));\n  auto *output_data = static_cast<uint8_t *>(output_buffer->MutableData());\n\n  // planner to packed image copy\n  Npp8u *dst_planer[3] = {(Npp8u *)(planner_data),\n                          (Npp8u *)(planner_data + widths[0] * heights[0]),\n                          (Npp8u *)(planner_data + widths[0] * heights[0] * 2)};\n  NppiSize dst_size = {widths[0], heights[0]};\n  auto nppi_ret = nppiCopy_8u_P3C3R(dst_planer, widths[0], (Npp8u *)output_data,\n                                    widths[0] * 3, dst_size);\n  if (nppi_ret != NPP_SUCCESS) {\n    MBLOG_ERROR << \"nppiCopy_8u_P3C3R failed. 
ret is \" << nppi_ret;\n    return false;\n  }\n\n  output_buffer->Set(\"width\", (int32_t)widths[0]);\n  output_buffer->Set(\"height\", (int32_t)heights[0]);\n  output_buffer->Set(\"width_stride\", (int32_t)widths[0]);\n  output_buffer->Set(\"height_stride\", (int32_t)heights[0]);\n  output_buffer->Set(\"channel\", (int32_t)n_component);\n  output_buffer->Set(\"pix_fmt\", pixel_format_);\n  output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  output_buffer->Set(\"shape\",\n                     std::vector<size_t>{(size_t)heights[0], (size_t)widths[0],\n                                         (size_t)n_component});\n  output_buffer->Set(\"layout\", std::string(\"hwc\"));\n  return true;\n}\n\nbool NvImageDecoderFlowUnit::DecodeOthers(\n    const std::shared_ptr<modelbox::Buffer> &input_buffer,\n    std::shared_ptr<modelbox::Buffer> &output_buffer) {\n  const auto *input_data =\n      static_cast<const uint8_t *>(input_buffer->ConstData());\n  std::vector<uint8_t> input_data2(\n      input_data, input_data + input_buffer->GetBytes() / sizeof(uint8_t));\n\n  cv::Mat img_bgr = cv::imdecode(input_data2, cv::IMREAD_COLOR);\n  if (img_bgr.data == nullptr || img_bgr.size == nullptr) {\n    MBLOG_ERROR << \"input image buffer is invalid, imdecode failed.\";\n    return false;\n  }\n\n  cv::Mat img_dest;\n  if (pixel_format_ == \"bgr\") {\n    img_dest = img_bgr;\n  } else if (pixel_format_ == \"rgb\") {\n    cv::cvtColor(img_bgr, img_dest, cv::COLOR_BGR2RGB);\n  }\n\n  output_buffer->Set(\"height\", (int32_t)img_dest.rows);\n  output_buffer->Set(\"width\", (int32_t)img_dest.cols);\n  output_buffer->Set(\"height_stride\", (int32_t)img_dest.rows);\n  output_buffer->Set(\"width_stride\", (int32_t)img_dest.cols * 3);\n  output_buffer->Set(\"channel\", (int32_t)img_dest.channels());\n  output_buffer->Set(\"pix_fmt\", pixel_format_);\n  output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  output_buffer->Set(\n      \"shape\", 
std::vector<size_t>{(size_t)img_dest.rows, (size_t)img_dest.cols,\n                                   (size_t)img_dest.channels()});\n  output_buffer->Set(\"layout\", std::string(\"hwc\"));\n  output_buffer->BuildFromHost(img_dest.data,\n                               img_dest.total() * img_dest.elemSize(), nullptr);\n\n  return true;\n}\n\nMODELBOX_FLOWUNIT(NvImageDecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_encoded_image\", \"cpu\"));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetResourceNice(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  std::map<std::string, std::string> format_list;\n\n  for (auto &item : kNvImgPixelFormat) {\n    format_list[item.first] = item.first;\n  }\n\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"list\", true, \"bgr\", \"the imdecode output pixel format\",\n      format_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nv_image_decoder/nv_image_decoder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NV_IMAGE_DECODER_GPU_H_\n#define MODELBOX_FLOWUNIT_NV_IMAGE_DECODER_GPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <nvjpeg.h>\n\n#include <typeinfo>\n\nconstexpr const char *FLOWUNIT_NAME = \"image_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: An OpenCV crop flowunit on cpu. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nenum ImageType {\n  IMAGE_TYPE_JPEG,\n  IMAGE_TYPE_PNG,\n  IMAGE_TYPE_BMP,\n  IMAGE_TYPE_OHTER\n};\n\nclass NvImageDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  NvImageDecoderFlowUnit();\n  ~NvImageDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  ImageType CheckImageType(const uint8_t *input_data);\n\n  bool DecodeJpeg(const std::shared_ptr<modelbox::Buffer> &input_buffer,\n                  std::shared_ptr<modelbox::Buffer> &output_buffer,\n                  nvjpegJpegState_t &jpeg_handle);\n\n  bool DecodeOthers(const std::shared_ptr<modelbox::Buffer> &input_buffer,\n                    std::shared_ptr<modelbox::Buffer> &output_buffer);\n\n  std::string pixel_format_{\"bgr\"};\n\n  nvjpegHandle_t handle_{nullptr};\n};\n\n#endif  // MODELBOX_FLOWUNIT_NV_IMAGE_DECODER_GPU_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/nv_image_decoder/nv_image_decoder_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <securec.h>\n\n#include <cuda_runtime.h>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nstd::vector<std::string> encode_fmt{\".jpg\", \".png\", \".bmp\"};\n\nnamespace modelbox {\nclass NvImageDecoderFlowUnitTest : public testing::Test {\n public:\n  NvImageDecoderFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  };\n\n  void TearDown() override { driver_flow_->Clear(); };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n};\n\nstd::shared_ptr<DriverFlowTest> NvImageDecoderFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nStatus 
NvImageDecoderFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_0_1_decode\");\n    desc_flowunit.SetDescription(\"the test in 0 out 1\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1_decode.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_0_1_decode\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                const auto* err_msg = \"can not get external data.\";\n                modelbox::Status ret = {modelbox::STATUS_NODATA, err_msg};\n                MBLOG_ERROR << err_msg;\n                return ret;\n              }\n\n              std::string gimg_path = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n              auto output_buf = ext_data->CreateBufferList();\n              modelbox::TensorList output_tensor_list(output_buf);\n              output_tensor_list.BuildFromHost<uchar>(\n                  {1, {gimg_path.size() + 1}}, (void*)gimg_path.data(),\n                  gimg_path.size() + 1);\n\n              auto status = 
ext_data->Send(output_buf);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n                return status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n                return status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(testing::Invoke([=](const std::shared_ptr<DataContext>&\n                                                data_ctx) {\n          MBLOG_INFO << \"test_0_1_decode process\";\n\n          auto external = data_ctx->External();\n          std::string gimg_path =\n              std::string((char*)(*external)[0]->ConstData());\n\n          cv::Mat gimg_data = cv::imread(gimg_path);\n\n          MBLOG_INFO << \"gimage col \" << gimg_data.cols << \"  grow \"\n                     << gimg_data.rows << \" gchannel:\" << gimg_data.channels();\n\n          // read img and encode diff type\n          uint32_t batch_size = encode_fmt.size();\n          std::vector<std::vector<u_char>> img_data_list;\n          std::vector<size_t> output_bufs_shape;\n          for (size_t i = 0; i < batch_size; ++i) {\n            const std::string& 
img_path = gimg_path;\n            cv::Mat ori_img = cv::imread(img_path);\n            MBLOG_INFO << \"input image col \" << ori_img.cols << \"  row \"\n                       << ori_img.rows << \" channel:\" << ori_img.channels()\n                       << \" encode fmt \" << encode_fmt[i];\n\n            std::vector<u_char> img_data;\n            std::vector<int> img_quality_param{cv::IMWRITE_JPEG_QUALITY, 100};\n            cv::imencode(encode_fmt[i], ori_img, img_data, img_quality_param);\n            img_data_list.push_back(img_data);\n            output_bufs_shape.push_back(img_data.size());\n\n            std::string ori_decode_name = std::string(TEST_DATA_DIR) +\n                                          \"/decode_ori_\" + std::to_string(i) +\n                                          \".jpg\";\n            cv::Mat ori_decode = cv::imdecode(img_data, cv::IMREAD_COLOR);\n            cv::imwrite(ori_decode_name, ori_decode);\n          }\n\n          // build output bufs\n          auto output_bufs = data_ctx->Output(\"Out_1\");\n          output_bufs->Build(output_bufs_shape);\n          for (size_t i = 0; i < batch_size; ++i) {\n            auto* output_data =\n                static_cast<u_char*>(output_bufs->MutableBufferData(i));\n            memcpy_s(output_data, output_bufs->At(i)->GetBytes(),\n                     img_data_list[i].data(), img_data_list[i].size());\n          }\n\n          return modelbox::STATUS_OK;\n        }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_0_1_decode\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_1_0_decode\");\n    
desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_1_0_decode.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_1_0_decode\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit_desc->SetMaxBatchSize(16);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_info DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"test_1_0_decode process\";\n              auto input_bufs = op_ctx->Input(\"In_1\");\n              int32_t cols = 0;\n              int32_t rows = 0;\n              int32_t channels = 0;\n\n          
    if (input_bufs->Size() == 0) {\n                MBLOG_ERROR << \"test_1_0_decode input buffersize is 0\";\n              }\n\n              for (size_t i = 0; i < input_bufs->Size(); i++) {\n                input_bufs->At(i)->Get(\"width\", cols);\n                input_bufs->At(i)->Get(\"height\", rows);\n                input_bufs->At(i)->Get(\"channel\", channels);\n                const auto* input_data =\n                    static_cast<const uchar*>(input_bufs->ConstBufferData(i));\n\n                cv::Mat img_data(cv::Size(cols, rows), CV_8UC3);\n                memcpy_s(img_data.data, img_data.total() * img_data.elemSize(),\n                         input_data, input_bufs->At(i)->GetBytes());\n\n                MBLOG_INFO << \"output image col \" << img_data.cols << \"  row \"\n                           << img_data.rows\n                           << \" channel:\" << img_data.channels();\n\n                std::string name = std::string(TEST_DATA_DIR) +\n                                   \"/decode_result_\" + std::to_string(i) +\n                                   \".jpg\";\n\n                cv::imwrite(name, img_data);\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_1_0_decode\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n  return STATUS_OK;\n}\n\nTEST_F(NvImageDecoderFlowUnitTest, NvDecodeTest) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          test_0_1_decode[type=flowunit, flowunit=test_0_1_decode, device=cpu, deviceid=0, 
label=\"<Out_1>\"]\n          nv_image_decoder[type=flowunit, flowunit=image_decoder, device=cuda, deviceid=0, label=\"<in_encoded_image> | <out_image>\", pix_fmt=\"bgr\", queue_size=64, batch_size=16]\n          test_1_0_decode[type=flowunit, flowunit=test_1_0_decode, device=cpu, deviceid=0, label=\"<In_1>\", batch_size=3]                                \n          test_0_1_decode:Out_1 -> nv_image_decoder:in_encoded_image \n          nv_image_decoder:out_image -> test_1_0_decode:In_1                                                                      \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret =\n      driver_flow->BuildAndRun(\"NvDecodeTest\", toml_content, 9999 * 1000);\n  EXPECT_EQ(ret, STATUS_STOP);\n\n  std::vector<std::string> filePath;\n  ListFiles(std::string(TEST_DATA_DIR), \"*\", &filePath);\n  for (auto& elem : filePath) {\n    MBLOG_DEBUG << \"filePath: \" << elem;\n  }\n\n  for (size_t i = 0; i < encode_fmt.size(); ++i) {\n    std::string expected_file_path = std::string(TEST_DATA_DIR) +\n                                     \"/decode_ori_\" + std::to_string(i) +\n                                     \".jpg\";\n    if (encode_fmt[i] == \".jpg\") {\n      remove(expected_file_path.c_str());\n      auto copy_ret =\n          CopyFile(std::string(TEST_ASSETS) + \"/decode_cuda_jpeg_expect.jpg\",\n                   expected_file_path, true);\n      std::cout << copy_ret;\n    }\n    cv::Mat expected_img = cv::imread(expected_file_path);\n\n    std::string decode_result_file_path = std::string(TEST_DATA_DIR) +\n                                          \"/decode_result_\" +\n                                          std::to_string(i) + \".jpg\";\n    cv::Mat decode_result_img = cv::imread(decode_result_file_path);\n\n    int result_data_size =\n        decode_result_img.total() * decode_result_img.elemSize();\n    int expected_data_size = expected_img.total() * expected_img.elemSize();\n    
EXPECT_EQ(result_data_size, expected_data_size);\n\n    int ret =\n        memcmp(decode_result_img.data, expected_img.data, result_data_size);\n    EXPECT_EQ(ret, 0);\n\n    auto rmret = remove(expected_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n\n    auto rmret2 = remove(decode_result_file_path.c_str());\n    EXPECT_EQ(rmret2, 0);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/padding/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"padding\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nif (NOT OPENCV_FOUND) \n    set(MODELBOX_UNIT_TEST_SOURCE \"\")\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_PADDING_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nfind_cuda_helper_libs(nppig)\nfind_cuda_helper_libs(nppidei)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppig_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppidei_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cuda-device-flowunit)\n\nset(LIBMODELBOX_FLOWUNIT_PADDING_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_PADDING_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/padding/padding_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"padding_flowunit.h\"\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nconst std::map<std::string, NppiInterpolationMode> kNppiResizeInterpolation = {\n    {\"inter_nn\", NPPI_INTER_NN},           {\"inter_linear\", NPPI_INTER_LINEAR},\n    {\"inter_cubic\", NPPI_INTER_CUBIC},     {\"inter_super\", NPPI_INTER_SUPER},\n    {\"inter_lanczos\", NPPI_INTER_LANCZOS},\n};\n\nconst std::map<std::string, AlignType> kVerticalAlignType = {\n    {\"top\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"bottom\", AlignType::END}};\n\nconst std::map<std::string, AlignType> kHorizontalAlignType = {\n    {\"left\", AlignType::BEGIN},\n    {\"center\", AlignType::CENTER},\n    {\"right\", AlignType::END}};\n\nPaddingFlowUnit::PaddingFlowUnit() = default;\nPaddingFlowUnit::~PaddingFlowUnit() = default;\n\nmodelbox::Status PaddingFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  width_ = opts->GetUint32(\"width\", 0);\n  if (width_ == 0) {\n    width_ = opts->GetUint32(\"image_width\", 0);\n  }\n\n  height_ = opts->GetUint32(\"height\", 0);\n  if (height_ == 0) {\n    height_ = opts->GetUint32(\"image_height\", 0);\n  }\n\n  if (width_ == 0 || height_ == 0) {\n    MBLOG_ERROR << \"width and height must set in config\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  
output_buffer_size_ = width_ * height_ * 3;\n  auto vertical_align_str = opts->GetString(\"vertical_align\", \"top\");\n  auto item = kVerticalAlignType.find(vertical_align_str);\n  if (item == kVerticalAlignType.end()) {\n    MBLOG_ERROR << \"vertical align must be one of [top|center|bottom]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  vertical_align_ = item->second;\n\n  auto horizontal_align_str = opts->GetString(\"horizontal_align\", \"left\");\n  item = kHorizontalAlignType.find(horizontal_align_str);\n  if (item == kHorizontalAlignType.end()) {\n    MBLOG_ERROR << \"horizontal align must be one of [left|center|right]\";\n    return modelbox::STATUS_BADCONF;\n  }\n  horizontal_align_ = item->second;\n\n  padding_data_ = opts->GetUint8s(\"padding_data\", {0, 0, 0});\n  if (padding_data_.size() != 3) {\n    MBLOG_ERROR << \"padding data size must be 3\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  need_scale_ = opts->GetBool(\"need_scale\", true);\n  auto interpolation_str = opts->GetString(\"interpolation\", \"inter_linear\");\n  auto interpolation_item = kNppiResizeInterpolation.find(interpolation_str);\n  if (interpolation_item == kNppiResizeInterpolation.end()) {\n    MBLOG_ERROR << \"not support interpolation \" << interpolation_str;\n    return modelbox::STATUS_BADCONF;\n  }\n  interpolation_ = interpolation_item->second;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  auto input_buffer_list = data_ctx->Input(\"in_image\");\n  auto output_buffer_list = data_ctx->Output(\"out_image\");\n  auto image_count = input_buffer_list->Size();\n  if (image_count == 0) {\n    MBLOG_ERROR << \"input buffer count is zero\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::vector<size_t> output_shape(image_count, output_buffer_size_);\n  auto ret = output_buffer_list->Build(output_shape);\n  if (!ret) {\n    MBLOG_ERROR << \"build output buffer 
failed, count \" << image_count\n                << \",size \" << output_buffer_size_;\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"sync stream  \" << stream << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < image_count; ++i) {\n    auto in_image = input_buffer_list->At(i);\n    auto out_image = output_buffer_list->At(i);\n    auto ret = PaddingOneImage(in_image, out_image);\n    if (!ret) {\n      MBLOG_ERROR << \"padding image failed, err \" << ret;\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::PaddingOneImage(\n    std::shared_ptr<modelbox::Buffer> &in_image,\n    std::shared_ptr<modelbox::Buffer> &out_image) {\n  int32_t ori_width = 0;\n  int32_t ori_height = 0;\n  std::string pix_fmt;\n  auto ret = in_image->Get(\"width\", ori_width);\n  ret = ret && in_image->Get(\"height\", ori_height);\n  if (!ret) {\n    MBLOG_ERROR << \"input image must has width and height in meta\";\n    return modelbox::STATUS_INVALID;\n  }\n\n  in_image->Get(\"pix_fmt\", pix_fmt);\n  if (pix_fmt != \"rgb\" && pix_fmt != \"bgr\") {\n    MBLOG_ERROR << \"unsupport pix format \" << pix_fmt;\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  NppiSize src_size{.width = ori_width, .height = ori_height};\n  NppiRect src_roi{.x = 0, .y = 0, .width = ori_width, .height = ori_height};\n  NppiSize dest_size{.width = width_, .height = height_};\n  NppiRect dest_roi;\n  auto status = FillDestRoi(src_size, dest_roi);\n  if (!status) {\n    MBLOG_ERROR << \"fill dest roi failed\";\n    return status;\n  }\n\n  status = FillPaddingData(out_image);\n  if (!status) {\n    MBLOG_ERROR << \"fill padding data failed\";\n    return status;\n  }\n\n  auto nppi_ret =\n      nppiResize_8u_C3R((const Npp8u *)in_image->ConstData(), ori_width * 3,\n                        src_size, src_roi, (Npp8u 
*)out_image->MutableData(),\n                        width_ * 3, dest_size, dest_roi, interpolation_);\n  if (nppi_ret != NPP_SUCCESS) {\n    MBLOG_ERROR << \"nppiResize_8u_C3R failed, src_w:\" << ori_width\n                << \", src_h:\" << ori_height << \", dest_w:\" << width_\n                << \", dest_h:\" << height_ << \"dest_roi[x:\" << dest_roi.x\n                << \",y:\" << dest_roi.y << \",w:\" << dest_roi.width\n                << \",h:\" << dest_roi.height << \"], ret \" << nppi_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  out_image->Set(\"width\", width_);\n  out_image->Set(\"height\", height_);\n  out_image->Set(\"width_stride\", width_ * 3);\n  out_image->Set(\"height_stride\", height_);\n  out_image->Set(\"channel\", (int32_t)3);\n  out_image->Set(\"pix_fmt\", pix_fmt);\n  std::string data_format = \"hwc\";\n  auto data_type = modelbox::MODELBOX_UINT8;\n  std::vector<size_t> data_shape = {(size_t)height_, (size_t)width_, 3};\n  out_image->Set(\"layout\", data_format);\n  out_image->Set(\"type\", data_type);\n  out_image->Set(\"shape\", data_shape);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PaddingFlowUnit::FillDestRoi(const NppiSize &src_size,\n                                              NppiRect &dest_roi) {\n  if (need_scale_) {\n    auto w_scale = (float)src_size.width / width_;\n    auto h_scale = (float)src_size.height / height_;\n    auto scale = std::max(w_scale, h_scale);\n    dest_roi.width = src_size.width / scale;\n    dest_roi.height = src_size.height / scale;\n  } else {\n    if (src_size.width > width_ || src_size.height > height_) {\n      MBLOG_ERROR << \"src image[w:\" << src_size.width\n                  << \",h:\" << src_size.height\n                  << \"] is great than dest size[w:\" << width_\n                  << \",h:\" << height_ << \"]. 
But need_scale is false\";\n      return modelbox::STATUS_INVALID;\n    }\n\n    dest_roi.width = src_size.width;\n    dest_roi.height = src_size.height;\n  }\n\n  dest_roi.x = GetAlignOffset(horizontal_align_, width_, dest_roi.width);\n  dest_roi.y = GetAlignOffset(vertical_align_, height_, dest_roi.height);\n  return modelbox::STATUS_OK;\n}\n\nuint32_t PaddingFlowUnit::GetAlignOffset(AlignType type, uint32_t dest_range,\n                                         uint32_t roi_range) {\n  if (roi_range >= dest_range) {\n    return 0;\n  }\n\n  uint32_t offset = 0;\n  switch (type) {\n    case AlignType::BEGIN:\n      break;\n\n    case AlignType::CENTER:\n      offset = (dest_range - roi_range) / 2;\n      break;\n\n    case AlignType::END:\n      offset = dest_range - roi_range;\n      break;\n\n    default:\n      break;\n  }\n\n  return offset;\n}\n\nmodelbox::Status PaddingFlowUnit::FillPaddingData(\n    std::shared_ptr<modelbox::Buffer> &out_image) {\n  NppiSize size{.width = width_, .height = height_};\n  auto ret =\n      nppiSet_8u_C3R(padding_data_.data(), (Npp8u *)out_image->MutableData(),\n                     width_ * 3, size);\n  if (ret != NPP_SUCCESS) {\n    MBLOG_ERROR << \"nppiSet_8u_C3R failed, size[w:\" << width_\n                << \",h:\" << height_ << \"], ret \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(PaddingFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput(modelbox::FlowUnitInput(\"in_image\", FLOWUNIT_TYPE));\n  desc.AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\", FLOWUNIT_TYPE));\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"image_width\", \"int\", true, \"0\", \"Output img width\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\"image_height\", \"int\", 
true, \"0\",\n                                                  \"Output img height\"));\n  std::map<std::string, std::string> vertical_align_list{\n      {\"top\", \"top\"}, {\"center\", \"center\"}, {\"bottom\", \"bottom\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"vertical_align\", \"list\", false, \"top\", \"Output roi vertical align type\",\n      vertical_align_list));\n  std::map<std::string, std::string> horizontal_align_list{\n      {\"left\", \"left\"}, {\"center\", \"center\"}, {\"right\", \"right\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"horizontal_align\", \"list\", false, \"left\",\n      \"Output roi horizontal align type\", horizontal_align_list));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"padding_data\", \"string\", false, \"0,0,0\", \"Data for padding\"));\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"need_scale\", \"bool\", false, \"true\",\n                               \"Will scale roi to fit output image\"));\n  std::map<std::string, std::string> interpolation_list{\n      {\"inter_nn\", \"inter_nn\"},\n      {\"inter_linear\", \"inter_linear\"},\n      {\"inter_cubic\", \"inter_cubic\"},\n      {\"inter_super\", \"inter_super\"},\n      {\"inter_lanczos\", \"inter_lanczos\"}};\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"interpolation\", \"list\", false, \"inter_linear\",\n      \"Interpolation method to scale roi\", interpolation_list));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/padding/padding_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_NPPI_PADDING_GPU_H_\n#define MODELBOX_FLOWUNIT_NPPI_PADDING_GPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include <npp.h>\n\n#include <string>\n#include <typeinfo>\n\nconstexpr const char *FLOWUNIT_NAME = \"padding\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A padding flowunit on cuda. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: 'pix_fmt': \"\n    \"[rgb,bgr], 'layout': [hwc]. \";\n\n\nenum class AlignType { BEGIN, CENTER, END };\n\nclass PaddingFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  PaddingFlowUnit();\n  ~PaddingFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override { return modelbox::STATUS_OK; };\n\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  modelbox::Status PaddingOneImage(std::shared_ptr<modelbox::Buffer> &in_image,\n                                 std::shared_ptr<modelbox::Buffer> &out_image);\n\n  modelbox::Status FillDestRoi(const NppiSize &src_size, NppiRect &dest_roi);\n\n  uint32_t GetAlignOffset(AlignType type, uint32_t dest_range,\n                          uint32_t roi_range);\n\n  modelbox::Status FillPaddingData(std::shared_ptr<modelbox::Buffer> &out_image);\n\n  int32_t width_{0};\n  int32_t height_{0};\n  size_t output_buffer_size_{0};\n  AlignType vertical_align_{AlignType::BEGIN};\n  AlignType horizontal_align_{AlignType::BEGIN};\n  std::vector<uint8_t> padding_data_;\n  bool need_scale_{true};\n  NppiInterpolationMode 
interpolation_{NPPI_INTER_LINEAR};\n};\n\n#endif  // MODELBOX_FLOWUNIT_NPPI_PADDING_GPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/padding/padding_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <cuda_runtime.h>\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass NppiPaddingFlowUnitTest : public testing::Test {\n public:\n  NppiPaddingFlowUnitTest() : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n  };\n\n  void TearDown() override { driver_flow_ = nullptr; };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nstd::shared_ptr<MockFlow> NppiPaddingFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(NppiPaddingFlowUnitTest, TestPaddingImage) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    
\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          padding[type=flowunit, flowunit=padding, device=cuda, deviceid=0, label=\"<in_image> | <out_image>\",\n          width=200, height=100, vertical_align=top, horizontal_align=center, padding_data=\"0, 255, 0\"]\n\n          input -> padding:in_image\n          padding:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"TestPaddingImage\", toml_content, 10);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto input_buffer_list = extern_data->CreateBufferList();\n  input_buffer_list->Build({img.total() * img.elemSize()});\n  auto input_buffer = input_buffer_list->At(0);\n  input_buffer->Set(\"width\", img.cols);\n  input_buffer->Set(\"height\", img.rows);\n  input_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  auto e_ret = memcpy_s(input_buffer->MutableData(), input_buffer->GetBytes(),\n                        img.data, img.total() * img.elemSize());\n  EXPECT_EQ(e_ret, 0);\n  auto status = extern_data->Send(\"input\", input_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n  cv::Mat out_img(cv::Size(200, 100), CV_8UC3);\n  auto cuda_ret = cudaMemcpy(out_img.data, output_buffer->ConstData(),\n                             output_buffer->GetBytes(), cudaMemcpyDeviceToHost);\n  EXPECT_EQ(cuda_ret, cudaSuccess);\n\n  auto expected_img =\n      cv::imread(std::string(TEST_ASSETS) + \"/nppi_padding_200x100_result.png\");\n  
ASSERT_EQ(expected_img.cols, out_img.cols);\n  ASSERT_EQ(expected_img.rows, out_img.rows);\n  for (int32_t y = 0; y < expected_img.rows; ++y) {\n    for (int32_t x = 0; x < expected_img.cols; ++x) {\n      auto expected_pix = expected_img.at<cv::Vec3b>(y, x);\n      auto pix = out_img.at<cv::Vec3b>(y, x);\n      ASSERT_EQ(expected_pix[0], pix[0]);\n      ASSERT_EQ(expected_pix[1], pix[1]);\n      ASSERT_EQ(expected_pix[2], pix[2]);\n    }\n  }\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"tensorflow_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT TENSORFLOW_FOUND) \n    message(STATUS \"Not found tensorflow, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.cuda.tensorflow.in ${TEST_WORKING_DATA_DIR}/virtual_tfgpu_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.cuda.tensorflow.savemodel.in ${TEST_WORKING_DATA_DIR}/virtual_tfgpu_save_model_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.cuda.tensorflow_plugin.in ${TEST_WORKING_DATA_DIR}/virtual_tfgpu_plugin_test.toml @ONLY)
\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_CUDART_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        
OPTIONAL)\n\ninstall(FILES ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox/drivers/devices/cuda/flowunit/tensorflow\n        COMPONENT cuda-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n#include \"tensorflow_gpu_inference_flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"tensorflow_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A gpu tensorflow inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<InferenceTensorflowGpuFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetNodelete(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/tensorflow_gpu_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tensorflow_gpu_inference_flowunit.h\"\n\nstd::shared_ptr<modelbox::FlowUnit>\nInferenceTensorflowGpuFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  auto inference_flowunit = std::make_shared<InferenceTensorflowGpuFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/tensorflow_gpu_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_GPU_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_GPU_H_\n\n#include <modelbox/flowunit.h>\n\n#include \"tensorflow_inference_common.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\n\nclass InferenceTensorflowGpuFlowUnit : public InferenceTensorflowFlowUnit {\n public:\n  InferenceTensorflowGpuFlowUnit() = default;\n  ~InferenceTensorflowGpuFlowUnit() override = default;\n};\n\nclass InferenceTensorflowGpuFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  InferenceTensorflowGpuFlowUnitFactory() = default;\n  ~InferenceTensorflowGpuFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n  std::string GetFlowUnitInputDeviceType() override { return \"cpu\"; };\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_GPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/tensorflow_gpu_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <cuda_runtime.h>\n#include <dlfcn.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"common/tensorflow_inference/tensorflow_inference_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n\nusing namespace tensorflow_inference; // NOLINT\n\nnamespace modelbox {\nclass InferenceTensorflowCudaFlowUnitTest : public testing::Test {\n public:\n  InferenceTensorflowCudaFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      has_cuda_ = false;\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto version = GetTFVersion();\n    if (SUPPORT_TF_VERSION.find(version) == SUPPORT_TF_VERSION.end()) {\n      version_suitable_ = false;\n      MBLOG_INFO << \"the version is \" << version\n                 << \", not in support version, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    auto ret = AddMockFlowUnit(driver_flow_);\n    EXPECT_EQ(ret, STATUS_OK);\n\n    
SetUpTomlFiles(version);\n  }\n\n  void TearDown() override {\n    if (!has_cuda_ || !version_suitable_) {\n      GTEST_SKIP();\n    }\n\n    RemoveFiles();\n    driver_flow_->Clear();\n  };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_toml_file = \"virtual_tfgpu_test.toml\",\n                    test_toml_plugin_file = \"virtual_tfgpu_plugin_test.toml\",\n                    test_toml_save_model_file =\n                        \"virtual_tfgpu_save_model_test.toml\";\n  std::string tensorflow_cuda_path, dest_toml_file;\n  std::string tensorflow_cuda_plugin_path, dest_toml_plugin_file;\n  std::string tensorflow_cuda_save_model_path, dest_toml_save_model_file;\n\n private:\n  void SetUpTomlFiles(const std::string &version);\n  void RemoveFiles();\n\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n  bool has_cuda_{true}, version_suitable_{true};\n};\n\nvoid InferenceTensorflowCudaFlowUnitTest::RemoveFiles() {\n  auto ret = remove(dest_toml_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorflow_cuda_path.c_str());\n  EXPECT_EQ(ret, 0);\n\n  ret = remove(dest_toml_plugin_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorflow_cuda_plugin_path.c_str());\n  EXPECT_EQ(ret, 0);\n\n  ret = remove(dest_toml_save_model_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorflow_cuda_save_model_path.c_str());\n  EXPECT_EQ(ret, 0);\n}\n\nvoid InferenceTensorflowCudaFlowUnitTest::SetUpTomlFiles(\n    const std::string &version) {\n  const std::string src_file_dir = test_assets + \"/tensorflow/\" + version;\n\n  const std::string src_file_pb_toml = test_data_dir + \"/\" + test_toml_file;\n  const std::string src_plugin_toml =\n      test_data_dir + \"/\" + test_toml_plugin_file;\n  const std::string src_save_model_toml =\n      test_data_dir + \"/\" + test_toml_save_model_file;\n\n  
tensorflow_cuda_path = test_data_dir + \"/tensorflow_cuda\";\n  auto mkdir_ret = mkdir(tensorflow_cuda_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  tensorflow_cuda_plugin_path = test_data_dir + \"/tensorflow_cuda_plugin\";\n  mkdir_ret = mkdir(tensorflow_cuda_plugin_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  tensorflow_cuda_save_model_path =\n      test_data_dir + \"/tensorflow_cuda_save_model\";\n  mkdir_ret = mkdir(tensorflow_cuda_save_model_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  dest_toml_file = tensorflow_cuda_path + \"/\" + test_toml_file;\n  auto status = ReplaceVersion(src_file_pb_toml, dest_toml_file, version);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_toml_plugin_file =\n      tensorflow_cuda_plugin_path + \"/\" + test_toml_plugin_file;\n  status = ReplaceVersion(src_plugin_toml, dest_toml_plugin_file, version);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_toml_save_model_file =\n      tensorflow_cuda_save_model_path + \"/\" + test_toml_save_model_file;\n  status =\n      ReplaceVersion(src_save_model_toml, dest_toml_save_model_file, version);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nstd::shared_ptr<DriverFlowTest>\nInferenceTensorflowCudaFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(InferenceTensorflowCudaFlowUnitTest, RunUnitBatch) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/tensorflow_cuda\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1_batch[type=flowunit, flowunit=test_0_1_batch, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=inference, device=cuda, deviceid=0, label=\"<input> | <output>\", batch_size=10]\n          test_1_0_batch[type=flowunit, flowunit=test_1_0_batch, 
device=cpu, deviceid=0, label=\"<In_1>\", batch_size=10]  \n                                  \n          test_0_1_batch:Out_1 -> inference:input\n          inference:output -> test_1_0_batch:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content, 99999);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceTensorflowCudaFlowUnitTest, RunUnitSingle) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/tensorflow_cuda\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=inference, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content, 99999);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceTensorflowCudaFlowUnitTest, RunPlugin) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir +\n                             \"/tensorflow_cuda_plugin\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                  
                          \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=inference_plugin, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunPlugin\", toml_content, 99999);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(InferenceTensorflowCudaFlowUnitTest, RunSaveModel) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir +\n                             \"/tensorflow_cuda_save_model\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=inference_save_model, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunSaveModel\", toml_content, 99999);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/test_toml/modelbox.test.cuda.tensorflow.encrypt.in",
    "content": "[base]\nname = \"inference_encrypt\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_pb/frozen_model_en.pb\"\ntype = \"inference\"\nvirtual_type = \"tensorflow\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.0\"\nrootkey = \"LfxL6P4NBBSgVjw6ay075j2GL4SjIBdHiDgm+gEBWx/bwjgbaO2CpygEgZqKyYRheDVuqKkvjf1adLyTTO5dRB5xsIlWqL+rvrUpWu5qyilM\"\npasswd = \"jXkGiVoIA/4mh0lNdxeAOOg3NfGjXUNKgwWWujvpRZI/Xu5wfTI9xQJwjBy5LQn7ZHiUSZ+AWFXjiZMDYgaAgQ==\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"output\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/test_toml/modelbox.test.cuda.tensorflow.in",
    "content": "[base]\nname = \"inference\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_pb/frozen_model.pb\"\ntype = \"inference\"\nvirtual_type = \"tensorflow\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/test_toml/modelbox.test.cuda.tensorflow.savemodel.in",
    "content": "[base]\nname = \"inference_save_model\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_save_model\"\ntype = \"inference\"\nvirtual_type = \"tensorflow\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorflow/test_toml/modelbox.test.cuda.tensorflow_plugin.in",
    "content": "[base]\nname = \"inference_plugin\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a cuda inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_pb/frozen_model.pb\"\ntype = \"inference\"\nvirtual_type = \"tensorflow\"\n\n[config]\nplugin = \"@CMAKE_SOURCE_DIR@/build/test/test-working-dir/drivers/libmodelbox-unit-inferece-plugin.so\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\ndevice = \"cpu\"\n\n[output]\n[output.output1]\nname = \"output\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"tensorrt_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nif (NOT TENSORRT_FOUND) \n    message(STATUS \"Not found tensorrt, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\nif (${TENSORRT_VERSION} STREQUAL \"7.1.3\")\n    message(\"found tensorrt version is \" ${TENSORRT_VERSION} \" define tensorrt7\")\n    ADD_DEFINITIONS(-DTENSORRT7)\nendif()\n\nif (${TENSORRT_VERSION} STRGREATER_EQUAL \"8.0.0\" AND ${TENSORRT_VERSION} STRLESS \"9.0.0\")\n    message(\"found tensorrt version is \" ${TENSORRT_VERSION} \" define tensorrt8\")\n    add_definitions(-DTENSORRT8)\n    set(TRT_VERSION8 TRUE)\nendif()\n\nfile(GLOB UNIT_SOURCE *.cpp *.cc *.c *.cu)\nif (NOT TRT_VERSION8)\n    file(GLOB NV_PLUGIN_FILES ./nvplugin/*.cc ./nvplugin/*.cpp ./nvplugin/*.cu)\nendif()\nlist(APPEND UNIT_SOURCE ${NV_PLUGIN_FILES})\nSET(HEADER tensorrt_inference_plugin.h)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.tensorrt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_tensorrt_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.tensorrt.encrypt.in 
${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_tensorrt_encrypt_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.plugin.tensorrt.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_plugin_tensorrt_test.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${TENSORRT_INCLUDE_DIR})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\n# for supress c++ compile wanring\nset(MODELBOX_UNIT_SHARED libmodelbox_unit_${UNIT_DEVICE}_${UNIT_NAME}_shared)\nset(MODELBOX_UNIT_SHARED_OUTPUT_NAME libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    OUTPUT_NAME ${MODELBOX_UNIT_SHARED_OUTPUT_NAME}\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${TENSORRT_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_CUDART_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(FILES ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox/drivers/devices/cuda/flowunit/tensorrt\n        COMPONENT cuda-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n\nadd_subdirectory(test_plugin)"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/common_util.h",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_TENSORRT_COMMOM_H_\n#define MODELBOX_TENSORRT_COMMOM_H_\n\n#include <NvInfer.h>\n\n#include <functional>\n#include <memory>\n#include <numeric>\n\ninline int Volume(nvinfer1::Dims dims) {\n  auto* begin_dim_d = (dims.d[0] == -1 ? (dims.d + 1) : dims.d);\n  return std::accumulate(begin_dim_d, dims.d + dims.nbDims, 1,\n                         std::multiplies<int>());\n}\n\nstruct TensorRTInferDeleter {\n  template <typename T>\n  void operator()(T* obj) const {\n    if (obj == nullptr) {\n      return;\n    }\n\n#ifdef TENSORRT8\n    delete obj;\n#else\n    obj->destroy();\n#endif\n  }\n};\n\ntemplate <typename T>\ninline std::shared_ptr<T> TensorRTInferObject(T* obj) {\n  if (obj == nullptr) {\n    return nullptr;\n  }\n  \n  return std::shared_ptr<T>(obj, TensorRTInferDeleter());\n}\n\n#endif"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n#include \"tensorrt_inference_flowunit.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"tensorrt_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A tensorrt inference flowunit\";\n\niLogger gLogger;\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<TensorRTInferenceFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  initLibNvInferPlugins(&gLogger, \"\");\n\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/nvplugin/plugin_factory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_PLUGIN_FACTORY_H\n#define MODELBOX_FLOWUNIT_INFERENCE_PLUGIN_FACTORY_H\n\n#include <NvCaffeParser.h>\n#include <NvInfer.h>\n#include <NvInferPlugin.h>\n\n#include <condition_variable>\n#include <cstring>\n#include <fstream>\n#include <iostream>\n#include <regex>\n#include <sstream>\n#include <string>\n\n#include \"upsample-layer.h\"\n\nstatic constexpr float NEG_SLOPE2 = 0.1;\nstatic constexpr float UPSAMPLE_SCALE2 = 2.0;\nstatic constexpr int CUDA_THREAD_NUM2 = 512;\n\n// Integration for serialization.\nclass YoloPluginFactory : public nvinfer1::IPluginFactory,\n                          public nvcaffeparser1::IPluginFactoryExt {\n public:\n  // NOLINTNEXTLINE\n  virtual ~YoloPluginFactory() = default;\n  inline bool isLeakyRelu(const char *layerName) {\n    std::string src(layerName);\n    bool LeakyRelu_flag = src.find(\"leaky\", 0) != std::string::npos and\n                          src.find(\"layer\", 0) != std::string::npos;\n    return LeakyRelu_flag;\n  }\n\n  inline bool isUpsample(const char *layerName) {\n    std::string src(layerName);\n    bool Upsample_flag = src.find(\"upsample\", 0) != std::string::npos and\n                         src.find(\"layer\", 0) != std::string::npos;\n    return Upsample_flag;\n  }\n\n  nvinfer1::IPlugin *createPlugin(const char 
*layerName,\n                                  const nvinfer1::Weights *weights,\n                                  int nbWeights) override {\n    if (isPlugin(layerName) == false) {\n      MBLOG_ERROR << \"plugin layername is null\";\n      return nullptr;\n    }\n\n#ifndef TENSORRT7\n    if (isLeakyRelu(layerName)) {\n      if (nbWeights != 0 || weights != nullptr) {\n        MBLOG_ERROR << \"weights error.\";\n        return nullptr;\n      }\n\n      auto plugin = std::unique_ptr<nvinfer1::plugin::INvPlugin,\n                                    void (*)(nvinfer1::plugin::INvPlugin *)>(\n          nvinfer1::plugin::createPReLUPlugin(NEG_SLOPE2), nvPluginDeleter);\n      mPluginLeakyRelu.emplace_back(std::move(plugin));\n      return mPluginLeakyRelu.back().get();\n    }\n#endif\n\n    if (isUpsample(layerName)) {\n      if (nbWeights != 0 || weights != nullptr) {\n        MBLOG_ERROR << \"weights error.\";\n        return nullptr;\n      }\n\n      auto plugin = std::unique_ptr<nvinfer1::UpsampleLayerPlugin2>(\n          new nvinfer1::UpsampleLayerPlugin2(UPSAMPLE_SCALE2,\n                                             CUDA_THREAD_NUM2));\n      mPluginUpsample.emplace_back(std::move(plugin));\n      return mPluginUpsample.back().get();\n    }\n\n    return nullptr;\n  }\n\n  nvinfer1::IPlugin *createPlugin(const char *layerName, const void *serialData,\n                                  size_t serialLength) override {\n    if (isPlugin(layerName) == false) {\n      MBLOG_ERROR << \"plugin layername is null\";\n      return nullptr;\n    }\n\n#ifndef TENSORRT7\n    if (isLeakyRelu(layerName)) {\n      auto plugin = std::unique_ptr<nvinfer1::plugin::INvPlugin,\n                                    void (*)(nvinfer1::plugin::INvPlugin *)>(\n          nvinfer1::plugin::createPReLUPlugin(serialData, serialLength),\n          nvPluginDeleter);\n      mPluginLeakyRelu.emplace_back(std::move(plugin));\n      return mPluginLeakyRelu.back().get();\n    }\n#endif\n\n    if 
(isUpsample(layerName)) {\n      auto plugin = std::unique_ptr<nvinfer1::UpsampleLayerPlugin2>(\n          new nvinfer1::UpsampleLayerPlugin2(serialData, serialLength));\n      mPluginUpsample.emplace_back(std::move(plugin));\n      return mPluginUpsample.back().get();\n    }\n\n    return nullptr;\n  }\n\n  bool isPlugin(const char *name) override { return isPluginExt(name); }\n\n  bool isPluginExt(const char *name) override {\n#ifndef TENSORRT7\n    return (isLeakyRelu(name) or isUpsample(name));\n#else\n    return isUpsample(name);\n#endif\n  }\n\n  // The application has to destroy the plugin when it knows it's safe to do so.\n  void destroyPlugin() {\n    for (auto &item : mPluginUpsample) {\n      item.reset();\n    }\n  }\n\n  void (*nvPluginDeleter)(nvinfer1::plugin::INvPlugin *){\n      [](nvinfer1::plugin::INvPlugin *ptr) { ptr->destroy(); }};\n\n  std::vector<std::unique_ptr<nvinfer1::plugin::INvPlugin,\n                              void (*)(nvinfer1::plugin::INvPlugin *)>>\n      mPluginLeakyRelu{};\n  std::vector<std::unique_ptr<nvinfer1::UpsampleLayerPlugin2>>\n      mPluginUpsample{};\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_PLUGIN_FACTORY_H\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/nvplugin/upsample-layer.cpp",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"upsample-layer.h\"\n\n#include <modelbox/device/cuda/device_cuda.h>\n\n#include \"modelbox/base/log.h\"\n\nnamespace nvinfer1 {\ntemplate <typename T>\nvoid write_buffer(char *&buffer, const T &val) {\n  *reinterpret_cast<T *>(buffer) = val;\n  buffer += sizeof(T);\n}\n\ntemplate <typename T>\nvoid read_buffer(const char *&buffer, T &val) {\n  val = *reinterpret_cast<const T *>(buffer);\n  buffer += sizeof(T);\n}\n\nUpsampleLayerPlugin2::UpsampleLayerPlugin2(const float scale,\n                                           const float cudaThread /*= 512*/)\n    : mScale(scale), mThreadCount(cudaThread) {\n  mScale = 0.0;\n  mOutputWidth = 0;\n  mOutputHeight = 0;\n  mThreadCount = 0;\n}\n\nUpsampleLayerPlugin2::~UpsampleLayerPlugin2() = default;\n\n// create the plugin at runtime from a byte stream\nUpsampleLayerPlugin2::UpsampleLayerPlugin2(const void *data, size_t length) {\n  const auto *d = reinterpret_cast<const char *>(data);\n  if (d == nullptr) {\n    return;\n  }\n  const char *a = d;\n  read_buffer(d, mCHW);\n  read_buffer(d, mDataType);\n  read_buffer(d, mScale);\n  read_buffer(d, mOutputWidth);\n  read_buffer(d, mOutputHeight);\n  read_buffer(d, mThreadCount);\n  if (d != a + length) {\n    MBLOG_ERROR << \"create plugin from byte stream error.\";\n  }\n}\n\nvoid 
UpsampleLayerPlugin2::serialize(void *buffer) {\n  auto *d = static_cast<char *>(buffer);\n  if (d == nullptr) {\n    return;\n  }\n  char *a = d;\n  write_buffer(d, mCHW);\n  write_buffer(d, mDataType);\n  write_buffer(d, mScale);\n  write_buffer(d, mOutputWidth);\n  write_buffer(d, mOutputHeight);\n  write_buffer(d, mThreadCount);\n  if (d != a + getSerializationSize()) {\n    MBLOG_ERROR << \"create plugin from byte serialization data error.\";\n  }\n}\n\nint UpsampleLayerPlugin2::initialize() {\n  int inputHeight = mCHW.d[1];\n  int inputWidth = mCHW.d[2];\n\n  mOutputHeight = inputHeight * mScale;\n  mOutputWidth = inputWidth * mScale;\n\n  int totalElems = mCHW.d[0] * mCHW.d[1] * mCHW.d[2];\n  cudaHostAlloc(&mInputBuffer, totalElems * type2size(mDataType),\n                cudaHostAllocDefault);\n\n  totalElems = mCHW.d[0] * mOutputHeight * mOutputWidth;\n  cudaHostAlloc(&mOutputBuffer, totalElems * type2size(mDataType),\n                cudaHostAllocDefault);\n\n  return 0;\n}\n\nvoid UpsampleLayerPlugin2::configureWithFormat(\n    const Dims *inputDims, int nbInputs, const Dims *outputDims, int nbOutputs,\n    DataType type, PluginFormat format, int maxBatchSize) {\n  if (type != DataType::kFLOAT && type != DataType::kHALF) {\n    MBLOG_ERROR << \"unsupport data type.\";\n    return;\n  }\n\n  if (format != PluginFormat::kNCHW) {\n    MBLOG_ERROR << \"unsupport data format.\";\n    return;\n  }\n\n  mDataType = type;\n}\n\n// it is called prior to any call to initialize().\nDims UpsampleLayerPlugin2::getOutputDimensions(int index, const Dims *inputs,\n                                               int nbInputDims) {\n  mCHW = inputs[0];\n  mOutputHeight = inputs[0].d[1] * mScale;\n  mOutputWidth = inputs[0].d[2] * mScale;\n  return Dims3(mCHW.d[0], mOutputHeight, mOutputWidth);\n}\n}  // namespace nvinfer1"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/nvplugin/upsample-layer.cu",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/log.h\"\n#include \"upsample-layer.h\"\n#include <typeinfo>\n\nnamespace nvinfer1\n{\n    __device__ int translate_idx(int index, int d1, int d2, int d3, int scale) {\n        int x, y, z, w;\n        w = index % d3;\n        index = index/d3;\n        z = index % d2;\n        index = index/d2;\n        y = index % d1;\n        index = index/d1;\n        x = index;\n        w = w/scale;\n        z = z/scale;\n        d2 /= scale;\n        d3 /= scale;\n        return (((x*d1+y)*d2)+z)*d3+w;\n    }\n \n    //template <typename Dtype>\n    __global__ void upscaleFp(const float *input, float *output,\n                            int no_elements, int scale_factor, int d1, int d2, int d3) {\n        int index = threadIdx.x + blockDim.x * blockIdx.x;\n        if (index >= no_elements) return;\n        int ipidx = translate_idx(index, d1, d2, d3, scale_factor);\n        output[index]=input[ipidx];\n    }\n \n    __global__ void upscaleInt8(const uint8_t *input, uint8_t *output,\n                              int no_elements, int scale_factor, int d1, int d2, int d3) {\n        int index = threadIdx.x + blockDim.x * blockIdx.x;\n        if (index >= no_elements) return;\n        int ipidx = translate_idx(index, d1, d2, d3, scale_factor);\n        output[index]=input[ipidx];\n    }\n \n \n    
template <typename Dtype>\n    void UpsampleLayerPlugin2::forwardGpu(const Dtype * input,Dtype * output,\n                                         int N,int C,int H ,int W, cudaStream_t stream) {\n        int numElem = N*C*H*W;\n \n        if (typeid(Dtype) == typeid(float)) {\n            upscaleFp<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>((float *)input, (float *)output, numElem, mScale, C, H, W);\n            return;\n        } \n        \n        if (typeid(Dtype) == typeid(uint8_t)) {\n            upscaleInt8<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>((uint8_t *)input, (uint8_t *)output, numElem, mScale, C, H, W);\n            return;\n        } \n\n        MBLOG_WARN << \"upsample layer plugin forwardGpu only support float, int8\";\n    }\n \n    int UpsampleLayerPlugin2::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)\n    {\n        const int channel = mCHW.d[0];\n        const int64_t in_height = mCHW.d[1];\n        const int64_t in_width = mCHW.d[2];\n        const int64_t out_height = mOutputHeight;\n        const int64_t out_width = mOutputWidth;\n        int totalElems = batchSize * in_height * in_width * channel;\n \n        // Handle no-op resizes efficiently.\n        if (out_height == in_height && out_width == in_width) {\n            cudaMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), cudaMemcpyDeviceToDevice, stream);\n            return 0;\n        }\n        cudaStreamSynchronize(stream);\n \n        switch (mDataType)\n        {\n            case DataType::kFLOAT :\n                forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth, stream);\n                break;\n            case DataType::kHALF:\n                forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth, stream);\n          
      break;\n            case DataType::kINT8:\n                forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth, stream);\n                break;\n            default:\n                MBLOG_ERROR << \"unsupport data type\";\n        }\n \n        return 0;\n    };\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/nvplugin/upsample-layer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_UPSAMPLE_LAYER_H\n#define MODELBOX_FLOWUNIT_INFERENCE_UPSAMPLE_LAYER_H\n\n#include <NvInfer.h>\n#include <assert.h>\n#include <cublas_v2.h>\n#include <cudnn.h>\n#include <string.h>\n\n#include <cmath>\n#include <iostream>\n\nnamespace nvinfer1 {\nclass UpsampleLayerPlugin2 : public IPluginExt {\n public:\n  explicit UpsampleLayerPlugin2(float scale, float cudaThread = 512);\n  // create the plugin at runtime from a byte stream\n  UpsampleLayerPlugin2(const void *data, size_t length);\n\n  ~UpsampleLayerPlugin2() override;\n\n  int getNbOutputs() const override { return 1; }\n\n  Dims getOutputDimensions(int index, const Dims *inputs,\n                           int nbInputDims) override;\n\n  bool supportsFormat(DataType type, PluginFormat format) const override {\n    return (type == DataType::kFLOAT || type == DataType::kHALF ||\n            type == DataType::kINT8) &&\n           format == PluginFormat::kNCHW;\n  }\n\n  void configureWithFormat(const Dims *inputDims, int nbInputs,\n                           const Dims *outputDims, int nbOutputs, DataType type,\n                           PluginFormat format, int maxBatchSize) override;\n\n  int initialize() override;\n\n  void terminate() override{};\n\n  size_t getWorkspaceSize(int maxBatchSize) const override { return 
0; }\n\n  int enqueue(int batchSize, const void *const *inputs, void **outputs,\n              void *workspace, cudaStream_t stream) override;\n\n  size_t getSerializationSize() override {\n    return sizeof(nvinfer1::Dims) + sizeof(mDataType) + sizeof(mScale) +\n           sizeof(mOutputWidth) + sizeof(mOutputHeight) + sizeof(mThreadCount);\n  }\n\n  void serialize(void *buffer) override;\n\n  template <typename Dtype>\n  void forwardGpu(const Dtype *input, Dtype *outputint, int N, int C, int H,\n                  int W, cudaStream_t stream);\n\n private:\n  size_t type2size(DataType type) {\n    return type == DataType::kFLOAT ? sizeof(float) : sizeof(__half);\n  }\n\n  nvinfer1::Dims mCHW = {0};\n  DataType mDataType{DataType::kFLOAT};\n  float mScale = 0.0;\n  int mOutputWidth = 0;\n  int mOutputHeight = 0;\n  int mThreadCount = 0;\n\n  void *mInputBuffer{nullptr};  // host\n  void *mOutputBuffer{nullptr};\n};\n};  // namespace nvinfer1\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_UPSAMPLE_LAYER_H"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/tensorrt_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tensorrt_inference_flowunit.h\"\n\n#include <dlfcn.h>\n#include <model_decrypt.h>\n\n#include <cstddef>\n#include <fstream>\n#include <random>\n#include <utility>\n\n#include \"common_util.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#ifndef TENSORRT8\n#include \"nvplugin/plugin_factory.h\"\n#endif\n#include \"virtualdriver_inference.h\"\n\nstatic std::map<nvinfer1::DataType, modelbox::ModelBoxDataType>\n    trttype_mbtype_map = {\n        {nvinfer1::DataType::kFLOAT, modelbox::MODELBOX_FLOAT},\n        {nvinfer1::DataType::kINT8, modelbox::MODELBOX_INT8},\n        {nvinfer1::DataType::kINT32, modelbox::MODELBOX_INT32},\n        {nvinfer1::DataType::kHALF, modelbox::MODELBOX_HALF}};\n\nstatic std::map<modelbox::ModelBoxDataType, nvinfer1::DataType>\n    mbtype_trttype_map = {\n        {modelbox::MODELBOX_FLOAT, nvinfer1::DataType::kFLOAT},\n        {modelbox::MODELBOX_INT8, nvinfer1::DataType::kINT8},\n        {modelbox::MODELBOX_INT32, nvinfer1::DataType::kINT32},\n        {modelbox::MODELBOX_HALF, nvinfer1::DataType::kHALF}};\n\nmodelbox::Status ConvertTrtTypeToModelBoxType(\n    nvinfer1::DataType trt_type, modelbox::ModelBoxDataType& modelbox_type) {\n  auto iter = trttype_mbtype_map.find(trt_type);\n  if (iter == trttype_mbtype_map.end()) {\n    
return {modelbox::STATUS_NOTSUPPORT,\n            \"covert TensorRT Type to ModelBoxType failed, unsupport type \" +\n                std::to_string(static_cast<int>(trt_type))};\n  }\n  modelbox_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ConvertModelBoxTypeToTorchType(\n    modelbox::ModelBoxDataType modelbox_type, nvinfer1::DataType& trt_type) {\n  auto iter = mbtype_trttype_map.find(modelbox_type);\n  if (iter == mbtype_trttype_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"covert ModelBoxType to TensorRT Type failed, unsupport type \" +\n                std::to_string(modelbox_type)};\n  }\n  trt_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nTensorRTInferenceFlowUnit::TensorRTInferenceFlowUnit() = default;\nTensorRTInferenceFlowUnit::~TensorRTInferenceFlowUnit() {\n  context_ = nullptr;\n  engine_ = nullptr;\n  plugin_factory_ = nullptr;\n\n  pre_process_ = nullptr;\n  post_process_ = nullptr;\n  data_pre_ = nullptr;\n  data_post_ = nullptr;\n  inference_plugin_ = nullptr;\n\n  if (driver_handler_ != nullptr) {\n    dlclose(driver_handler_);\n    driver_handler_ = nullptr;\n  }\n};\n\nRndInt8Calibrator::RndInt8Calibrator(\n    int total_samples, std::string cache_file,\n    std::map<std::string, nvinfer1::Dims3>& input_dims)\n    : total_samples_(total_samples), cache_file_(std::move(cache_file)) {\n  std::default_random_engine generator;\n  std::uniform_real_distribution<float> distribution(-1.0F, 1.0F);\n  for (auto& elem : input_dims) {\n    int elemCount = Volume(elem.second);\n\n    std::vector<float> rnd_data(elemCount);\n    for (auto& val : rnd_data) {\n      val = distribution(generator);\n    }\n\n    void* data = nullptr;\n    if (cudaMalloc(&data, elemCount * sizeof(float)) != 0) {\n      MBLOG_WARN << \"Cuda failure: cudaMalloc\";\n      continue;\n    }\n    if (cudaMemcpy(data, &rnd_data[0], elemCount * sizeof(float),\n                   cudaMemcpyHostToDevice) != 0) {\n 
     MBLOG_WARN << \"Cuda failure: cudaMemcpy\";\n      cudaFree(data);\n      continue;\n    }\n\n    input_device_buffers_.insert(std::make_pair(elem.first, data));\n  }\n}\n\nRndInt8Calibrator::~RndInt8Calibrator() {\n  for (auto& elem : input_device_buffers_) {\n    if (cudaFree(elem.second) != 0) {\n      MBLOG_WARN << \"Cuda failure: cudaFree\";\n    }\n  }\n}\n\nint RndInt8Calibrator::getBatchSize() const noexcept { return 1; }\n\nbool RndInt8Calibrator::getBatch(void* bindings[], const char* names[],\n                                 int nbBindings) noexcept {\n  if (current_sample_ >= total_samples_) {\n    return false;\n  }\n\n  for (int i = 0; i < nbBindings; ++i) {\n    bindings[i] = input_device_buffers_[names[i]];\n  }\n\n  ++current_sample_;\n  return true;\n}\n\nconst void* RndInt8Calibrator::readCalibrationCache(size_t& length) noexcept {\n  calibration_cache_.clear();\n  std::ifstream input(cache_file_, std::ios::binary);\n  input >> std::noskipws;\n  if (input.good()) {\n    std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(),\n              std::back_inserter(calibration_cache_));\n  }\n\n  length = calibration_cache_.size();\n  return length ? 
&calibration_cache_[0] : nullptr;\n}\n\nvoid RndInt8Calibrator::writeCalibrationCache(const void* /*ptr*/,\n                                              size_t /*length*/) noexcept {}\n\nmodelbox::Status TensorRTInferenceFlowUnit::PreProcess(\n    const std::shared_ptr<modelbox::DataContext>& data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::PostProcess(\n    const std::shared_ptr<modelbox::DataContext>& data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  if (!data_pre_) {\n    return modelbox::STATUS_OK;\n  }\n\n  return data_pre_(data_ctx);\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  if (!data_post_) {\n    return modelbox::STATUS_OK;\n  }\n\n  return data_post_(data_ctx);\n}\n\nvoid TensorRTInferenceFlowUnit::SetUpOtherConfig(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  params_.calibration_cache =\n      config->GetString(\"calibration_cache\", \"CalibrationTable\");\n\n  params_.plugin = config->GetString(\"plugin\");\n  params_.dynamic_batch_contain = config->Contain(\"dynamic_batch\");\n  params_.dynamic_batch = config->GetBool(\"dynamic_batch\", false);\n  params_.onnx_opt_batch_size = config->GetInt64(\"onnx_opt_batch_size\", 1);\n  params_.onnx_max_batch_size = config->GetInt64(\"onnx_max_batch_size\", 1);\n  params_.workspace_size = config->GetInt64(\"workspace_size\", 16);\n  params_.use_DLACore = config->GetInt64(\"use_dla_core\", -1);\n  params_.fp16 = config->GetBool(\"fp16\", false);\n  params_.int8 = config->GetBool(\"int8\", false);\n  params_.verbose = config->GetBool(\"verbose\", false);\n  params_.allow_GPUFallback = config->GetBool(\"allow_gpu_fallback\", false);\n  params_.pct = config->GetFloat(\"pct\", 99);\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::SetUpModelFile(\n    const 
std::shared_ptr<modelbox::Configuration>& config,\n    const std::string& model_file) {\n  std::string suffix_str = model_file.substr(model_file.find_last_of('.') + 1);\n\n  if (suffix_str == SUFFIX_UFF) {\n    params_.uff_file = model_file;\n  }\n\n  if (!params_.uff_file.empty()) {\n    std::vector<std::string> uff_inputs_string =\n        config->GetStrings(\"uff_input\");\n    if (uff_inputs_string.empty()) {\n      return {modelbox::STATUS_BADCONF,\n              \"uff file need to config uffInput, please configure uffInput \"\n              \"like 'name, c, h ,w'.\"};\n    }\n\n    for (const auto& i : uff_inputs_string) {\n      // TODO wait for configure adjust for ',' in string\n      std::vector<std::string> split_uff_inputs_string =\n          modelbox::StringSplit(i, '.');\n      std::string name = split_uff_inputs_string[0];\n      std::shared_ptr<nvinfer1::Dims> dims = nullptr;\n\n      switch (split_uff_inputs_string.size()) {\n        case 1:\n        case 2:\n          MBLOG_ERROR << \"invalid uffInputs\";\n          break;\n        case 3:\n          dims = std::make_shared<nvinfer1::Dims2>(\n              atoi(split_uff_inputs_string[1].c_str()),\n              atoi(split_uff_inputs_string[2].c_str()));\n          break;\n        case 4:\n          dims = std::make_shared<nvinfer1::Dims3>(\n              atoi(split_uff_inputs_string[1].c_str()),\n              atoi(split_uff_inputs_string[2].c_str()),\n              atoi(split_uff_inputs_string[3].c_str()));\n          break;\n        case 5:\n          dims = std::make_shared<nvinfer1::Dims4>(\n              atoi(split_uff_inputs_string[1].c_str()),\n              atoi(split_uff_inputs_string[2].c_str()),\n              atoi(split_uff_inputs_string[3].c_str()),\n              atoi(split_uff_inputs_string[4].c_str()));\n          break;\n        default:\n          MBLOG_ERROR << \"invalid uffInputs\";\n          break;\n      }\n\n      for (int i = 0; i < dims->nbDims; ++i) {\n        if 
(dims->d[i] != 0) {\n          continue;\n        }\n        MBLOG_ERROR << \"invalid uffInputs\";\n        return {modelbox::STATUS_BADCONF, \"invalid uffInputs.\"};\n      }\n      params_.uff_input_list.emplace_back(name, dims);\n    }\n\n    return modelbox::STATUS_OK;\n  }\n\n  if (suffix_str == SUFFIX_ONNX) {\n    params_.onnx_model_file = model_file;\n    return modelbox::STATUS_OK;\n  }\n\n  if (suffix_str == SUFFIX_PROTXT) {\n    params_.deploy_file = model_file;\n    params_.model_file = config->GetString(\"model_file\");\n    return modelbox::STATUS_OK;\n  }\n\n  params_.engine = model_file;\n\n  return modelbox::STATUS_OK;\n}\n\nvoid TensorRTInferenceFlowUnit::configureBuilder(\n    const std::shared_ptr<nvinfer1::IBuilder>& builder,\n    RndInt8Calibrator& calibrator) {\n#ifndef TENSORRT8\n  builder->setMaxWorkspaceSize(static_cast<unsigned int>(params_.workspace_size)\n                               << 20);\n  builder->setFp16Mode(params_.fp16);\n\n  if (!params_.fp16 && params_.int8) {\n    builder->setInt8Mode(true);\n    builder->setInt8Calibrator(&calibrator);\n  }\n\n  if (params_.use_DLACore >= 0) {\n    builder->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);\n    builder->setDLACore(params_.use_DLACore);\n    if (params_.allow_GPUFallback) {\n      builder->allowGPUFallback(true);\n    }\n  }\n#endif\n}\n\nvoid TensorRTInferenceFlowUnit::PrintModelBindInfo(\n    const std::vector<std::string>& name_list) {\n  for (const auto& bind_name : name_list) {\n    auto bind_index = engine_->getBindingIndex(bind_name.c_str());\n    auto bind_dims = engine_->getBindingDimensions(bind_index);\n    std::stringstream dim_info;\n    dim_info << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \", bind name: \" << bind_name << \", dims: [\";\n    for (int dim_index = 0; dim_index < bind_dims.nbDims; ++dim_index) {\n      dim_info << bind_dims.d[dim_index];\n      if (dim_index != bind_dims.nbDims - 1) {\n        dim_info << \", \";\n  
    }\n    }\n    dim_info << \"]\";\n    MBLOG_INFO << dim_info.str();\n\n#ifdef TENSORRT8\n    params_.use_enqueue_v2 = true;\n#else\n    params_.use_enqueue_v2 = (bind_dims.d[0] == -1);\n#endif\n  }\n\n  if (params_.dynamic_batch_contain) {\n    params_.use_enqueue_v2 = params_.dynamic_batch;\n  }\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::CaffeToTRTModel(\n    const std::shared_ptr<modelbox::Configuration>& config,\n    std::shared_ptr<nvinfer1::IBuilder>& builder,\n    std::shared_ptr<nvinfer1::INetworkDefinition>& network) {\n#ifndef TENSORRT8\n  // parse the caffe model to populate the network, then set the outputs\n  auto parser = TensorRTInferObject(nvcaffeparser1::createCaffeParser());\n  if (parser == nullptr) {\n    const auto* err_msg = \"create parser from caffe model failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  const nvcaffeparser1::IBlobNameToTensor* blobNameToTensor = nullptr;\n  auto drivers_ptr = GetBindDevice()->GetDeviceManager()->GetDrivers();\n\n  ModelDecryption deploy_decrypt;\n  auto ret = deploy_decrypt.Init(params_.deploy_file, drivers_ptr, config);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return {modelbox::STATUS_FAULT, \"open caffe deploy failed.\"};\n  }\n  ModelDecryption model_decrypt;\n  ret = model_decrypt.Init(params_.model_file, drivers_ptr, config);\n  if (ret != modelbox::STATUS_SUCCESS && !params_.model_file.empty()) {\n    return {modelbox::STATUS_FAULT, \"open caffe model failed.\"};\n  }\n\n  if (deploy_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT ||\n      model_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT) {\n    int64_t deploy_len = 0;\n    std::shared_ptr<uint8_t> deployBuf =\n        deploy_decrypt.GetModelSharedBuffer(deploy_len);\n    int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        model_decrypt.GetModelSharedBuffer(model_len);\n    if (!deployBuf || (!modelBuf && 
!params_.model_file.empty())) {\n      return {modelbox::STATUS_FAULT, \"Decrypt model fail\"};\n    }\n    blobNameToTensor = parser->parseBuffers(\n        (const char*)deployBuf.get(), (size_t)deploy_len,\n        modelBuf ? (const char*)modelBuf.get() : nullptr, (size_t)deploy_len,\n        *network,\n        params_.fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT);\n  } else {\n    blobNameToTensor = parser->parse(\n        params_.deploy_file.c_str(),\n        params_.model_file.empty() ? nullptr : params_.model_file.c_str(),\n        *network,\n        params_.fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT);\n  }\n  if (!blobNameToTensor) {\n    return {modelbox::STATUS_FAULT, \"parser caffe model failed.\"};\n  }\n\n  for (int i = 0, n = network->getNbInputs(); i < n; i++) {\n    auto* input = network->getInput(i);\n    if (input == nullptr) {\n      MBLOG_ERROR << \"input \" << i << \"is invalid\";\n      return {modelbox::STATUS_FAULT, \"get input failed\"};\n    }\n\n    nvinfer1::Dims3 dims =\n        static_cast<nvinfer1::Dims3&&>(input->getDimensions());\n    input_dims_.insert(std::make_pair(input->getName(), dims));\n  }\n\n  // specify which tensors are outputs\n  for (auto& output_item : params_.outputs_name_list) {\n    if (blobNameToTensor->find(output_item.c_str()) == nullptr) {\n      auto err_msg = \"could not find output blob, \" + output_item;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n    network->markOutput(*blobNameToTensor->find(output_item.c_str()));\n  }\n\n  // Build the engine\n  RndInt8Calibrator calibrator(1, params_.calibration_cache, input_dims_);\n  configureBuilder(builder, calibrator);\n\n  engine_ = TensorRTInferObject(builder->buildCudaEngine(*network));\n  if (engine_ == nullptr) {\n    const auto* err_msg = \"build engine from caffe model failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  context_ = 
TensorRTInferObject(engine_->createExecutionContext());\n  if (context_ == nullptr) {\n    const auto* err_msg = \"build context from caffe model engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n#endif  // TENSORRT8\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::UffToTRTModel(\n    const std::shared_ptr<modelbox::Configuration>& config,\n    std::shared_ptr<nvinfer1::IBuilder>& builder,\n    std::shared_ptr<nvinfer1::INetworkDefinition>& network) {\n#ifndef TENSORRT8\n  // parse the uff model to populate the network, then set the outputs\n  auto parser = TensorRTInferObject(nvuffparser::createUffParser());\n  if (parser == nullptr) {\n    const auto* err_msg = \"create parser from uff model engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  // specify which tensors are outputs\n  for (auto& output_item : params_.outputs_name_list) {\n    if (!parser->registerOutput(output_item.c_str())) {\n      auto err_msg =\n          \"Failed to register output \" + output_item + \" in uff file.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n\n  // specify which tensors are inputs (and their dimensions)\n  // TODO set nhwc or nchw\n  for (auto& input_item : params_.uff_input_list) {\n    if (!parser->registerInput(input_item.first.c_str(), *(input_item.second),\n                               nvuffparser::UffInputOrder::kNCHW)) {\n      auto err_msg =\n          \"Failed to register input \" + input_item.first + \" in uff file.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n\n  bool parseRet = false;\n  auto drivers_ptr = GetBindDevice()->GetDeviceManager()->GetDrivers();\n  ModelDecryption uff_decrypt;\n  // do not need to check return , just use GetModelState\n  uff_decrypt.Init(params_.uff_file, drivers_ptr, config);\n  if (uff_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT) {\n    
int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        uff_decrypt.GetModelSharedBuffer(model_len);\n    if (modelBuf) {\n      parseRet = parser->parseBuffer((const char*)modelBuf.get(),\n                                     (size_t)model_len, *network,\n                                     params_.fp16 ? nvinfer1::DataType::kHALF\n                                                  : nvinfer1::DataType::kFLOAT);\n    }\n  } else if (uff_decrypt.GetModelState() ==\n             ModelDecryption::MODEL_STATE_PLAIN) {\n    parseRet = parser->parse(\n        params_.uff_file.c_str(), *network,\n        params_.fp16 ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT);\n  }\n  if (!parseRet) {\n    return {modelbox::STATUS_FAULT, \"parser uff model failed.\"};\n  }\n\n  for (int i = 0, n = network->getNbInputs(); i < n; i++) {\n    auto* input = network->getInput(i);\n    if (input == nullptr) {\n      MBLOG_ERROR << \"input \" << i << \"is invalid\";\n      return {modelbox::STATUS_FAULT, \"get input failed\"};\n    }\n\n    nvinfer1::Dims3 dims =\n        static_cast<nvinfer1::Dims3&&>(input->getDimensions());\n    input_dims_.insert(std::make_pair(input->getName(), dims));\n  }\n\n  // Build the engine\n  RndInt8Calibrator calibrator(1, params_.calibration_cache, input_dims_);\n  configureBuilder(builder, calibrator);\n\n  engine_ = TensorRTInferObject(builder->buildCudaEngine(*network));\n  if (engine_ == nullptr) {\n    const auto* err_msg = \"build engine from uff model failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  context_ = TensorRTInferObject(engine_->createExecutionContext());\n  if (context_ == nullptr) {\n    const auto* err_msg = \"build context from uff model engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n#endif  // TENSORRT8\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::OnnxToTRTModel(\n    const 
std::shared_ptr<modelbox::Configuration>& config,\n    std::shared_ptr<nvinfer1::IBuilder>& builder,\n    std::shared_ptr<nvinfer1::INetworkDefinition>& network) {\n  auto verbosity = (int)nvinfer1::ILogger::Severity::kWARNING;\n\n  // parse the onnx model to populate the network, then set the outputs\n  std::shared_ptr<nvonnxparser::IParser> parser =\n      TensorRTInferObject(nvonnxparser::createParser(*network, gLogger));\n  if (parser == nullptr) {\n    const auto* err_msg = \"create parser from onnx model engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  bool parseRet = false;\n  auto drivers_ptr = GetBindDevice()->GetDeviceManager()->GetDrivers();\n\n  ModelDecryption onnx_decrypt;\n  onnx_decrypt.Init(params_.onnx_model_file, drivers_ptr, config);\n  // do not need to check return , just use GetModelState\n  if (onnx_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT) {\n    int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        onnx_decrypt.GetModelSharedBuffer(model_len);\n    if (modelBuf) {\n      parseRet = parser->parse((void const*)modelBuf.get(), (size_t)model_len);\n    }\n  } else if (onnx_decrypt.GetModelState() ==\n             ModelDecryption::MODEL_STATE_PLAIN) {\n    parseRet =\n        parser->parseFromFile(params_.onnx_model_file.c_str(), verbosity);\n  }\n  if (!parseRet) {\n    return {modelbox::STATUS_FAULT, \"failed to parse onnex file.\"};\n  }\n\n#if defined(TENSORRT7) || defined(TENSORRT8)\n  auto* builder_config = builder->createBuilderConfig();\n  auto* profile = builder->createOptimizationProfile();\n  for (int i = 0, n = network->getNbInputs(); i < n; i++) {\n    auto* input = network->getInput(i);\n    nvinfer1::Dims dims = input->getDimensions();\n    if (dims.d[0] == -1) {\n      dims.d[0] = 1;\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kMIN, dims);\n      dims.d[0] = 
params_.onnx_opt_batch_size;\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kOPT, dims);\n      dims.d[0] = params_.onnx_max_batch_size;\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kMAX, dims);\n    } else {\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kMIN, dims);\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kOPT, dims);\n      profile->setDimensions(input->getName(),\n                             nvinfer1::OptProfileSelector::kMAX, dims);\n    }\n  }\n  builder_config->addOptimizationProfile(profile);\n#ifdef TENSORRT8\n  auto serialized_engine = TensorRTInferObject(\n      builder->buildSerializedNetwork(*network, *builder_config));\n  std::shared_ptr<nvinfer1::IRuntime> infer =\n      TensorRTInferObject(nvinfer1::createInferRuntime(gLogger));\n  if (infer == nullptr) {\n    const auto* err_msg = \"create runtime from model_file engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  engine_ = TensorRTInferObject(infer->deserializeCudaEngine(\n      serialized_engine->data(), serialized_engine->size()));\n#else\n  engine_ = TensorRTInferObject(\n      builder->buildEngineWithConfig(*network, *builder_config));\n#endif\n#else\n  for (int i = 0, n = network->getNbInputs(); i < n; i++) {\n    auto input = network->getInput(i);\n    if (input == nullptr) {\n      MBLOG_ERROR << \"input \" << i << \"is invalid\";\n      return {modelbox::STATUS_FAULT, \"get input failed\"};\n    }\n\n    nvinfer1::Dims3 dims =\n        static_cast<nvinfer1::Dims3&&>(input->getDimensions());\n    input_dims_.insert(std::make_pair(input->getName(), dims));\n  }\n\n  // Build the engine\n  RndInt8Calibrator calibrator(1, params_.calibration_cache, input_dims_);\n  configureBuilder(builder, 
calibrator);\n\n  engine_ = TensorRTInferObject(builder->buildCudaEngine(*network));\n#endif\n  if (engine_ == nullptr) {\n    const auto* err_msg = \"build engine from onnx model failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \", max batch size: \" << engine_->getMaxBatchSize();\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \" model inputs num:\" << params_.inputs_name_list.size();\n  PrintModelBindInfo(params_.inputs_name_list);\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \" model outputs num:\" << params_.outputs_name_list.size();\n  PrintModelBindInfo(params_.outputs_name_list);\n\n  context_ = TensorRTInferObject(engine_->createExecutionContext());\n  if (context_ == nullptr) {\n    const auto* err_msg = \"build context from onnx model engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::EngineToModel(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  MBLOG_INFO << \"engines: \" << params_.engine;\n  std::shared_ptr<nvinfer1::IRuntime> infer =\n      TensorRTInferObject(nvinfer1::createInferRuntime(gLogger));\n  if (infer == nullptr) {\n    const auto* err_msg = \"create runtime from model_file engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (params_.use_DLACore >= 0) {\n    infer->setDLACore(params_.use_DLACore);\n  }\n\n  SetPluginFactory(params_.plugin);\n\n  auto drivers_ptr = GetBindDevice()->GetDeviceManager()->GetDrivers();\n  ModelDecryption engine_decrypt;\n  auto ret = engine_decrypt.Init(params_.engine, drivers_ptr, config);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    const auto* err_msg = \"open engine deploy failed.\";\n    
MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  // do not need to check return , just use GetModelState\n  auto modelState = engine_decrypt.GetModelState();\n  if (modelState == ModelDecryption::MODEL_STATE_ENCRYPT) {\n    int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        engine_decrypt.GetModelSharedBuffer(model_len);\n    if (modelBuf == nullptr) {\n      auto err_msg =\n          \"failed to decrypt model, the model file \" + params_.engine;\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_BADCONF, err_msg};\n    }\n#ifdef TENSORRT8\n    engine_ = TensorRTInferObject(\n        infer->deserializeCudaEngine(modelBuf.get(), model_len));\n#else\n    engine_ = TensorRTInferObject(infer->deserializeCudaEngine(\n        modelBuf.get(), model_len, plugin_factory_.get()));\n#endif\n  } else if (modelState == ModelDecryption::MODEL_STATE_PLAIN) {\n    std::vector<char> trtModelStream;\n    size_t size{0};\n    std::ifstream file(params_.engine, std::ios::binary);\n    if (!file.good()) {\n      auto err_msg = \"read model file failed, the model file \" + params_.engine;\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    file.seekg(0, std::ifstream::end);\n    size = file.tellg();\n    file.seekg(0, std::ifstream::beg);\n    trtModelStream.resize(size);\n    file.read(trtModelStream.data(), size);\n    file.close();\n#ifdef TENSORRT8\n    engine_ = TensorRTInferObject(\n        infer->deserializeCudaEngine(trtModelStream.data(), size));\n#else\n    engine_ = TensorRTInferObject(infer->deserializeCudaEngine(\n        trtModelStream.data(), size, plugin_factory_.get()));\n#endif\n\n  } else {\n    const auto* err_msg = \"model state error.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (engine_ == nullptr) {\n    const auto* err_msg = \"build engine from model_file failed.\";\n    MBLOG_ERROR << err_msg;\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \", max batch size: \" << engine_->getMaxBatchSize();\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \" model inputs num:\" << params_.inputs_name_list.size();\n  PrintModelBindInfo(params_.inputs_name_list);\n  MBLOG_INFO << \"flowunit: \" << GetFlowUnitDesc()->GetFlowUnitName()\n             << \" model outputs num:\" << params_.outputs_name_list.size();\n  PrintModelBindInfo(params_.outputs_name_list);\n\n  context_ = TensorRTInferObject(engine_->createExecutionContext());\n  if (context_ == nullptr) {\n    const auto* err_msg = \"build context from model_file engine failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::CreateEngine(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  modelbox::Status status;\n  // load directly from serialized engine file if deploy not specified\n  if (!params_.engine.empty()) {\n    return EngineToModel(config);\n  }\n\n  std::shared_ptr<nvinfer1::IBuilder> builder =\n      TensorRTInferObject(nvinfer1::createInferBuilder(gLogger));\n  if (builder == nullptr) {\n    const auto* err_msg = \"create builder failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  // parse the caffe model to populate the network, then set the outputs\n#if defined(TENSORRT7) || defined(TENSORRT8)\n  std::shared_ptr<nvinfer1::INetworkDefinition> network =\n      TensorRTInferObject(builder->createNetworkV2(\n          1U << static_cast<int>(\n              nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));\n#else\n  std::shared_ptr<nvinfer1::INetworkDefinition> network =\n      TensorRTInferObject(builder->createNetwork());\n#endif\n  if (network == nullptr) {\n    const auto* err_msg = \"creat 
network failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (!params_.deploy_file.empty()) {\n    return CaffeToTRTModel(config, builder, network);\n  }\n\n  if (!params_.uff_file.empty()) {\n    return UffToTRTModel(config, builder, network);\n  }\n\n  if (!params_.onnx_model_file.empty()) {\n    return OnnxToTRTModel(config, builder, network);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid TensorRTInferenceFlowUnit::SetPluginFactory(\n    const std::string& pluginName) {\n  if (pluginName.empty()) {\n    return;\n  }\n#ifndef TENSORRT8\n  if (pluginName == \"yolo\") {\n    plugin_factory_ = std::make_shared<YoloPluginFactory>();\n    return;\n  }\n#endif\n  MBLOG_DEBUG << \"The plugin \" << pluginName.c_str() << \" is not supported\";\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::InitConfig(\n    const std::shared_ptr<modelbox::Configuration>& fu_config) {\n  auto inference_desc_ =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n          this->GetFlowUnitDesc());\n  const std::vector<modelbox::FlowUnitInput>& flowunit_input_list =\n      inference_desc_->GetFlowUnitInput();\n  const std::vector<modelbox::FlowUnitOutput>& flowunit_output_list =\n      inference_desc_->GetFlowUnitOutput();\n\n  auto model_file = inference_desc_->GetModelEntry();\n\n  auto inner_config = fu_config->GetSubConfig(\"config\");\n  SetUpOtherConfig(inner_config);\n  auto status = SetUpModelFile(inner_config, model_file);\n  if (status != modelbox::STATUS_OK) {\n    return {modelbox::STATUS_BADCONF,\n            \"parser config failed, \" + status.WrapErrormsgs()};\n  }\n\n  for (const auto& output_item : flowunit_output_list) {\n    params_.outputs_name_list.push_back(output_item.GetPortName());\n    params_.outputs_type_list.push_back(output_item.GetPortType());\n  }\n\n  for (const auto& input_item : flowunit_input_list) {\n    params_.inputs_name_list.push_back(input_item.GetPortName());\n  }\n\n  status = 
CreateEngine(fu_config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"engine create failed.\" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::SetUpDynamicLibrary(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  typedef std::shared_ptr<TensorRTInferencePlugin> (*PluginObject)();\n  auto status = modelbox::STATUS_OK;\n  void* driver_handler = dlopen(plugin_.c_str(), RTLD_NOW | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    auto* dl_errmsg = dlerror();\n    auto err_msg = \"dlopen \" + plugin_ + \" failed\";\n    if (dl_errmsg) {\n      err_msg += \", error: \" + std::string(dl_errmsg);\n    }\n\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  DeferCond { return !status; };\n  DeferCondAdd {\n    if (driver_handler != nullptr) {\n      dlclose(driver_handler);\n      driver_handler = nullptr;\n    }\n  };\n\n  auto create_plugin =\n      reinterpret_cast<PluginObject>(dlsym(driver_handler, \"CreatePlugin\"));\n  if (create_plugin == nullptr) {\n    auto* dlerr_msg = dlerror();\n    std::string err_msg = \"dlsym CreatePlugin failed\";\n    if (dlerr_msg) {\n      err_msg += \" error: \";\n      err_msg += dlerr_msg;\n    }\n\n    MBLOG_ERROR << err_msg;\n    status = {modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  std::shared_ptr<TensorRTInferencePlugin> inference_plugin = create_plugin();\n  if (inference_plugin == nullptr) {\n    const auto* err_msg = \"CreatePlugin failed\";\n    MBLOG_ERROR << err_msg;\n    status = {modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  status = inference_plugin->PluginInit(config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"plugin init failed, error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    status = {modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  
driver_handler_ = driver_handler;\n  inference_plugin_ = inference_plugin;\n\n  pre_process_ = std::bind(&TensorRTInferencePlugin::PreProcess,\n                           inference_plugin_, std::placeholders::_1);\n  post_process_ = std::bind(&TensorRTInferencePlugin::PostProcess,\n                            inference_plugin_, std::placeholders::_1);\n  data_pre_ = std::bind(&TensorRTInferencePlugin::DataPre, inference_plugin_,\n                        std::placeholders::_1);\n  data_post_ = std::bind(&TensorRTInferencePlugin::DataPost, inference_plugin_,\n                         std::placeholders::_1);\n\n  return status;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::SetUpInferencePlugin(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  if (plugin_.empty()) {\n    pre_process_ = std::bind(&TensorRTInferenceFlowUnit::PreProcess, this,\n                             std::placeholders::_1);\n    post_process_ = std::bind(&TensorRTInferenceFlowUnit::PostProcess, this,\n                              std::placeholders::_1);\n    return modelbox::STATUS_OK;\n  }\n\n  if (!modelbox::IsAbsolutePath(plugin_)) {\n    auto relpath = modelbox::GetDirName(plugin_);\n    plugin_ = relpath + \"/\" + plugin_;\n  }\n\n  return SetUpDynamicLibrary(config);\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration>& opts) {\n  auto inference_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  auto config = inference_desc->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n\n  params_.device = dev_id_;\n  auto status = InitConfig(merge_config);\n  if (status != modelbox::STATUS_OK) {\n    MBLOG_ERROR << status.WrapErrormsgs();\n    return {modelbox::STATUS_BADCONF, status.WrapErrormsgs()};\n  }\n\n  plugin_ = 
merge_config->GetString(\"plugin\");\n  status = SetUpInferencePlugin(merge_config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"setup preprocess and postprocess failed: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::BindMemory(\n    std::vector<void*>& buffers, const std::string& name, const void* mem,\n    size_t mem_size, size_t size) {\n  int data_type_size = 0;\n\n  const int binding_index = engine_->getBindingIndex(name.c_str());\n  if (binding_index >= (int)buffers.size() || binding_index < 0) {\n    auto err_msg = name + \" not found in network\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  nvinfer1::DataType data_type =\n      engine_->getBindingDataType((int)binding_index);\n  switch (data_type) {\n    case nvinfer1::DataType::kFLOAT:\n      data_type_size = sizeof(float);\n      break;\n    case nvinfer1::DataType::kHALF:\n      data_type_size = sizeof(short);\n      break;\n    case nvinfer1::DataType::kINT8:\n      data_type_size = sizeof(char);\n      break;\n    case nvinfer1::DataType::kINT32:\n      data_type_size = sizeof(int);\n      break;\n    default:\n      break;\n  }\n\n  const nvinfer1::Dims dims = engine_->getBindingDimensions((int)binding_index);\n  const size_t expect_size = Volume(dims) * size * data_type_size;\n  if (expect_size != mem_size) {\n    auto err_msg = \"the input buffer size \" + std::to_string(mem_size) +\n                   \" is not equal tensorrt input real size \" +\n                   std::to_string(expect_size) +\n                   \", batch size: \" + std::to_string(size) +\n                   \", input name: \" + name +\n                   \", input tensorrt type: \" + std::to_string(int(data_type));\n    err_msg += \", input dims: [\";\n    for (int idx = 0; idx < dims.nbDims; ++idx) {\n      err_msg += 
std::to_string(dims.d[idx]) + \", \";\n    }\n    err_msg += \"]\";\n\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  buffers[binding_index] = const_cast<void*>(mem);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::CreateMemory(\n    std::vector<void*>& buffers, const std::string& name,\n    std::shared_ptr<modelbox::BufferList>& output_buf, size_t size) {\n  int data_type_size = 0;\n  modelbox::Status status;\n\n  const int binding_index = engine_->getBindingIndex(name.c_str());\n  if (binding_index >= (int)buffers.size() || binding_index < 0) {\n    auto err_msg = name + \" not found in network\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  const nvinfer1::Dims dims = engine_->getBindingDimensions((int)binding_index);\n  const nvinfer1::DataType data_type =\n      engine_->getBindingDataType((int)binding_index);\n  switch (data_type) {\n    case nvinfer1::DataType::kFLOAT:\n      data_type_size = sizeof(float);\n      break;\n    case nvinfer1::DataType::kHALF:\n      data_type_size = sizeof(short);\n      break;\n    case nvinfer1::DataType::kINT8:\n      data_type_size = sizeof(char);\n      break;\n    case nvinfer1::DataType::kINT32:\n      data_type_size = sizeof(int);\n      break;\n    default:\n      break;\n  }\n\n  auto single_bytes = Volume(dims) * data_type_size;\n  std::vector<size_t> output_shape;\n  output_shape.reserve(dims.nbDims);\n  for (int i = 0; i < dims.nbDims; ++i) {\n    auto dim = dims.d[i];\n    if (dim == -1) {\n      dim = 1;\n    }\n\n    output_shape.push_back(dim);\n  }\n  std::vector<size_t> shape_vector(size, single_bytes);\n  status = output_buf->Build(shape_vector);\n\n  modelbox::ModelBoxDataType modelbox_type = modelbox::MODELBOX_TYPE_INVALID;\n  status = ConvertTrtTypeToModelBoxType(data_type, modelbox_type);\n  if (!status) {\n    auto err_msg =\n        \"output type convert failed ,error: \" + status.WrapErrormsgs();\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n  output_buf->Set(\"type\", modelbox_type);\n  output_buf->Set(\"shape\", output_shape);\n  buffers[binding_index] = output_buf->MutableData();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::PrePareInput(\n    std::shared_ptr<modelbox::DataContext>& data_ctx,\n    std::vector<void*>& memory) {\n  for (const auto& input_name : params_.inputs_name_list) {\n    auto input_buf = data_ctx->Input(input_name);\n    const auto* data = input_buf->ConstData();\n    auto status = BindMemory(memory, input_name, data, input_buf->GetBytes(),\n                             input_buf->Size());\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg =\n          \"bindMemory \" + input_name + \" failed.\" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::PrePareOutput(\n    std::shared_ptr<modelbox::DataContext>& data_ctx,\n    std::vector<void*>& memory) {\n  size_t size = data_ctx->Input(params_.inputs_name_list[0])->Size();\n  for (const auto& output_name : params_.outputs_name_list) {\n    auto output_buf = data_ctx->Output(output_name);\n    auto status = CreateMemory(memory, output_name, output_buf, size);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg =\n          \"createMemory \" + output_name + \" failed.\" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::CudaProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx, cudaStream_t stream) {\n  modelbox::Status status;\n  std::vector<void*> memory(params_.inputs_name_list.size() +\n                            params_.outputs_name_list.size());\n\n  status = pre_process_(data_ctx);\n  if (status != modelbox::STATUS_OK) {\n    auto 
err_msg = \"pre_process failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = PrePareInput(data_ctx, memory);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"prepare input failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = PrePareOutput(data_ctx, memory);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"prepare output failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  bool enqueue_res;\n#if defined(TENSORRT7) || defined(TENSORRT8)\n  if (params_.use_enqueue_v2) {\n    for (auto& input_name : params_.inputs_name_list) {\n      auto bind_index = engine_->getBindingIndex(input_name.c_str());\n      auto bind_dims = engine_->getBindingDimensions(bind_index);\n      auto input_batch_size = data_ctx->Input(input_name)->Size();\n      bind_dims.d[0] = input_batch_size;\n      context_->setBindingDimensions(bind_index, bind_dims);\n    }\n\n    enqueue_res = context_->enqueueV2(&memory[0], stream, nullptr);\n  } else\n#endif\n  {\n    size_t size = data_ctx->Input(params_.inputs_name_list[0])->Size();\n    if (engine_->getMaxBatchSize() < (int)size) {\n      auto err_msg = \"engine max batch size is \" +\n                     std::to_string(engine_->getMaxBatchSize()) +\n                     \", less than batch_size: \" + std::to_string(size);\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    enqueue_res = context_->enqueue(size, &memory[0], stream, nullptr);\n  }\n\n  if (!enqueue_res) {\n    const auto* err_msg = \"enqueue failed.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  auto cuda_ret = cudaStreamSynchronize(stream);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Cuda stream synchronize failed, gpu \"\n                
<< \" cuda ret \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  status = post_process_(data_ctx);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"post_process failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTParams::Clear() {\n  uff_input_list.clear();\n  inputs_name_list.clear();\n  outputs_name_list.clear();\n  outputs_type_list.clear();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorRTInferenceFlowUnit::Close() {\n  input_dims_.clear();\n  return params_.Clear();\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nTensorRTInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string& unit_name, const std::string& unit_type,\n    const std::string& virtual_type) {\n  auto inference_flowunit = std::make_shared<TensorRTInferenceFlowUnit>();\n  return inference_flowunit;\n};\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/tensorrt_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_H_\n\n#include <NvCaffeParser.h>\n#include <NvInfer.h>\n#include <NvInferPlugin.h>\n#include <NvOnnxParser.h>\n#include <NvUffParser.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/cuda/device_cuda.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/tensor.h>\n#include <modelbox/tensor_list.h>\n\n#include <typeinfo>\n\n#include \"tensorrt_inference_plugin.h\"\n\nconstexpr const char* FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char* INFERENCE_TYPE = \"tensorrt\";\nconst std::string SUFFIX_ENGINE = \"engine\";\nconst std::string SUFFIX_UFF = \"uff\";\nconst std::string SUFFIX_ONNX = \"onnx\";\nconst std::string SUFFIX_PROTXT = \"prototxt\";\n\nusing TensorRTProcess =\n    std::function<modelbox::Status(std::shared_ptr<modelbox::DataContext>)>;\n\nclass RndInt8Calibrator;\n\nclass iLogger : public nvinfer1::ILogger {\n  void log(Severity severity, const char* msg) noexcept override {\n    // suppress info-level messages\n    if (severity < Severity::kINFO) {\n      std::cout << msg << std::endl;\n    }\n  }\n};\n\nextern iLogger gLogger;\n\nclass TensorRTParams {\n public:\n  TensorRTParams() = 
default;\n  virtual ~TensorRTParams() = default;\n\n  modelbox::Status Clear();\n  // caffe model file\n  // .prototxt net file\n  std::string deploy_file;\n  // .caffemodel weight file\n  std::string model_file;\n  // uff model file\n  std::string uff_file;\n  std::vector<std::pair<std::string, std::shared_ptr<nvinfer1::Dims>>>\n      uff_input_list;\n  // onnx model file\n  std::string onnx_model_file;\n  // tensorrt engine file\n  std::string engine;\n\n  std::vector<std::string> inputs_name_list, outputs_name_list;\n  std::vector<std::string> outputs_type_list;\n  std::string calibration_cache{\"CalibrationTable\"};\n  std::string plugin;\n  int device{0};\n  int onnx_opt_batch_size{1};\n  int onnx_max_batch_size{1};\n  int workspace_size{16};\n  int use_DLACore{-1};\n  bool dynamic_batch_contain{false};\n  bool dynamic_batch{false};\n  bool use_enqueue_v2{false};\n  bool fp16{false};\n  bool int8{false};\n  bool verbose{false};\n  bool allow_GPUFallback{false};\n  float pct{99};\n};\n\nclass TensorRTInferenceFlowUnit : public modelbox::CudaFlowUnit {\n public:\n  TensorRTInferenceFlowUnit();\n  ~TensorRTInferenceFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration>& opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  /* run when processing data */\n  modelbox::Status CudaProcess(std::shared_ptr<modelbox::DataContext> data_ctx,\n                               cudaStream_t stream) override;\n\n private:\n  void SetUpOtherConfig(const std::shared_ptr<modelbox::Configuration>& config);\n  modelbox::Status InitConfig(\n      const std::shared_ptr<modelbox::Configuration>& fu_config);\n  modelbox::Status CreateEngine(\n      const std::shared_ptr<modelbox::Configuration>& config);\n  modelbox::Status SetUpModelFile(\n 
     const std::shared_ptr<modelbox::Configuration>& config,\n      const std::string& model_file);\n  modelbox::Status SetUpDynamicLibrary(\n      const std::shared_ptr<modelbox::Configuration>& config);\n  modelbox::Status SetUpInferencePlugin(\n      const std::shared_ptr<modelbox::Configuration>& config);\n  void configureBuilder(const std::shared_ptr<nvinfer1::IBuilder>& builder,\n                        RndInt8Calibrator& calibrator);\n  modelbox::Status PrePareOutput(\n      std::shared_ptr<modelbox::DataContext>& data_ctx,\n      std::vector<void*>& memory);\n  modelbox::Status PrePareInput(\n      std::shared_ptr<modelbox::DataContext>& data_ctx,\n      std::vector<void*>& memory);\n  modelbox::Status PreProcess(\n      const std::shared_ptr<modelbox::DataContext>& data_ctx);\n  modelbox::Status PostProcess(\n      const std::shared_ptr<modelbox::DataContext>& data_ctx);\n  modelbox::Status CreateMemory(\n      std::vector<void*>& buffers, const std::string& name,\n      std::shared_ptr<modelbox::BufferList>& output_buf, size_t size);\n  modelbox::Status BindMemory(std::vector<void*>& buffers,\n                              const std::string& name, const void* mem,\n                              size_t mem_size, size_t size);\n  modelbox::Status EngineToModel(\n      const std::shared_ptr<modelbox::Configuration>& config);\n  void PrintModelBindInfo(const std::vector<std::string>& name_list);\n  modelbox::Status UffToTRTModel(\n      const std::shared_ptr<modelbox::Configuration>& config,\n      std::shared_ptr<nvinfer1::IBuilder>& builder,\n      std::shared_ptr<nvinfer1::INetworkDefinition>& network);\n  modelbox::Status OnnxToTRTModel(\n      const std::shared_ptr<modelbox::Configuration>& config,\n      std::shared_ptr<nvinfer1::IBuilder>& builder,\n      std::shared_ptr<nvinfer1::INetworkDefinition>& network);\n  modelbox::Status CaffeToTRTModel(\n      const std::shared_ptr<modelbox::Configuration>& config,\n      std::shared_ptr<nvinfer1::IBuilder>& 
builder,\n      std::shared_ptr<nvinfer1::INetworkDefinition>& network);\n  void SetPluginFactory(const std::string& pluginName);\n\n  TensorRTProcess pre_process_{nullptr}, post_process_{nullptr};\n  TensorRTProcess data_pre_{nullptr}, data_post_{nullptr};\n  TensorRTParams params_;\n  std::string plugin_;\n  void* driver_handler_{nullptr};\n  std::shared_ptr<TensorRTInferencePlugin> inference_plugin_{nullptr};\n\n  std::shared_ptr<nvinfer1::ICudaEngine> engine_{nullptr};\n  std::shared_ptr<nvinfer1::IExecutionContext> context_{nullptr};\n  std::shared_ptr<nvinfer1::IPluginFactory> plugin_factory_{nullptr};\n  std::map<std::string, nvinfer1::Dims3> input_dims_;\n};\n\nclass RndInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator {\n public:\n  RndInt8Calibrator(int total_samples, std::string cache_file,\n                    std::map<std::string, nvinfer1::Dims3>& input_dims);\n\n  ~RndInt8Calibrator() override;\n  int getBatchSize() const noexcept override;\n  bool getBatch(void* bindings[], const char* names[],\n                int nbBindings) noexcept override;\n  const void* readCalibrationCache(size_t& length) noexcept override;\n  void writeCalibrationCache(const void* ptr, size_t length) noexcept override;\n\n private:\n  int total_samples_{0};\n  int current_sample_{0};\n  std::string cache_file_;\n  std::map<std::string, void*> input_device_buffers_;\n  std::vector<char> calibration_cache_;\n};\n\nclass TensorRTInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  TensorRTInferenceFlowUnitFactory() = default;\n  ~TensorRTInferenceFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string& unit_name, const std::string& unit_type,\n      const std::string& virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n\n  std::map<std::string, 
std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/tensorrt_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <cuda_runtime.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\nclass TensorRTFlowUnitTest : public testing::Test {\n public:\n  TensorRTFlowUnitTest() : driver_flow_(std::make_shared<DriverFlowTest>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    SetUpTomlFile();\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  }\n\n  void TearDown() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      GTEST_SKIP();\n    }\n\n    RemoveTomlFile();\n\n    driver_flow_->Clear();\n  };\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_onnx_file = \"model.onnx\",\n                    test_toml_file = 
\"virtual_tensorrt_test.toml\",\n                    test_onnx_file_en = \"model_en.onnx\",\n                    test_toml_file_en = \"virtual_tensorrt_encrypt_test.toml\",\n                    test_plugin_toml_file = \"virtual_plugin_tensorrt_test.toml\";\n\n  std::string tensorrt_path, dest_model_file, dest_toml_file;\n  std::string tensorrt_path_en, dest_model_file_en, dest_toml_file_en;\n  std::string tensorrt_plugin_path, dest_plugin_model_file,\n      dest_plugin_toml_file;\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<DriverFlowTest> driver_flow_;\n  void SetUpTomlFile();\n  void RemoveTomlFile();\n};\n\nvoid TensorRTFlowUnitTest::SetUpTomlFile() {\n  const std::string src_file = test_assets + \"/tensorrt/\" + test_onnx_file;\n  const std::string src_toml = test_data_dir + \"/\" + test_toml_file;\n  const std::string src_file_en = test_assets + \"/tensorrt/\" + test_onnx_file_en;\n  const std::string src_toml_en = test_data_dir + \"/\" + test_toml_file_en;\n  const std::string src_plugin_toml =\n      test_data_dir + \"/\" + test_plugin_toml_file;\n\n  tensorrt_path = test_data_dir + \"/tensorrt\";\n  auto mkdir_ret = mkdir(tensorrt_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n  dest_model_file = tensorrt_path + \"/\" + test_onnx_file;\n  dest_toml_file = tensorrt_path + \"/\" + test_toml_file;\n  auto status = CopyFile(src_file, dest_model_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n  status = CopyFile(src_toml, dest_toml_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  tensorrt_path_en = test_data_dir + \"/tensorrt_encrypt\";\n  mkdir_ret = mkdir(tensorrt_path_en.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n  dest_model_file_en = tensorrt_path_en + \"/\" + test_onnx_file_en;\n  dest_toml_file_en = tensorrt_path_en + \"/\" + test_toml_file_en;\n  status = CopyFile(src_file_en, dest_model_file_en, 0);\n  EXPECT_EQ(status, STATUS_OK);\n  status = CopyFile(src_toml_en, dest_toml_file_en, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  
tensorrt_plugin_path = test_data_dir + \"/tensorrt_plugin\";\n  mkdir_ret = mkdir(tensorrt_plugin_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  dest_plugin_model_file = tensorrt_plugin_path + \"/\" + test_onnx_file;\n  dest_plugin_toml_file = tensorrt_plugin_path + \"/\" + test_plugin_toml_file;\n  status = CopyFile(src_file, dest_plugin_model_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n  status = CopyFile(src_plugin_toml, dest_plugin_toml_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nvoid TensorRTFlowUnitTest::RemoveTomlFile() {\n  auto ret = remove(dest_model_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_toml_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorrt_path.c_str());\n  EXPECT_EQ(ret, 0);\n\n  ret = remove(dest_model_file_en.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_toml_file_en.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorrt_path_en.c_str());\n  EXPECT_EQ(ret, 0);\n\n  ret = remove(dest_plugin_model_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_plugin_toml_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(tensorrt_plugin_path.c_str());\n  EXPECT_EQ(ret, 0);\n}\n\nStatus TensorRTFlowUnitTest::AddMockFlowUnit() {\n  auto ctl_ = driver_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_0_1\");\n    desc_flowunit.SetDescription(\"The test input data, 0 inputs 1 output\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_0_1\");\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n    
mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              auto ext_data = spt->CreateExternalData();\n              if (!ext_data) {\n                MBLOG_ERROR << \"can not get external data.\";\n              }\n\n              auto buffer_list = ext_data->CreateBufferList();\n              buffer_list->Build({10 * sizeof(int)});\n              auto* data = (int*)buffer_list->MutableData();\n              for (size_t i = 0; i < 10; i++) {\n                data[i] = i;\n              }\n\n              auto status = ext_data->Send(buffer_list);\n              if (!status) {\n                MBLOG_ERROR << \"external data send buffer list failed:\"\n                            << status;\n              }\n\n              status = ext_data->Close();\n              if (!status) {\n                MBLOG_ERROR << \"external data close failed:\" << status;\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"test_0_1 \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"test_0_1 \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        
.WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              auto output_buf_1 = op_ctx->Output(\"Out_1\");\n              std::vector<size_t> shape_vector(1, 784 * sizeof(float));\n              modelbox::ModelBoxDataType type = MODELBOX_FLOAT;\n              output_buf_1->Build(shape_vector);\n              output_buf_1->Set(\"type\", type);\n              std::vector<size_t> shape{784};\n              output_buf_1->Set(\"shape\", shape);\n              auto* dev_data = (float*)(output_buf_1->MutableData());\n              for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n                for (size_t j = 0; j < 784; ++j) {\n                  dev_data[i * 784 + j] = 0.0;\n                }\n              }\n\n              MBLOG_DEBUG << output_buf_1->GetBytes();\n              MBLOG_DEBUG << \"test_0_1 gen data, 0\" << output_buf_1->Size();\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_0_1\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"test_1_0\");\n    desc_flowunit.SetDescription(\"The test output data, 1 input 0 outputs\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_1_0.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"test_1_0\");\n    
mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n    mock_flowunit_desc->SetFlowType(STREAM);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"test_1_0 \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"test_1_0 \"\n                         << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& op_ctx) {\n              std::shared_ptr<BufferList> input_bufs = op_ctx->Input(\"In_1\");\n              EXPECT_EQ(input_bufs->Size(), 1);\n              std::vector<size_t> shape_vector{10};\n              std::vector<size_t> input_shape;\n              auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n              EXPECT_TRUE(result);\n\n              const auto* input_data =\n                  static_cast<const float*>(input_bufs->ConstBufferData(0));\n              for (int i = 0; i < 10; ++i) {\n                MBLOG_DEBUG << input_data[i];\n              }\n\n              EXPECT_NEAR(input_data[0], 0.0356422, 1e-6);\n              EXPECT_NEAR(input_data[1], 
0.0931573, 1e-6);\n              EXPECT_NEAR(input_data[2], 0.0815316, 1e-6);\n              EXPECT_NEAR(input_data[3], 0.0455169, 1e-6);\n              EXPECT_NEAR(input_data[4], 0.0595113, 1e-6);\n              EXPECT_NEAR(input_data[5], 0.4212710, 1e-6);\n              EXPECT_NEAR(input_data[6], 0.051922, 1e-6);\n              EXPECT_NEAR(input_data[7], 0.160296, 1e-6);\n              EXPECT_NEAR(input_data[8], 0.00811869, 1e-6);\n              EXPECT_NEAR(input_data[9], 0.0430332, 1e-6);\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"test_1_0\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_OK;\n}  // namespace modelbox\n\nstd::shared_ptr<DriverFlowTest> TensorRTFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(TensorRTFlowUnitTest, RunUnitSingle) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          tensorrt[type=flowunit, flowunit=tensorrt, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> tensorrt:\"input:0\"\n          tensorrt:\"output:0\" -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = 
GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(TensorRTFlowUnitTest, RunUnitPlugin) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          tensorrt[type=flowunit, flowunit=tensorrt_plugin, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> tensorrt:\"input:0\"\n          tensorrt:\"output:0\" -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunPlugin\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(TensorRTFlowUnitTest, RunUnitSingleEncrypt) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          tensorrt[type=flowunit, flowunit=tensorrt_encrypt, device=cuda, deviceid=0, label=\"<input> | <output>\"]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> 
tensorrt:\"input:0\"\n          tensorrt:\"output:0\" -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnitSingleEncrypt\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n// TODO test batch inference\n// TODO test quantize\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/tensorrt_inference_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_INFER_PLUGIN_H_\n#define MODELBOX_INFER_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/data_context.h>\n\n#include <vector>\n\nclass TensorRTInferencePlugin {\n public:\n  TensorRTInferencePlugin() = default;\n  virtual ~TensorRTInferencePlugin() = default;\n\n  // NOLINTNEXTLINE\n  virtual modelbox::Status PluginInit(\n      std::shared_ptr<modelbox::Configuration> config) = 0;\n\n  // NOLINTNEXTLINE\n  virtual modelbox::Status PreProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx) = 0;\n\n  // NOLINTNEXTLINE\n  virtual modelbox::Status PostProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx) = 0;\n\n  virtual modelbox::Status DataPre(\n      // NOLINTNEXTLINE\n      std::shared_ptr<modelbox::DataContext> data_ctx) {\n    return modelbox::STATUS_OK;\n  }\n\n  virtual modelbox::Status DataPost(\n      // NOLINTNEXTLINE\n      std::shared_ptr<modelbox::DataContext> data_ctx) {\n    return modelbox::STATUS_OK;\n  }\n};\n\nextern \"C\" {\n\n#if defined(__clang__)\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wreturn-type-c-linkage\"\n#endif\n\nMODELBOX_DLL_PUBLIC std::shared_ptr<TensorRTInferencePlugin> CreatePlugin();\n\n#if defined(__clang__)\n#pragma 
clang diagnostic pop\n#endif\n}\n\n#endif"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"tensorrt-inference-plugin\")\nproject(modelbox-flowunit-${UNIT_NAME})\n\nfile(GLOB PLUGIN_SOURCE *.cpp *.cc *.c)\n\nif (NOT TENSORRT_FOUND) \n    message(STATUS \"Not found tensorrt, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${TENSORRT_INCLUDE_DIR})\n\n\nset(PLUGIN_SHARED tensorrt-plugin)\nadd_library(${PLUGIN_SHARED} SHARED ${PLUGIN_SOURCE})\n\ntarget_link_libraries(${PLUGIN_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${PLUGIN_SHARED} ${TENSORRT_LIBRARIES})\n\nset_target_properties(${PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${PLUGIN_SHARED}\")\n\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${PLUGIN_SHARED})\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_plugin/generate_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"generate_plugin.h\"\n\nstd::shared_ptr<TensorRTInferencePlugin> CreatePlugin() {\n  return std::make_shared<OriginInferencePlugin>();\n}\n\nmodelbox::Status OriginInferencePlugin::PluginInit(\n    std::shared_ptr<modelbox::Configuration> config) {\n  modelbox::Status status = modelbox::STATUS_OK;\n  std::vector<std::string> names;\n  std::vector<std::string> types;\n  status = SetUpInputOutput(config, \"input\", names, types);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"set up input failed, error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  input_name_list_.swap(names);\n  input_type_list_.swap(types);\n\n  status = SetUpInputOutput(config, \"output\", names, types);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"set up output failed, error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  output_name_list_.swap(names);\n  output_type_list_.swap(types);\n\n  return status;\n}\n\nmodelbox::Status OriginInferencePlugin::SetUpInputOutput(\n    const std::shared_ptr<modelbox::Configuration> &config,\n    const std::string &type, std::vector<std::string> &names,\n    std::vector<std::string> &types) {\n  auto keys = config->GetSubKeys(type);\n  for (unsigned int i = 1; i <= keys.size(); ++i) 
{\n    std::string inner_name;\n    std::string inner_type;\n    auto key = type + \".\";\n    key += type;\n    key += std::to_string(i);\n    auto item_table = config->GetSubKeys(key);\n    if (item_table.empty()) {\n      auto err_msg = \"the key \" + key + \" is not found in config file.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto name_index = key + \".name\";\n    inner_name = config->GetString(name_index);\n    if (inner_name.empty()) {\n      auto err_msg = \"the key \" + key + \" should have key name.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto type_index = key + \".type\";\n    inner_type = config->GetString(type_index);\n    if (inner_type.empty()) {\n      auto err_msg = \"the key \" + key + \" should have key type.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    names.push_back(inner_name);\n    types.push_back(inner_type);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::PreProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::PostProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_plugin/generate_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_SAMPLE_INFER_PLUGIN_H_\n#define MODELBOX_SAMPLE_INFER_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/data_context.h>\n\n#include \"tensorrt_inference_plugin.h\"\n\nclass OriginInferencePlugin : public TensorRTInferencePlugin {\n public:\n  OriginInferencePlugin() = default;\n  ~OriginInferencePlugin() override = default;\n\n  modelbox::Status PreProcess(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status PostProcess(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status PluginInit(\n      std::shared_ptr<modelbox::Configuration> config) override;\n\n  modelbox::Status DataPre(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status DataPost(std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status SetUpInputOutput(\n      const std::shared_ptr<modelbox::Configuration> &config,\n      const std::string &type, std::vector<std::string> &names,\n      std::vector<std::string> &types);\n  std::vector<std::string> input_name_list_, output_name_list_;\n  std::vector<std::string> input_type_list_, output_type_list_;\n};\n\n#endif"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_toml/modelbox.test.plugin.tensorrt.in",
    "content": "[base]\nname = \"tensorrt_plugin\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a tensorrt plugin inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorrt/model.onnx\"\ntype = \"inference\"\nvirtual_type = \"tensorrt\"\nstream = true\nplugin = \"libmodelbox-unit-tensorrt-plugin.so\"\n\n[input]\n[input.input1]\nname = \"input:0\"\n\n[output]\n[output.output1]\nname = \"output:0\"\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_toml/modelbox.test.tensorrt.encrypt.in",
    "content": "[base]\nname = \"tensorrt_encrypt\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a tensorrt inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorrt/model_en.onnx\"\ntype = \"inference\"\nvirtual_type = \"tensorrt\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.1\"\nrootkey = \"73/h8m9FOODRPHXjHS8FB78zAmU6v1bMYbbfavkf06WB0+xdSqgc+1rjx8UWuPdT+bEroyiz3yPqWcNcyP/ZdwtAhkZ3GDIAesY4GYk4KGk5\"\npasswd = \"DM06gWG0wrGL3iJx2a1WUtGrFDO9dHtsLGf6fTLre2/ajy6a+46XAINQjv8+RxzQ09DYdrouUA5TpgdMo7EqLQ==\"\n\n[input]\n[input.input1]\nname = \"input:0\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output:0\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/tensorrt/test_toml/modelbox.test.tensorrt.in",
    "content": "[base]\nname = \"tensorrt\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a tensorrt inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorrt/model.onnx\"\ntype = \"inference\"\nvirtual_type = \"tensorrt\"\n\n[input]\n[input.input1]\nname = \"input:0\"\n\n[output]\n[output.output1]\nname = \"output:0\"\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(CMAKE_CXX_STANDARD 14)\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"torch_inference\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nset(NVCC_ORIGIN_FLAG ${CUDA_NVCC_FLAGS})\nset(CUDA_NVCC_FLAGS \"\" CACHE INTERNAL \"\" FORCE)\n# fix torch compile error in lower version cmake(e.g. 3.10)\nunset(CUDA_cublas_device_LIBRARY CACHE)\nfind_package(Torch QUIET)\nif (NOT TORCH_FOUND) \n    message(STATUS \"Not found torch, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.torch.in ${TEST_WORKING_DATA_DIR}/virtual_torch_test.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.torch.encrypt.in ${TEST_WORKING_DATA_DIR}/virtual_torch_test_encryt.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_toml/modelbox.test.torch_2.in ${TEST_WORKING_DATA_DIR}/virtual_torch_test_2.toml 
@ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${TORCH_INCLUDE_DIRS})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${TORCH_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_CUDART_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(FILES ${HEADER} \n        DESTINATION 
${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox/drivers/devices/cuda/flowunit/inference\n        COMPONENT cuda-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n#include \"torch_inference_flowunit.h\"\n#include \"virtualdriver_inference.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"torch_inference\";\nconstexpr const char *FLOWUNIT_DESC = \"A torch inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<TorchInferenceFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(FLOWUNIT_DESC);\n  desc->SetDeepBind(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/test_toml/modelbox.test.torch.encrypt.in",
    "content": "[base]\nname = \"torch_encrypt\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a torch inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/torch/pytorch_example_en.pt\"\ntype = \"inference\"\nvirtual_type = \"torch\"\n\n[encryption]\nplugin_name = \"modeldecrypt-plugin\"\nplugin_version = \"1.0.0\"\nrootkey = \"VeMf6QqtQeRw45KaFDgPVVNfz3dv/2Zz/Lg+N7MbcA5S/pnEoLgVTEdHT9YwA70JkgoEX6M2t+cS6IGbPA4hqHyEENUX4FPDSIbMd8IE9gC0\"\npasswd = \"ewJx5Cwe9N7xxXklA43bbExoxJezKxPuA4yzr2hd5pc+p2y1cr5WX2VVgftKy0A6caw3faZUnvhxT5XXxFCllA==\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/test_toml/modelbox.test.torch.in",
    "content": "[base]\nname = \"torch\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a torch inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/torch/pytorch_example.pt\"\ntype = \"inference\"\nvirtual_type = \"torch\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/test_toml/modelbox.test.torch_2.in",
    "content": "[base]\nname = \"torch_2\"\ndevice = \"cuda\"\nversion = \"1.1.2\"\ndescription = \"a torch inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/torch/pytorch_example_2.pt\"\ntype = \"inference\"\nvirtual_type = \"torch\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output1\"\ntype = \"float\"\n\n[output.output2]\nname = \"output2\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/torch_inference_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"torch_inference_flowunit.h\"\n\n#include <cuda_runtime.h>\n#include <model_decrypt.h>\n#include <modelbox/base/crypto.h>\n\n#include <fstream>\n#include <mutex>\n\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/type.h\"\n#include \"virtualdriver_inference.h\"\n\nstatic std::mutex torch_load_mutex;\n\nstatic std::map<std::string, c10::ScalarType> type_map = {\n    {\"FLOAT\", torch::kFloat32},  {\"DOUBLE\", torch::kFloat64},\n    {\"INT\", torch::kInt32},      {\"UINT8\", torch::kUInt8},\n    {\"LONG\", torch::kInt64},     {\"INT64\", torch::kInt64},\n    {\"FLOAT16\", torch::kFloat16}};\n\nstatic std::map<c10::ScalarType, modelbox::ModelBoxDataType> torch_mbtype_map =\n    {{torch::kFloat32, modelbox::MODELBOX_FLOAT},\n     {torch::kFloat64, modelbox::MODELBOX_DOUBLE},\n     {torch::kInt32, modelbox::MODELBOX_INT32},\n     {torch::kUInt8, modelbox::MODELBOX_UINT8},\n     {torch::kInt64, modelbox::MODELBOX_INT64},\n     {torch::kFloat16, modelbox::MODELBOX_HALF}};\n\nstatic std::map<modelbox::ModelBoxDataType, c10::ScalarType> mbtype_torch_map =\n    {{modelbox::MODELBOX_FLOAT, torch::kFloat32},\n     {modelbox::MODELBOX_DOUBLE, torch::kFloat64},\n     {modelbox::MODELBOX_INT32, torch::kInt32},\n     {modelbox::MODELBOX_UINT8, torch::kUInt8},\n     {modelbox::MODELBOX_INT64, 
torch::kInt64},\n     {modelbox::MODELBOX_HALF, torch::kFloat16}};\n\nTorchInferenceFlowUnit::TorchInferenceFlowUnit() = default;\nTorchInferenceFlowUnit::~TorchInferenceFlowUnit() = default;\n\nmodelbox::Status ConvertTorchTypeToModelBoxType(\n    c10::ScalarType torch_type, modelbox::ModelBoxDataType &modelbox_type) {\n  auto iter = torch_mbtype_map.find(torch_type);\n  if (iter == torch_mbtype_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"covert TorchType to ModelBoxType failed, unsupport type \" +\n                std::to_string(static_cast<int>(torch_type))};\n  }\n  modelbox_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ConvertModelBoxTypeToTorchType(\n    modelbox::ModelBoxDataType modelbox_type, c10::ScalarType &torch_type) {\n  auto iter = mbtype_torch_map.find(modelbox_type);\n  if (iter == mbtype_torch_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"covert ModelBoxType to TorchType failed, unsupport type \" +\n                std::to_string(modelbox_type)};\n  }\n  torch_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid TorchInferenceFlowUnit::FillInput(\n    const std::vector<modelbox::FlowUnitInput> &flowunit_input_list) {\n  for (auto const &input_item : flowunit_input_list) {\n    auto input_name = input_item.GetPortName();\n    auto input_type = input_item.GetPortType();\n    params_.input_name_list_.push_back(input_name);\n    params_.input_type_list_.push_back(input_type);\n    params_.input_list_.push_back(input_item);\n  }\n}\n\nvoid TorchInferenceFlowUnit::FillOutput(\n    const std::vector<modelbox::FlowUnitOutput> &flowunit_output_list) {\n  for (auto const &output_item : flowunit_output_list) {\n    auto output_name = output_item.GetPortName();\n    auto output_type = output_item.GetPortType();\n    params_.output_name_list_.push_back(output_name);\n    params_.output_type_list_.push_back(output_type);\n    
params_.output_list_.push_back(output_item);\n  }\n}\n\nmodelbox::Status TorchInferenceFlowUnit::LoadModel(\n    const std::string &model_path,\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  std::lock_guard<std::mutex> lck(torch_load_mutex);\n  try {\n    MBLOG_DEBUG << \"model_path: \" << model_path;\n    auto drivers_ptr = GetBindDevice()->GetDeviceManager()->GetDrivers();\n\n    c10::Device device(c10::kCUDA, dev_id_);\n    ModelDecryption torch_decrypt;\n    torch_decrypt.Init(model_path, drivers_ptr, config);\n    // use GetModelState to check err, so donot need check Init ret\n    if (torch_decrypt.GetModelState() == ModelDecryption::MODEL_STATE_ENCRYPT) {\n      int64_t model_len = 0;\n      std::shared_ptr<uint8_t> modelBuf =\n          torch_decrypt.GetModelSharedBuffer(model_len);\n      if (!modelBuf) {\n        return {modelbox::STATUS_FAULT, \"Decrypt model fail\"};\n      }\n      std::stringstream modelStream;\n      modelStream.rdbuf()->pubsetbuf((char *)modelBuf.get(), model_len);\n      model_ = torch::jit::load(modelStream, device);\n    } else if (torch_decrypt.GetModelState() ==\n               ModelDecryption::MODEL_STATE_PLAIN) {\n      model_ = torch::jit::load(model_path, device);\n    } else {\n      return {modelbox::STATUS_FAULT, \"open torch model file fail\"};\n    }\n  } catch (const c10::Error &e) {\n    auto err_msg = \"loading model \" + model_path +\n                   \" failed, c10 error: \" + e.msg() +\n                   \"\\n backtrace: \" + e.backtrace();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  } catch (const std::exception &e) {\n    auto err_msg = \"other loading error, \" + std::string(e.what());\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  MBLOG_DEBUG << \"model loads success.\";\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::InitConfig(\n    const std::shared_ptr<modelbox::Configuration> 
&config) {\n  auto inference_desc_ =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n          this->GetFlowUnitDesc());\n  const std::vector<modelbox::FlowUnitInput> &flowunit_input_list =\n      inference_desc_->GetFlowUnitInput();\n  const std::vector<modelbox::FlowUnitOutput> &flowunit_output_list =\n      inference_desc_->GetFlowUnitOutput();\n\n  std::string model_path = inference_desc_->GetModelEntry();\n\n  auto status = LoadModel(model_path, config);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg =\n        \"could not load inference graph, err: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  FillInput(flowunit_input_list);\n  FillOutput(flowunit_output_list);\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  skip_first_dim_ = opts->GetBool(\"skip_first_dim\", false);\n\n  auto inference_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  auto config = inference_desc->GetConfiguration();\n  if (config == nullptr) {\n    return {modelbox::STATUS_BADCONF, \"inference config is invalid.\"};\n  }\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n  modelbox::Status status = InitConfig(merge_config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"init config failed: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::ConvertType(\n    const std::string &type, c10::ScalarType &torch_type) {\n  if (type_map.find(type) == type_map.end()) {\n    return {modelbox::STATUS_FAULT, \"unsupported type \" + type};\n  }\n\n  torch_type = type_map[type];\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status 
TorchInferenceFlowUnit::CreateTorchTensor(\n    const std::shared_ptr<modelbox::BufferList> &input_buf,\n    const torch::TensorOptions &option, torch::Tensor &input_tensor) {\n  std::vector<size_t> buffer_shape;\n  auto result = input_buf->At(0)->Get(\"shape\", buffer_shape);\n  if (!result) {\n    auto err_msg = \"the input buffer don't have meta shape.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_OK, err_msg};\n  }\n\n  std::vector<int64_t> shape_vec;\n  if (!skip_first_dim_) {\n    shape_vec.emplace_back(static_cast<int64_t>(input_buf->Size()));\n  }\n  copy(buffer_shape.begin(), buffer_shape.end(), back_inserter(shape_vec));\n\n  at::IntArrayRef shape(shape_vec);\n  input_tensor = torch::from_blob(const_cast<void *>(input_buf->ConstData()),\n                                  shape, option);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::CreateTorchTensorList(\n    const std::shared_ptr<modelbox::BufferList> &input_buf,\n    const torch::TensorOptions &option,\n    std::vector<torch::Tensor> &tensor_vec) {\n  std::vector<std::vector<size_t>> buffer_shape_vec;\n  modelbox::ModelBoxDataType buffer_type;\n  auto result = input_buf->At(0)->Get(\"shape\", buffer_shape_vec);\n  if (!result) {\n    auto err_msg = \"the input buffer don't have meta shape.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  result = input_buf->At(0)->Get(\"type\", buffer_type);\n  if (!result) {\n    auto err_msg = \"the input buffer don't have meta type.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::vector<size_t> bytes{0};\n  size_t acc_bytes = 0;\n  for (auto &buffer_shape : buffer_shape_vec) {\n    auto byte = std::accumulate(buffer_shape.begin(), buffer_shape.end(),\n                                (size_t)0, std::multiplies<size_t>()) *\n                modelbox::GetDataTypeSize(buffer_type);\n    acc_bytes += byte;\n    
bytes.emplace_back(acc_bytes);\n  }\n\n  for (size_t i = 0; i < buffer_shape_vec.size(); i++) {\n    std::vector<torch::Tensor> concat_tensor_vec;\n    for (size_t j = 0; j < input_buf->Size(); ++j) {\n      std::vector<int64_t> shape_vec;\n      if (!skip_first_dim_) {\n        shape_vec.emplace_back(static_cast<int64_t>(input_buf->Size()));\n      }\n      copy(buffer_shape_vec[i].begin(), buffer_shape_vec[i].end(),\n           back_inserter(shape_vec));\n\n      at::IntArrayRef shape(shape_vec);\n      torch::Tensor tensor_item = torch::from_blob(\n          (char *)(const_cast<void *>(input_buf->At(j)->ConstData())) +\n              bytes[i],\n          shape, option);\n      concat_tensor_vec.emplace_back(tensor_item);\n    }\n    torch::TensorList concat_tensorlist(concat_tensor_vec);\n    auto concat_tensor = torch::cat(concat_tensorlist);\n    tensor_vec.emplace_back(concat_tensor);\n    concat_tensor_vec.clear();\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::PreProcess(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<torch::jit::IValue> &inputs) {\n  int index = 0;\n  modelbox::Status status;\n  for (const auto &input_name : params_.input_name_list_) {\n    const auto input_buf = data_ctx->Input(input_name);\n\n    std::string type = params_.input_type_list_[index];\n    std::string torch_set_type =\n        params_.input_list_[index++].GetProperity(\"torch_type\");\n\n    c10::ScalarType torch_type;\n    if (type.empty()) {\n      // Get type form buffer meta when model input type is not set\n      modelbox::ModelBoxDataType buffer_type;\n      status = input_buf->At(0)->Get(\"type\", buffer_type);\n      if (!status) {\n        auto err_msg =\n            \"input type is not set ,please set it in inference toml file or \"\n            \"buffer meta . 
error: \" +\n            status.WrapErrormsgs();\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n      status = ConvertModelBoxTypeToTorchType(buffer_type, torch_type);\n      if (!status) {\n        auto err_msg =\n            \"input type convert failed, error: \" + status.WrapErrormsgs();\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n    } else {\n      std::transform(type.begin(), type.end(), type.begin(), ::toupper);\n      status = ConvertType(type, torch_type);\n      if (status != modelbox::STATUS_OK) {\n        return {status, \"input type convert failed.\"};\n      }\n    }\n\n    torch::TensorOptions option = torch::TensorOptions()\n                                      .device(torch::kCUDA, dev_id_)\n                                      .layout(torch::kStrided)\n                                      .dtype(torch_type);\n\n    if (torch_set_type.empty()) {\n      torch::Tensor input_tensor;\n      status = CreateTorchTensor(input_buf, option, input_tensor);\n      if (status != modelbox::STATUS_SUCCESS) {\n        auto err_msg =\n            \"create torch tensor failed, err: \" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n\n      inputs.emplace_back(input_tensor);\n      continue;\n    }\n\n    if (torch_set_type == TENSORLIST) {\n      std::vector<torch::Tensor> tensor_vec;\n      status = CreateTorchTensorList(input_buf, option, tensor_vec);\n      if (status != modelbox::STATUS_SUCCESS) {\n        auto err_msg =\n            \"create torch tensor list failed, err: \" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n\n      torch::TensorList input_tensorlist(tensor_vec);\n      inputs.emplace_back(input_tensorlist);\n      continue;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::ChunkTensors(\n    const std::vector<torch::Tensor> 
&output_tensor,\n    std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>> &chunk_buffers,\n    size_t input_size) {\n  modelbox::Status status = modelbox::STATUS_SUCCESS;\n  for (size_t tensor_index = 0; tensor_index < output_tensor.size();\n       tensor_index++) {\n    auto chunk_tensors =\n        torch::chunk(output_tensor[tensor_index], input_size, 1);\n\n    for (size_t chunk_index = 0; chunk_index < chunk_tensors.size();\n         chunk_index++) {\n      std::shared_ptr<modelbox::Buffer> buffer =\n          std::make_shared<modelbox::Buffer>(GetBindDevice());\n      auto tensor = chunk_tensors[chunk_index];\n      status = buffer->Build(tensor.data_ptr(), tensor.nbytes(),\n                             [tensor](void *ptr) {});\n      if (status != modelbox::STATUS_OK) {\n        auto err_msg = \"output buffer builds error: \" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n\n      if (tensor_index == 0) {\n        chunk_buffers.emplace_back();\n      }\n\n      chunk_buffers[chunk_index].push_back(buffer);\n    }\n  }\n\n  return status;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::CreateOutputBufferList(\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n    torch::Tensor &output_tensor, size_t input_size) {\n  // TODO: build buffer from flowunit device memory by trasfer device ptr\n  torch::Tensor output = output_tensor;\n  if (output_tensor.is_cuda()) {\n    output = output_tensor.cpu();\n  }\n  auto tensor_byte = output.nbytes();\n  auto tensor_data = output.data_ptr();\n\n  std::vector<size_t> shape_vector;\n  if (skip_first_dim_) {\n    shape_vector.emplace_back(tensor_byte);\n  } else {\n    if (input_size == 0) {\n      return {modelbox::STATUS_FAULT, \"Divisor is zero\"};\n    }\n    auto single_byte = tensor_byte / input_size;\n    for (size_t i = 0; i < input_size; ++i) {\n      shape_vector.emplace_back(single_byte);\n    }\n  }\n\n  modelbox::Status 
status;\n  status =\n      output_buffer_list->BuildFromHost(shape_vector, tensor_data, tensor_byte);\n\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"output buffer list builds error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::CreateOutputBufferListFromVector(\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n    std::vector<torch::Tensor> &output_tensor, size_t input_size) {\n  MBLOG_DEBUG << \"output_tensor size: \" << output_tensor.size();\n\n  std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>>\n      chunk_torch_tensors;\n  auto status = ChunkTensors(output_tensor, chunk_torch_tensors, input_size);\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto err_msg = \"chunk tensors failed, err: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  for (auto &chunk_tensor : chunk_torch_tensors) {\n    auto buffer_list = std::make_shared<modelbox::BufferList>(chunk_tensor);\n    auto ret = buffer_list->MakeContiguous();\n    if (!ret) {\n      MBLOG_ERROR << \"buffer list merge failed: \" << ret;\n      return modelbox::STATUS_FAULT;\n    }\n\n    auto dev_mem = buffer_list->GetDeviceMemory();\n    auto merge = std::make_shared<modelbox::Buffer>(dev_mem);\n    output_buffer_list->PushBack(merge);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::SetOutputBufferListMeta(\n    const std::vector<torch::Tensor> &output,\n    std::shared_ptr<modelbox::BufferList> &output_buf) {\n  modelbox::ModelBoxDataType modelbox_type = modelbox::MODELBOX_TYPE_INVALID;\n  std::vector<std::vector<size_t>> output_shape_vec;\n  modelbox::Status status;\n  for (auto &item : output) {\n    status = ConvertTorchTypeToModelBoxType(item.scalar_type(), modelbox_type);\n    if (!status) {\n      auto err_msg =\n        
  \"output type convert failed, error: \" + status.WrapErrormsgs();\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto sizes = item.sizes();\n    std::vector<size_t> output_shape;\n    for (long size : sizes) {\n      output_shape.push_back((size_t)size);\n    }\n    output_shape_vec.emplace_back(output_shape);\n  }\n\n  output_buf->Set(\"type\", modelbox_type);\n  if (output_shape_vec.size() == 1) {\n    output_buf->Set(\"shape\", output_shape_vec[0]);\n    return modelbox::STATUS_OK;\n  }\n\n  output_buf->Set(\"shape\", output_shape_vec);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::GetOutputTensorVec(\n    torch::jit::IValue &outputs, std::vector<torch::Tensor> &output_vector,\n    int index) {\n  if (!outputs.isTensor() && !outputs.isTuple()) {\n    auto err_msg = \"unsupported torch inference output type.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (outputs.isTensor()) {\n    // single output\n    output_vector.push_back(outputs.toTensor());\n    return modelbox::STATUS_OK;\n  }\n\n  // multi outputs\n  auto tmp_output = outputs.toTuple()->elements()[index];\n  if (tmp_output.isTensor()) {\n    output_vector.push_back(tmp_output.toTensor());\n  } else if (tmp_output.isTensorList()) {\n    // one of the outputs is tensorlist\n    output_vector = tmp_output.toTensorVector();\n  } else if (tmp_output.isTuple()) {\n    // one of the outputs is also tuple, only fall into next layer.\n    auto tmp_output_value = tmp_output.toTuple()->elements();\n    MBLOG_DEBUG << \"size: \" << tmp_output_value.size();\n    for (auto &output_value : tmp_output_value) {\n      if (output_value.isTensorList()) {\n        output_vector = output_value.toTensorVector();\n      }\n    }\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::PostProcess(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    torch::jit::IValue &outputs) {\n  int index = 
0;\n  for (const auto &output_name : params_.output_name_list_) {\n    std::vector<torch::Tensor> output_vector;\n    MBLOG_DEBUG << \"output name:\\t\" << output_name;\n\n    auto status = GetOutputTensorVec(outputs, output_vector, index);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg =\n          \"get output tensor vect failed, err: \" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto output_buf = data_ctx->Output(output_name);\n    auto size = data_ctx->Input(params_.input_name_list_[0])->Size();\n    if (size == 0) {\n      auto err_msg = \"input size is 0 bytes\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    MBLOG_DEBUG << \"input size: \" << size;\n    if (output_vector.size() == 1) {\n      auto status = CreateOutputBufferList(output_buf, output_vector[0], size);\n      if (status != modelbox::STATUS_OK) {\n        auto err_msg =\n            \"CreateOutputBufferList single failed.\" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n    } else {\n      auto status =\n          CreateOutputBufferListFromVector(output_buf, output_vector, size);\n      if (status != modelbox::STATUS_OK) {\n        auto err_msg =\n            \"CreateOutputBufferList vector failed.\" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n    }\n\n    status = SetOutputBufferListMeta(output_vector, output_buf);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"SetOutputBufferListMeta failed.\" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n    index++;\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  
std::vector<torch::jit::IValue> inputs;\n  modelbox::Status status = PreProcess(data_ctx, inputs);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"torch inference preprocess failed, error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  torch::jit::IValue outputs;\n  try {\n    outputs = model_.forward(inputs);\n    MBLOG_DEBUG << \"output data: \" << outputs;\n  } catch (const c10::Error &e) {\n    auto err_msg = \"model inference error, \" + e.msg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  } catch (const std::exception &e) {\n    auto err_msg = \"other inference error, \" + std::string(e.what());\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = PostProcess(data_ctx, outputs);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"torch inference postprocess failed, error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TorchInferenceFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nvoid TorchInferenceFlowUnitDesc::SetModelEntry(const std::string &model_entry) {\n  model_entry_ = model_entry;\n}\n\nconst std::string TorchInferenceFlowUnitDesc::GetModelEntry() {\n  return model_entry_;\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nTorchInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  return std::make_shared<TorchInferenceFlowUnit>();\n};\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/torch_inference_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_INFERENCE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/refcache.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/tensor.h>\n#include <modelbox/tensor_list.h>\n#include <torch/script.h>\n\n#include <typeinfo>\n\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *INFERENCE_TYPE = \"torch\";\nconstexpr const char *TENSORLIST = \"tensorlist\";\n\nclass TorchInferenceFlowUnitDesc : public modelbox::FlowUnitDesc {\n  friend class TorchInferenceFlowUnit;\n\n public:\n  TorchInferenceFlowUnitDesc() = default;\n  ~TorchInferenceFlowUnitDesc() override = default;\n\n  void SetModelEntry(const std::string &model_entry);\n  const std::string GetModelEntry();\n\n  std::string model_entry_;\n};\n\nclass TorchInferenceParam {\n public:\n  std::vector<std::string> input_name_list_, output_name_list_;\n  std::vector<std::string> input_type_list_, output_type_list_;\n  std::vector<modelbox::FlowUnitInput> input_list_;\n  std::vector<modelbox::FlowUnitOutput> output_list_;\n};\n\nclass TorchInferenceFlowUnit : public modelbox::FlowUnit {\n public:\n  TorchInferenceFlowUnit();\n  
~TorchInferenceFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  bool skip_first_dim_;\n  TorchInferenceParam params_;\n  torch::jit::script::Module model_;\n  modelbox::Status ConvertType(const std::string &type,\n                               c10::ScalarType &torch_type);\n  modelbox::Status CreateOutputBufferList(\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n      torch::Tensor &output_tensor, size_t input_size);\n  modelbox::Status CreateOutputBufferListFromVector(\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n      std::vector<torch::Tensor> &output_tensor, size_t input_size);\n  modelbox::Status PreProcess(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<torch::jit::IValue> &inputs);\n  modelbox::Status SetOutputBufferListMeta(\n      const std::vector<torch::Tensor> &output,\n      std::shared_ptr<modelbox::BufferList> &output_buf);\n  modelbox::Status PostProcess(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      torch::jit::IValue &outputs);\n\n  modelbox::Status InitConfig(\n      const std::shared_ptr<modelbox::Configuration> &config);\n  modelbox::Status LoadModel(\n      const std::string &model_path,\n      const std::shared_ptr<modelbox::Configuration> &config);\n  void FillInput(\n      const std::vector<modelbox::FlowUnitInput> &flowunit_input_list);\n  void FillOutput(\n      const std::vector<modelbox::FlowUnitOutput> &flowunit_output_list);\n  modelbox::Status ChunkTensors(\n      const std::vector<torch::Tensor> &output_tensor,\n      std::vector<std::vector<std::shared_ptr<modelbox::Buffer>>>\n          &chunk_buffers,\n      size_t input_size);\n  modelbox::Status CreateTorchTensor(\n      
const std::shared_ptr<modelbox::BufferList> &input_buf,\n      const torch::TensorOptions &option, torch::Tensor &input_tensor);\n  modelbox::Status CreateTorchTensorList(\n      const std::shared_ptr<modelbox::BufferList> &input_buf,\n      const torch::TensorOptions &option,\n      std::vector<torch::Tensor> &tensor_vec);\n  modelbox::Status GetOutputTensorVec(torch::jit::IValue &outputs,\n                                      std::vector<torch::Tensor> &output_vector,\n                                      int index);\n};\n\nclass TorchInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  TorchInferenceFlowUnitFactory() = default;\n  ~TorchInferenceFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::string GetFlowUnitFactoryType() override { return FLOWUNIT_TYPE; };\n  std::string GetVirtualType() override { return INFERENCE_TYPE; };\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    return std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>();\n  };\n};\n\n#endif  // MODELBOX_FLOWUNIT_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/torch/torch_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <cuda_runtime.h>\n#include <dlfcn.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass TorchInferenceFlowUnitTest : public testing::Test {\n public:\n  TorchInferenceFlowUnitTest()\n      : driver_flow_(std::make_shared<MockFlow>()) {}\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    AddMockFlowUnit();\n    SetUpTomlFiles();\n  }\n\n  void TearDown() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      GTEST_SKIP();\n    }\n\n    RemoveFiles();\n    driver_flow_ = nullptr;\n  };\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_pt_file = \"pytorch_example.pt\",\n                    
test_pt_file_en = \"pytorch_example_en.pt\",\n                    test_pt_2_output_file = \"pytorch_example_2.pt\",\n                    test_toml_file = \"virtual_torch_test.toml\",\n                    test_toml_file_en = \"virtual_torch_test_encryt.toml\",\n                    test_toml_2_output_file = \"virtual_torch_test_2.toml\";\n  std::string torch_model_path, dest_pt_file, dest_toml_file;\n  std::string dest_pt_2_output_file, dest_toml_2_output_file;\n  std::string dest_pt_file_en, dest_toml_file_en;\n\n private:\n  void AddMockFlowUnit();\n  void Register_Test_0_1_Flowunit();\n  void Register_Test_1_0_Flowunit();\n  void Register_Test_2_0_Flowunit();\n  void SetUpTomlFiles();\n  void RemoveFiles();\n\n  std::shared_ptr<MockFlow> driver_flow_;\n};\n\nvoid TorchInferenceFlowUnitTest::RemoveFiles() {\n  auto ret = remove(dest_toml_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_pt_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_pt_2_output_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_toml_2_output_file.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_toml_file_en.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(dest_pt_file_en.c_str());\n  EXPECT_EQ(ret, 0);\n  ret = remove(torch_model_path.c_str());\n  EXPECT_EQ(ret, 0);\n}\n\nvoid TorchInferenceFlowUnitTest::SetUpTomlFiles() {\n  const std::string src_file_dir = test_assets + \"/torch\";\n  const std::string src_pt_file = src_file_dir + \"/\" + test_pt_file;\n  const std::string src_file_pt_toml = test_data_dir + \"/\" + test_toml_file;\n  const std::string src_pt_2_output_file =\n      src_file_dir + \"/\" + test_pt_2_output_file;\n  const std::string src_file_pt_2_output_toml =\n      test_data_dir + \"/\" + test_toml_2_output_file;\n  const std::string src_pt_file_en = src_file_dir + \"/\" + test_pt_file_en;\n  const std::string src_file_pt_toml_en =\n      test_data_dir + \"/\" + test_toml_file_en;\n\n  torch_model_path = test_data_dir + \"/torch\";\n  auto mkdir_ret 
= mkdir(torch_model_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  dest_pt_file = torch_model_path + \"/\" + test_pt_file;\n  auto status = CopyFile(src_pt_file, dest_pt_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_toml_file = torch_model_path + \"/\" + test_toml_file;\n  status = CopyFile(src_file_pt_toml, dest_toml_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_pt_2_output_file = torch_model_path + \"/\" + test_pt_2_output_file;\n  status = CopyFile(src_pt_2_output_file, dest_pt_2_output_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_toml_2_output_file = torch_model_path + \"/\" + test_toml_2_output_file;\n  status = CopyFile(src_file_pt_2_output_toml, dest_toml_2_output_file, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_pt_file_en = torch_model_path + \"/\" + test_pt_file_en;\n  status = CopyFile(src_pt_file_en, dest_pt_file_en, 0);\n  EXPECT_EQ(status, STATUS_OK);\n\n  dest_toml_file_en = torch_model_path + \"/\" + test_toml_file_en;\n  status = CopyFile(src_file_pt_toml_en, dest_toml_file_en, 0);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nvoid TorchInferenceFlowUnitTest::Register_Test_0_1_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_0_1\", {}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n\n  auto open_func =\n      [=](const std::shared_ptr<modelbox::Configuration> &flow_option,\n          std::shared_ptr<MockFlowUnit> mock_flowunit) -> Status {\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n    auto spt = mock_flowunit_wp.lock();\n    auto ext_data = spt->CreateExternalData();\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({10 * sizeof(int)});\n    auto data = (int *)buffer_list->MutableData();\n    for (size_t i = 0; i < 10; i++) {\n      data[i] = i;\n    }\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << 
\"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](std::shared_ptr<DataContext> op_ctx,\n          std::shared_ptr<MockFlowUnit> mock_flowunit) -> Status {\n    auto output_buf_1 = op_ctx->Output(\"Out_1\");\n    std::vector<size_t> shape_vector(1, 200 * sizeof(float));\n    modelbox::ModelBoxDataType type = MODELBOX_FLOAT;\n    output_buf_1->Build(shape_vector);\n    output_buf_1->Set(\"type\", type);\n    std::vector<size_t> shape{20, 10};\n    output_buf_1->Set(\"shape\", shape);\n    auto dev_data = (float *)(output_buf_1->MutableData());\n    float num = 1.0;\n    for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n      for (size_t j = 0; j < 200; ++j) {\n        dev_data[i * 200 + j] = num;\n      }\n    }\n\n    MBLOG_DEBUG << output_buf_1->GetBytes();\n    MBLOG_DEBUG << \"test_0_1 gen data, 0\" << output_buf_1->Size();\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_functions = std::make_shared<MockFunctionCollection>();\n  mock_functions->RegisterOpenFunc(open_func);\n  mock_functions->RegisterProcessFunc(process_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n};\n\nvoid TorchInferenceFlowUnitTest::Register_Test_1_0_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_1_0\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  auto post_func = [=](std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit> mock_flowunit) {\n    MBLOG_INFO << \"test_1_0 \"\n               << \"DataPost\";\n    return modelbox::STATUS_STOP;\n  };\n\n  auto process_func = [=](std::shared_ptr<DataContext> op_ctx,\n                          std::shared_ptr<MockFlowUnit> mock_flowunit) {\n    std::shared_ptr<BufferList> 
input_bufs = op_ctx->Input(\"In_1\");\n    EXPECT_EQ(input_bufs->Size(), 1);\n    std::vector<size_t> shape_vector{10, 10};\n    std::vector<size_t> input_shape;\n    auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n    EXPECT_TRUE(result);\n    EXPECT_EQ(input_shape, shape_vector);\n\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto input_data =\n          static_cast<const float *>(input_bufs->ConstBufferData(i));\n      MBLOG_DEBUG << \"index: \" << i;\n      for (size_t j = 0; j < 100; j += 10) {\n        MBLOG_DEBUG << input_data[j];\n      }\n\n      EXPECT_NEAR(input_data[0], 9.3490, 1e-4);\n      EXPECT_NEAR(input_data[10], 7.3774, 1e-4);\n      EXPECT_NEAR(input_data[20], 10.6521, 1e-4);\n      EXPECT_NEAR(input_data[30], 8.6493, 1e-4);\n      EXPECT_NEAR(input_data[40], 8.0384, 1e-4);\n      EXPECT_NEAR(input_data[50], 9.2835, 1e-4);\n      EXPECT_NEAR(input_data[60], 11.0915, 1e-4);\n      EXPECT_NEAR(input_data[70], 10.5014, 1e-4);\n      EXPECT_NEAR(input_data[80], 12.0796, 1e-4);\n      EXPECT_NEAR(input_data[90], 11.1132, 1e-4);\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_functions = std::make_shared<MockFunctionCollection>();\n  mock_functions->RegisterDataPostFunc(post_func);\n  mock_functions->RegisterProcessFunc(process_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n};\n\nvoid TorchInferenceFlowUnitTest::Register_Test_2_0_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_2_0\", {\"In_1\", \"In_2\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  auto post_func = [=](std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit> mock_flowunit) {\n    MBLOG_INFO << \"test_2_0 \"\n               << \"DataPost\";\n    return modelbox::STATUS_STOP;\n  };\n\n  auto process_func = [=](std::shared_ptr<DataContext> op_ctx,\n                          
std::shared_ptr<MockFlowUnit> mock_flowunit) {\n    std::shared_ptr<BufferList> input_bufs = op_ctx->Input(\"In_1\");\n    EXPECT_EQ(input_bufs->Size(), 1);\n    std::vector<size_t> shape_vector{10, 10};\n    std::vector<size_t> input_shape;\n    auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n    EXPECT_TRUE(result);\n    EXPECT_EQ(input_shape, shape_vector);\n\n    auto input_bufs_2 = op_ctx->Input(\"In_2\");\n    std::vector<size_t> input_shape_2;\n    result = input_bufs_2->At(0)->Get(\"shape\", input_shape_2);\n    EXPECT_TRUE(result);\n    std::vector<size_t> shape_vector_2{20, 10};\n    EXPECT_EQ(input_shape_2, shape_vector_2);\n    EXPECT_EQ(static_cast<const float *>(input_bufs_2->ConstBufferData(0))[0],\n              1.0);\n\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto input_data =\n          static_cast<const float *>(input_bufs->ConstBufferData(i));\n      MBLOG_DEBUG << \"index: \" << i;\n      for (size_t j = 0; j < 100; j += 10) {\n        MBLOG_DEBUG << input_data[j];\n      }\n\n      EXPECT_NEAR(input_data[0], 10.2705, 1e-4);\n      EXPECT_NEAR(input_data[10], 9.03664, 1e-4);\n      EXPECT_NEAR(input_data[20], 9.106, 1e-4);\n      EXPECT_NEAR(input_data[30], 10.322, 1e-4);\n      EXPECT_NEAR(input_data[40], 10.6219, 1e-4);\n      EXPECT_NEAR(input_data[50], 10.5359, 1e-4);\n      EXPECT_NEAR(input_data[60], 10.6534, 1e-4);\n      EXPECT_NEAR(input_data[70], 10.1457, 1e-4);\n      EXPECT_NEAR(input_data[80], 7.73253, 1e-4);\n      EXPECT_NEAR(input_data[90], 8.46899, 1e-4);\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_functions = std::make_shared<MockFunctionCollection>();\n  mock_functions->RegisterDataPostFunc(post_func);\n  mock_functions->RegisterProcessFunc(process_func);\n  driver_flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                                TEST_DRIVER_DIR);\n};\n\nvoid TorchInferenceFlowUnitTest::AddMockFlowUnit() {\n  
Register_Test_0_1_Flowunit();\n  Register_Test_1_0_Flowunit();\n  Register_Test_2_0_Flowunit();\n}\n\nstd::shared_ptr<MockFlow> TorchInferenceFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(TorchInferenceFlowUnitTest, RunUnitSingleOutput) {\n  std::string toml_content = R\"(\n    [log]\n    level = \"DEBUG\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/torch\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=torch, device=cuda, deviceid=0, label=\"<input> | <output>\", skip_first_dim=true]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(TorchInferenceFlowUnitTest, RunUnitSingleOutputEncrypt) {\n  std::string toml_content = R\"(\n    [log]\n    level = \"DEBUG\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/torch\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=torch_encrypt, device=cuda, deviceid=0, 
label=\"<input> | <output>\", skip_first_dim=true]\n          test_1_0[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output -> test_1_0:In_1                                                                  \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nTEST_F(TorchInferenceFlowUnitTest, RunUnitMutiOutput) {\n  std::string toml_content = R\"(\n    [log]\n    level = \"DEBUG\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"/torch\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          test_0_1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]             \n          inference[type=flowunit, flowunit=torch_2, device=cuda, deviceid=0, label=\"<input> | <output>\", skip_first_dim=true]\n          test_2_0[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0, label=\"<In_1>\"]                          \n          test_0_1:Out_1 -> inference:input\n          inference:output1 -> test_2_0:In_1 \n          inference:output2 -> test_2_0:In_2                                                                 \n        }'''\n    format = \"graphviz\"\n  )\";\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"RunUnit\", toml_content);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cuda\")\nset(UNIT_NAME \"video_decoder\")\n\nif (NOT FFMPEG_FOUND) \n    message(STATUS \"Not found ffmpeg, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\nif (NOT NVCUVID_FOUND) \n    message(STATUS \"Not found nvidia decode sdk, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CUDA_INCLUDE})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${NVCUVID_INCLUDE_DIR})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_VIDEO_DECODE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ncuda_add_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CUDA_SHARED 
${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\nfind_cuda_helper_libs(nppicc)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CUDA_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_CUDA_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${NVCUVID_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${CUDA_nppicc_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_VIDEO_DECODE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cuda-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\n\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CUDA_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CUDA_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CUDA_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CUDA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET 
${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/nppi_color_converter.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"nppi_color_converter.h\"\n#include <modelbox/base/log.h>\n#include <cuda.h>\n\nNppiColorConverter::NppiColorConverter()\n    : cvt_color_{{\"rgb\", nppiNV12ToRGB_8u_P2C3R},\n                 {\"bgr\", nppiNV12ToBGR_8u_P2C3R}} {}\n\nNppiColorConverter::~NppiColorConverter() = default;\n\nmodelbox::Status NppiColorConverter::CvtColor(const uint8_t *src, int32_t width,\n                                            int32_t height, uint8_t *dest,\n                                            const std::string &pix_fmt) {\n  if (pix_fmt == \"nv12\") {\n    auto cu_ret =\n        cuMemcpy((CUdeviceptr)dest, (CUdeviceptr)src, width * height * 3 / 2);\n    if (cu_ret != CUDA_SUCCESS) {\n      MBLOG_ERROR << \"cuMemcpy failed, ret \" << cu_ret;\n      return modelbox::STATUS_FAULT;\n    }\n  } else {\n    auto iter = cvt_color_.find(pix_fmt);\n    if (iter == cvt_color_.end()) {\n      MBLOG_ERROR << \"Not support pix_fmt \" << pix_fmt;\n      return modelbox::STATUS_NOTSUPPORT;\n    }\n\n    const uint8_t *src_arr[2];  // One for Y plane, one for UV plane\n    src_arr[0] = src;\n    src_arr[1] = src + width * height;\n    auto ret = iter->second(src_arr, width, dest, width * 3, {width, height});\n    if (ret != NPP_SUCCESS) {\n      MBLOG_ERROR << \"Cvt color from nv12 to \" << pix_fmt << \" failed, npp ret 
\"\n                  << ret;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/nppi_color_converter.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOWUNIT_NPPI_COLOR_CONVERTER_H_\n#define MODELBOX_FLOWUNIT_NPPI_COLOR_CONVERTER_H_\n\n#include <modelbox/base/status.h>\n#include <nppi_color_conversion.h>\n#include <functional>\n#include <map>\n\nclass NppiColorConverter {\n public:\n  NppiColorConverter();\n  virtual ~NppiColorConverter();\n\n  modelbox::Status CvtColor(const uint8_t *src, int32_t width, int32_t height,\n                          uint8_t *dest, const std::string &pix_fmt);\n\n private:\n  std::map<std::string,\n           std::function<NppStatus(const uint8_t *const src[2],\n                                   int32_t src_step, uint8_t *dest,\n                                   int32_t dest_step, NppiSize size_roi)>>\n      cvt_color_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_NPPI_COLOR_CONVERTER_H_"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/nvcodec_video_decoder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"nvcodec_video_decoder.h\"\n\n#include <string>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/cuda/device_cuda.h\"\n\n#define MIN_ALLOWABLE_DECODE_SURFACE_NUM 1\n\nNvcodecConcurrencyLimiter *NvcodecConcurrencyLimiter::GetInstance() {\n  static NvcodecConcurrencyLimiter limiter;\n  return &limiter;\n}\n\nvoid NvcodecConcurrencyLimiter::Init(uint32_t limit) {\n  if (limit == 0) {\n    limited_ = false;\n  }\n\n  count_ = limit;\n}\n\nvoid NvcodecConcurrencyLimiter::Acquire() {\n  if (!limited_) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(count_lock_);\n  count_cv_.wait(lock, [=] { return count_ > 0; });\n  --count_;\n}\n\nvoid NvcodecConcurrencyLimiter::Release() {\n  if (!limited_) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(count_lock_);\n  ++count_;\n  count_cv_.notify_one();\n}\n\n#define NVDEC_THROW_ERROR(err_str, err_code)                                \\\n  throw NVDECException::MakeNVDECException(err_str, err_code, __FUNCTION__, \\\n                                           __FILE__, __LINE__);\n\nNvcodecVideoDecoder::NvcodecVideoDecoder()\n    : codec_id_map_{{AVCodecID::AV_CODEC_ID_MPEG1VIDEO,\n                     cudaVideoCodec::cudaVideoCodec_MPEG1},\n                    {AVCodecID::AV_CODEC_ID_MPEG2VIDEO,\n                     
cudaVideoCodec::cudaVideoCodec_MPEG2},\n                    {AVCodecID::AV_CODEC_ID_MPEG4,\n                     cudaVideoCodec::cudaVideoCodec_MPEG4},\n                    {AVCodecID::AV_CODEC_ID_VC1,\n                     cudaVideoCodec::cudaVideoCodec_VC1},\n                    {AVCodecID::AV_CODEC_ID_H264,\n                     cudaVideoCodec::cudaVideoCodec_H264},\n                    {AVCodecID::AV_CODEC_ID_HEVC,\n                     cudaVideoCodec::cudaVideoCodec_HEVC},\n                    {AVCodecID::AV_CODEC_ID_VP8,\n                     cudaVideoCodec::cudaVideoCodec_VP8},\n                    {AVCodecID::AV_CODEC_ID_VP9,\n                     cudaVideoCodec::cudaVideoCodec_VP9},\n                    {AVCodecID::AV_CODEC_ID_MJPEG,\n                     cudaVideoCodec::cudaVideoCodec_JPEG}},\n      codec_id_name_map_{{cudaVideoCodec_MPEG1, \"MPEG-1\"},\n                         {cudaVideoCodec_MPEG2, \"MPEG-2\"},\n                         {cudaVideoCodec_MPEG4, \"MPEG-4 (ASP)\"},\n                         {cudaVideoCodec_VP8, \"VP8\"},\n                         {cudaVideoCodec_VP9, \"VP9\"},\n                         {cudaVideoCodec_H264_SVC, \"H.264/SVC\"},\n                         {cudaVideoCodec_H264_MVC, \"H.264/MVC\"},\n                         {cudaVideoCodec_H264, \"AVC/H.264\"},\n                         {cudaVideoCodec_VC1, \"VC-1/WMV\"},\n                         {cudaVideoCodec_JPEG, \"M-JPEG\"},\n                         {cudaVideoCodec_NV12, \"NV12 4:2:0\"},\n                         {cudaVideoCodec_HEVC, \"H.265/HEVC\"},\n                         {cudaVideoCodec_YUYV, \"YUYV 4:2:2\"},\n                         {cudaVideoCodec_YV12, \"YV12 4:2:0\"},\n                         {cudaVideoCodec_UYVY, \"UYVY 4:2:2\"},\n                         {cudaVideoCodec_YUV420, \"YUV  4:2:0\"}} {}\n\nNvcodecVideoDecoder::~NvcodecVideoDecoder() {\n  auto ret = cudaSetDevice(gpu_id_);\n  if (ret != cudaSuccess) {\n    MBLOG_ERROR << \"Set device to gpu \" << 
gpu_id_ << \" failed, err \" << ret;\n  }\n\n  if (video_decoder_ != nullptr) {\n    cuvidDestroyDecoder(video_decoder_);\n    video_decoder_ = nullptr;\n  }\n\n  if (video_parser_ != nullptr) {\n    cuvidDestroyVideoParser(video_parser_);\n    video_parser_ = nullptr;\n  }\n\n  if (ctx_lock_ != nullptr) {\n    cuvidCtxLockDestroy(ctx_lock_);\n  }\n}\n\nmodelbox::Status NvcodecVideoDecoder::Init(const std::string &device_id,\n                                           AVCodecID codec_id,\n                                           const std::string &file_url,\n                                           bool skip_err_frame, bool no_delay) {\n  gpu_id_ = std::stoi(device_id);\n  MBLOG_INFO << \"Init decode in gpu \" << gpu_id_;\n  // Use cuda runtime CUContext on same device in whole modelbox process to\n  // ensure cuda work properly\n  auto cuda_ret = cudaSetDevice(gpu_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Set device to \" << gpu_id_ << \" failed, err \" << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  CUcontext cu_ctx;\n  auto cu_ret = cuCtxGetCurrent(&cu_ctx);\n  if (cu_ret != CUDA_SUCCESS) {\n    GET_CUDA_API_ERROR(cuCtxGetCurrent, cu_ret, err_str);\n    MBLOG_ERROR << \"Get Ctx in gpu \" << gpu_id_ << \" failed, err \" << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  cu_ret = cuvidCtxLockCreate(&ctx_lock_, cu_ctx);\n  if (cu_ret != CUDA_SUCCESS) {\n    GET_CUDA_API_ERROR(cuvidCtxLockCreate, cu_ret, err_str);\n    MBLOG_ERROR << err_str << \" : device \" << device_id.c_str();\n    return modelbox::STATUS_FAULT;\n  }\n\n  CUVIDPARSERPARAMS videoParserParams = {};\n  auto ret = GetCudaVideoCodec(codec_id, codec_id_);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n\n  videoParserParams.CodecType = codec_id_;\n  videoParserParams.ulMaxNumDecodeSurfaces = 1;\n  videoParserParams.ulMaxDisplayDelay =\n      no_delay ? 
0 : 2;  // setting ulMaxDisplayDelay to 2 achieves max decoding\n                         // rate, based on several tests.\n  videoParserParams.pUserData = (void *)this;\n  videoParserParams.pfnSequenceCallback = HandleVideoSequenceProc;\n  videoParserParams.pfnDecodePicture = HandlePictureDecodeProc;\n  videoParserParams.pfnDisplayPicture = HandlePictureDisplayProc;\n\n  cu_ret = cuvidCreateVideoParser(&video_parser_, &videoParserParams);\n  if (cu_ret != CUDA_SUCCESS) {\n    GET_CUDA_API_ERROR(cuvidCreateVideoParser, cu_ret, err_str);\n    MBLOG_ERROR << err_str;\n    return modelbox::STATUS_FAULT;\n  }\n\n  file_url_ = file_url;\n  skip_err_frame_ = skip_err_frame;\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status NvcodecVideoDecoder::Decode(\n    const std::shared_ptr<NvcodecPacket> &pkt,\n    std::vector<std::shared_ptr<NvcodecFrame>> &frame_list, CUstream stream) {\n  if (!video_parser_) {\n    MBLOG_ERROR << \"Nvcodec decode is not inited, parser is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  CUVIDSOURCEDATAPACKET packet = {};\n  packet.payload = pkt->GetDataRef();\n  packet.payload_size = pkt->GetSize();\n  packet.flags = CUVID_PKT_TIMESTAMP;\n  packet.timestamp = pkt->GetPts();\n  latest_pts_ = packet.timestamp;\n  if (packet.payload == nullptr || packet.payload_size == 0) {\n    packet.flags |= CUVID_PKT_ENDOFSTREAM;\n  }\n\n  video_stream_ = stream;\n  frame_count_in_one_decode_ = 0;\n\n  auto cuda_ret = cudaSetDevice(gpu_id_);\n  if (cuda_ret != cudaSuccess) {\n    MBLOG_ERROR << \"Set device to gpu \" << gpu_id_ << \" failed, err \"\n                << cuda_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  NvcodecConcurrencyLimiter::GetInstance()->Acquire();\n  is_limiter_released_ = false;\n  CUDA_API_CALL(cuvidParseVideoData(video_parser_, &packet));\n  if (!is_limiter_released_) {\n    // might release when handle display\n    NvcodecConcurrencyLimiter::GetInstance()->Release();\n    is_limiter_released_ = true;\n  }\n\n  for 
(size_t i = 0; i < frame_count_in_one_decode_; ++i) {\n    auto frame = std::make_shared<NvcodecFrame>();\n    frame->data_ref = decoded_frame_buffer_list_[i].get();\n    frame->width = GetWidth();\n    frame->height = GetHeight();\n    frame->timestamp = decoded_frame_timestamp_list_[i];\n    frame_list.push_back(frame);\n  }\n\n  if (packet.flags & CUVID_PKT_ENDOFSTREAM) {\n    return modelbox::STATUS_NODATA;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status NvcodecVideoDecoder::GetCudaVideoCodec(\n    AVCodecID codec_id, cudaVideoCodec &cuda_codec_id) {\n  auto iter = codec_id_map_.find(codec_id);\n  if (iter == codec_id_map_.end()) {\n    MBLOG_ERROR << \"ffmpeg code id[\" << codec_id\n                << \"] for nvcodec is not supported\";\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  cuda_codec_id = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::string NvcodecVideoDecoder::GetVideoCodecString(\n    cudaVideoCodec cuda_codec_id) {\n  auto iter = codec_id_name_map_.find(cuda_codec_id);\n  if (iter == codec_id_name_map_.end()) {\n    return \"Unknown\";\n  }\n\n  return iter->second;\n}\n\nint32_t NvcodecVideoDecoder::HandleVideoSequence(CUVIDEOFORMAT *video_format) {\n  uint32_t decode_surface = GetDecodeSurfaceNum(video_format);\n  if (decode_surface <= 0) {\n    decode_surface = MIN_ALLOWABLE_DECODE_SURFACE_NUM;\n    MBLOG_WARN << \"Invalid decode surface num (\"\n               << static_cast<int>(video_format->min_num_decode_surfaces)\n               << \"), change it to (\" << MIN_ALLOWABLE_DECODE_SURFACE_NUM\n               << \") for compatibility.\";\n  }\n\n  CheckDeviceCaps(video_format);\n  if (frame_width_ != 0 && luma_height_ != 0 && chroma_height_ != 0) {\n    SequenceChanged(video_format);\n    return decode_surface;\n  }\n\n  SaveSequenceParam(video_format);\n  CreateDecoder(video_format, decode_surface);\n  return decode_surface;\n}\n\nuint32_t NvcodecVideoDecoder::GetDecodeSurfaceNum(CUVIDEOFORMAT 
*video_format) {\n  uint8_t num = 8;\n  if (video_format->codec == cudaVideoCodec::cudaVideoCodec_VP9) {\n    num = 12;\n  } else if (video_format->codec == cudaVideoCodec::cudaVideoCodec_H264 ||\n             video_format->codec == cudaVideoCodec::cudaVideoCodec_H264_SVC ||\n             video_format->codec == cudaVideoCodec::cudaVideoCodec_H264_MVC ||\n             video_format->codec == cudaVideoCodec::cudaVideoCodec_HEVC) {\n    num = 20;\n  }\n\n  return std::max(video_format->min_num_decode_surfaces, num);\n}\n\nvoid NvcodecVideoDecoder::CheckDeviceCaps(CUVIDEOFORMAT *video_format) {\n  CUVIDDECODECAPS decode_caps = {};\n  decode_caps.eCodecType = video_format->codec;\n  decode_caps.eChromaFormat = video_format->chroma_format;\n  decode_caps.nBitDepthMinus8 = video_format->bit_depth_luma_minus8;\n\n  CUDA_API_CALL(cuvidGetDecoderCaps(&decode_caps));\n  if (!decode_caps.bIsSupported) {\n    NVDEC_THROW_ERROR(\"Codec not supported on this GPU\",\n                      CUDA_ERROR_NOT_SUPPORTED);\n  }\n\n  if (video_format->coded_width > decode_caps.nMaxWidth ||\n      video_format->coded_height > decode_caps.nMaxHeight) {\n    std::ostringstream error_str;\n    error_str << std::endl\n              << \"Resolution          : \" << video_format->coded_width << \" x \"\n              << video_format->coded_height << std::endl\n              << \"Max Supported (w x h) : \" << decode_caps.nMaxWidth << \" x \"\n              << decode_caps.nMaxHeight << std::endl\n              << \"Resolution not supported on this GPU\";\n    NVDEC_THROW_ERROR(error_str.str(), CUDA_ERROR_NOT_SUPPORTED);\n  }\n\n  if ((video_format->coded_width >> 4) * (video_format->coded_height >> 4) >\n      decode_caps.nMaxMBCount) {\n    std::ostringstream error_str;\n    error_str << std::endl\n              << \"MBCount             : \"\n              << (video_format->coded_width >> 4) *\n                     (video_format->coded_height >> 4)\n              << std::endl\n              << 
\"Max Supported mbcnt : \" << decode_caps.nMaxMBCount\n              << std::endl\n              << \"MBCount not supported on this GPU\";\n    NVDEC_THROW_ERROR(error_str.str(), CUDA_ERROR_NOT_SUPPORTED);\n  }\n}\n\nvoid NvcodecVideoDecoder::SequenceChanged(CUVIDEOFORMAT *video_format) {\n  if (video_format_.coded_width == video_format->coded_width &&\n      video_format_.coded_height == video_format->coded_height) {\n    // No resolution change\n    return;\n  }\n\n  NVDEC_THROW_ERROR(\"Resolution changed, decoded result may be incorrect\",\n                    CUDA_ERROR_ILLEGAL_STATE);\n}\n\nvoid NvcodecVideoDecoder::SaveSequenceParam(CUVIDEOFORMAT *video_format) {\n  codec_id_ = video_format->codec;\n  chroma_format_ = video_format->chroma_format;\n  // Output format only supports YUV, so we only use nv12 here\n  output_format_ = cudaVideoSurfaceFormat::cudaVideoSurfaceFormat_NV12;\n  byte_depth_per_pixel_ = 1;\n  video_format_ = *video_format;\n}\n\nvoid NvcodecVideoDecoder::CreateDecoder(CUVIDEOFORMAT *video_format,\n                                        uint32_t decode_surface) {\n  CUVIDDECODECREATEINFO decode_create_info = {};\n  decode_create_info.CodecType = video_format->codec;\n  decode_create_info.ChromaFormat = video_format->chroma_format;\n  decode_create_info.OutputFormat = output_format_;\n  decode_create_info.bitDepthMinus8 = video_format->bit_depth_luma_minus8;\n  if (video_format->progressive_sequence) {\n    decode_create_info.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;\n  } else {\n    decode_create_info.DeinterlaceMode = cudaVideoDeinterlaceMode_Adaptive;\n  }\n  decode_create_info.ulNumOutputSurfaces = 2;\n  decode_create_info.ulCreationFlags = cudaVideoCreate_PreferCUVID;\n  decode_create_info.ulNumDecodeSurfaces = decode_surface;\n  decode_create_info.vidLock = ctx_lock_;\n  decode_create_info.ulWidth = video_format->coded_width;\n  decode_create_info.ulHeight = video_format->coded_height;\n  decode_create_info.ulMaxWidth = 
video_format->coded_width;\n  decode_create_info.ulMaxHeight = video_format->coded_height;\n\n  frame_width_ =\n      video_format->display_area.right - video_format->display_area.left;\n  frame_height_ =\n      video_format->display_area.bottom - video_format->display_area.top;\n  decode_create_info.ulTargetWidth = video_format->coded_width;\n  decode_create_info.ulTargetHeight = video_format->coded_height;\n\n  luma_height_ = frame_height_;\n  chroma_height_ = frame_height_ / 2;\n  chroma_planes_number_ = 1;\n  surface_height_ = decode_create_info.ulTargetHeight;\n  surface_width_ = decode_create_info.ulTargetWidth;\n  CUDA_API_CALL(cuvidCreateDecoder(&video_decoder_, &decode_create_info));\n}\n\nint32_t NvcodecVideoDecoder::HandlePictureDecode(CUVIDPICPARAMS *pic_params) {\n  if (!video_decoder_) {\n    NVDEC_THROW_ERROR(\"Decoder not init successed\", CUDA_ERROR_NOT_INITIALIZED);\n    return 0;\n  }\n\n  auto ret = cuvidDecodePicture(video_decoder_, pic_params);\n  if (ret != CUDA_SUCCESS) {\n    MBLOG_ERROR << \"cuvidDecodePicture failed, ret: \" << ret;\n    return 0;\n  }\n  return 1;\n}\n\nint32_t NvcodecVideoDecoder::HandlePictureDisplay(\n    CUVIDPARSERDISPINFO *display_info) {\n  CUVIDPROCPARAMS proc_params = {};\n  proc_params.progressive_frame = display_info->progressive_frame;\n  proc_params.second_field = display_info->repeat_first_field + 1;\n  proc_params.top_field_first = display_info->top_field_first;\n  proc_params.unpaired_field = display_info->repeat_first_field < 0;\n  proc_params.output_stream = video_stream_;\n\n  CUdeviceptr src_frame_ptr = 0;\n  uint32_t src_pitch = 0;\n  CUDA_API_CALL(cuvidMapVideoFrame(video_decoder_, display_info->picture_index,\n                                   &src_frame_ptr, &src_pitch, &proc_params));\n\n  CUVIDGETDECODESTATUS decode_status = {};\n  CUresult result = cuvidGetDecodeStatus(\n      video_decoder_, display_info->picture_index, &decode_status);\n  if (result == CUDA_SUCCESS &&\n      
(decode_status.decodeStatus == cuvidDecodeStatus_Error ||\n       decode_status.decodeStatus == cuvidDecodeStatus_Error_Concealed)) {\n    MBLOG_DEBUG << \"Picture decode has error, image might be incorrect\";\n    if (skip_err_frame_) {\n      CUDA_API_CALL(cuvidUnmapVideoFrame(video_decoder_, src_frame_ptr));\n      return 1;\n    }\n  }\n\n  if (latest_pts_ != 0 && display_info->timestamp > latest_pts_) {\n    MBLOG_WARN << \"Timestamp \" << display_info->timestamp\n               << \" err, should not great than \" << latest_pts_;\n    CUDA_API_CALL(cuvidUnmapVideoFrame(video_decoder_, src_frame_ptr));\n    return 1;\n  }\n\n  NvcodecConcurrencyLimiter::GetInstance()->Release();\n  is_limiter_released_ = true;\n\n  ++frame_count_in_one_decode_;\n  SaveFrame(src_frame_ptr, src_pitch);\n  SaveTimestamp(display_info->timestamp);\n  CUDA_API_CALL(cuvidUnmapVideoFrame(video_decoder_, src_frame_ptr));\n  return 1;\n}\n\nvoid NvcodecVideoDecoder::SaveFrame(CUdeviceptr src_frame_ptr,\n                                    uint32_t src_pitch) {\n  /* src frame is aligned, src pitch is great than frame width, so we need a mem\n   * copy */\n  uint8_t *decoded_frame_ptr =\n      GetDecodeFramePtrFromCache(frame_count_in_one_decode_ - 1);\n  CUDA_MEMCPY2D mem_cpy_2d = {};\n  mem_cpy_2d.srcMemoryType = CU_MEMORYTYPE_DEVICE;\n  mem_cpy_2d.srcDevice = src_frame_ptr;\n  mem_cpy_2d.srcPitch = src_pitch;\n  mem_cpy_2d.dstMemoryType = CU_MEMORYTYPE_DEVICE;\n  mem_cpy_2d.dstDevice = (CUdeviceptr)decoded_frame_ptr;\n  mem_cpy_2d.dstPitch = frame_width_ * byte_depth_per_pixel_;\n  mem_cpy_2d.WidthInBytes = frame_width_ * byte_depth_per_pixel_;\n  mem_cpy_2d.Height = luma_height_;\n  CUDA_API_CALL(cuMemcpy2D(&mem_cpy_2d));\n  mem_cpy_2d.srcDevice = (CUdeviceptr)((uint8_t *)src_frame_ptr +\n                                       mem_cpy_2d.srcPitch * surface_height_);\n  mem_cpy_2d.dstDevice =\n      (CUdeviceptr)(decoded_frame_ptr + mem_cpy_2d.dstPitch * luma_height_);\n  
mem_cpy_2d.Height = chroma_height_;\n  CUDA_API_CALL(cuMemcpy2D(&mem_cpy_2d));\n}\n\nuint8_t *NvcodecVideoDecoder::GetDecodeFramePtrFromCache(size_t frame_index) {\n  std::lock_guard<std::mutex> lock(decoded_frame_buffer_lock_);\n  for (size_t i = decoded_frame_buffer_list_.size(); i <= frame_index; ++i) {\n    // Not enough frames in buffer list, we need alloc one more\n    uint8_t *frame_ptr = nullptr;\n    CUDA_API_CALL(cuMemAlloc((CUdeviceptr *)&frame_ptr, GetFrameSize()));\n    std::shared_ptr<uint8_t> frame_buffer(frame_ptr, [this](uint8_t *ptr) {\n      if (this->ctx_mtx_ != nullptr) {\n        this->ctx_mtx_->lock();\n      }\n      cuMemFree((CUdeviceptr)ptr);\n      if (this->ctx_mtx_ != nullptr) {\n        this->ctx_mtx_->unlock();\n      }\n    });\n    decoded_frame_buffer_list_.push_back(frame_buffer);\n  }\n\n  return decoded_frame_buffer_list_[frame_index].get();\n}\n\nvoid NvcodecVideoDecoder::SaveTimestamp(int64_t timestamp) {\n  if (decoded_frame_timestamp_list_.size() <\n      decoded_frame_buffer_list_.size()) {\n    decoded_frame_timestamp_list_.resize(decoded_frame_buffer_list_.size());\n  }\n\n  decoded_frame_timestamp_list_[frame_count_in_one_decode_ - 1] = timestamp;\n}"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/nvcodec_video_decoder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_NVCODEC_VIDEO_DECODER_H_\n#define MODELBOX_FLOWUNIT_NVCODEC_VIDEO_DECODER_H_\n\n#include <cuda.h>\n#include <libavformat/avformat.h>\n#include <modelbox/base/status.h>\n#include <nvcuvid.h>\n\n#include <condition_variable>\n#include <iostream>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <sstream>\n#include <utility>\n#include <vector>\n\nclass NvcodecConcurrencyLimiter {\n public:\n  static NvcodecConcurrencyLimiter *GetInstance();\n\n  void Init(uint32_t limit);\n\n  void Acquire();\n\n  void Release();\n\n private:\n  NvcodecConcurrencyLimiter() = default;\n\n  std::mutex count_lock_;\n  std::condition_variable count_cv_;\n  uint32_t count_{0};\n  bool limited_{false};\n};\n\nclass NVDECException : public std::exception {\n public:\n  NVDECException(std::string err_str, const CUresult err_code)\n      : err_str_(std::move(err_str)), err_code_(err_code) {}\n\n  ~NVDECException() noexcept override = default;\n  const char *what() const noexcept override { return err_str_.c_str(); }\n  CUresult GetErrorCode() const { return err_code_; }\n  const std::string &GetErrorString() const { return err_str_; }\n  static NVDECException MakeNVDECException(const std::string &err_str,\n                                           CUresult err_code,\n                                  
         const std::string &function_name,\n                                           const std::string &file_name,\n                                           int line_number);\n\n private:\n  std::string err_str_;\n  CUresult err_code_;\n};\n\ninline NVDECException NVDECException::MakeNVDECException(\n    const std::string &err_str, const CUresult err_code,\n    const std::string &function_name, const std::string &file_name,\n    int line_number) {\n  std::ostringstream error_log;\n  error_log << function_name << \" : \" << err_str << \" at \" << file_name << \":\"\n            << line_number << std::endl;\n  NVDECException exception(error_log.str(), err_code);\n  return exception;\n}\n\nclass NvcodecFrame {\n public:\n  int32_t width{0};\n  int32_t height{0};\n  int64_t timestamp{0};\n  uint8_t *data_ref{nullptr};\n};\n\nclass NvcodecPacket {\n public:\n  NvcodecPacket(size_t size, const uint8_t *data_ref, int64_t pts)\n      : size_(size), data_ref_(data_ref), pts_(pts) {}\n\n  NvcodecPacket() = default;\n\n  virtual ~NvcodecPacket() = default;\n\n  size_t GetSize() { return size_; };\n\n  const uint8_t *GetDataRef() { return data_ref_; };\n\n  int64_t GetPts() { return pts_; };\n\n private:\n  size_t size_{0};\n  const uint8_t *data_ref_{nullptr};\n  int64_t pts_{0};\n};\n\nclass NvcodecVideoDecoder {\n public:\n  NvcodecVideoDecoder();\n\n  virtual ~NvcodecVideoDecoder();\n\n  modelbox::Status Init(const std::string &device_id, AVCodecID codec_id,\n                        const std::string &file_url, bool skip_err_frame,\n                        bool no_delay);\n\n  modelbox::Status Decode(\n      const std::shared_ptr<NvcodecPacket> &pkt,\n      std::vector<std::shared_ptr<NvcodecFrame>> &frame_list,\n      CUstream stream = nullptr);\n\n  int32_t GetWidth() { return frame_width_; }\n\n  int32_t GetHeight() { return frame_height_; }\n\n  const std::string &GetFileUrl() { return file_url_; }\n\n private:\n  modelbox::Status InitCuCtx(const std::string 
&device_id);\n\n  modelbox::Status GetCudaVideoCodec(AVCodecID codec_id,\n                                     cudaVideoCodec &cuda_codec_id);\n\n  std::string GetVideoCodecString(cudaVideoCodec cuda_codec_id);\n\n  static int32_t CUDAAPI HandleVideoSequenceProc(void *user_data,\n                                                 CUVIDEOFORMAT *video_format) {\n    return ((NvcodecVideoDecoder *)user_data)\n        ->HandleVideoSequence(video_format);\n  }\n\n  static int32_t CUDAAPI HandlePictureDecodeProc(void *user_data,\n                                                 CUVIDPICPARAMS *pic_params) {\n    return ((NvcodecVideoDecoder *)user_data)->HandlePictureDecode(pic_params);\n  }\n\n  static int32_t CUDAAPI\n  HandlePictureDisplayProc(void *user_data, CUVIDPARSERDISPINFO *display_info) {\n    return ((NvcodecVideoDecoder *)user_data)\n        ->HandlePictureDisplay(display_info);\n  }\n\n  int32_t HandleVideoSequence(CUVIDEOFORMAT *video_format);\n\n  void CheckDeviceCaps(CUVIDEOFORMAT *video_format);\n\n  void SequenceChanged(CUVIDEOFORMAT *video_format);\n\n  void SaveSequenceParam(CUVIDEOFORMAT *video_format);\n\n  void CreateDecoder(CUVIDEOFORMAT *video_format, uint32_t decode_surface);\n\n  int32_t HandlePictureDecode(CUVIDPICPARAMS *pic_params);\n\n  int32_t HandlePictureDisplay(CUVIDPARSERDISPINFO *display_info);\n\n  void SaveFrame(CUdeviceptr src_frame_ptr, uint32_t src_pitch);\n\n  uint8_t *GetDecodeFramePtrFromCache(size_t frame_index);\n\n  void SaveTimestamp(int64_t timestamp);\n\n  inline int32_t GetFrameSize() {\n    return frame_width_ * (luma_height_ + chroma_height_);\n  }\n\n  uint32_t GetDecodeSurfaceNum(CUVIDEOFORMAT *video_format);\n\n  CUvideoparser video_parser_{nullptr};\n  CUstream video_stream_{nullptr};\n  CUcontext ctx_{nullptr};\n  std::mutex *ctx_mtx_{nullptr};\n  CUvideoctxlock ctx_lock_{nullptr};\n  CUvideodecoder video_decoder_{nullptr};\n  std::map<AVCodecID, cudaVideoCodec> codec_id_map_;\n  std::map<cudaVideoCodec, 
std::string> codec_id_name_map_;\n\n  int32_t frame_width_{0};\n  int32_t frame_height_{0};\n  int32_t luma_height_{0};\n  int32_t chroma_height_{0};\n  int32_t chroma_planes_number_{0};\n  int32_t surface_height_{0};\n  int32_t surface_width_{0};\n  cudaVideoChromaFormat chroma_format_{};\n  cudaVideoSurfaceFormat output_format_{};\n  uint8_t byte_depth_per_pixel_{1};\n  cudaVideoCodec codec_id_{};\n  CUVIDEOFORMAT video_format_{};\n\n  std::mutex decoded_frame_buffer_lock_;\n  size_t frame_count_in_one_decode_{0};\n  std::vector<std::shared_ptr<uint8_t>> decoded_frame_buffer_list_;\n  std::vector<int64_t> decoded_frame_timestamp_list_;\n\n  std::string file_url_;\n  bool skip_err_frame_{false};\n  int64_t latest_pts_{0};\n  int32_t gpu_id_{0};\n\n  bool is_limiter_released_{false};\n};\n\n#endif  // MODELBOX_FLOWUNIT_NVCODEC_VIDEO_DECODER_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/video_decoder_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_decoder_flowunit.h\"\n\n#include \"modelbox/device/cuda/device_cuda.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"nppi_color_converter.h\"\n#include \"video_decode_common.h\"\n\nVideoDecoderFlowUnit::VideoDecoderFlowUnit() = default;\nVideoDecoderFlowUnit::~VideoDecoderFlowUnit() = default;\n\nmodelbox::Status VideoDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  out_pix_fmt_str_ = opts->GetString(\"pix_fmt\", \"nv12\");\n  if (videodecode::g_supported_pix_fmt.find(out_pix_fmt_str_) ==\n      videodecode::g_supported_pix_fmt.end()) {\n    MBLOG_ERROR << \"Not support pix fmt \" << out_pix_fmt_str_;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  skip_err_frame_ = opts->GetBool(\"skip_error_frame\", false);\n  concurrency_limit_ = opts->GetUint32(\"concurrency_limit\", 0);\n  NvcodecConcurrencyLimiter::GetInstance()->Init(concurrency_limit_);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::shared_ptr<modelbox::Buffer> flag_buffer = nullptr;\n  auto video_decoder = std::static_pointer_cast<NvcodecVideoDecoder>(\n      
data_ctx->GetPrivate(DECODER_CTX));\n  if (video_decoder == nullptr) {\n    MBLOG_ERROR << \"Video decoder is not init\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::vector<std::shared_ptr<NvcodecPacket>> pkt_list;\n  auto ret = ReadData(data_ctx, pkt_list, flag_buffer);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read av_packet input failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (flag_buffer) {\n    if (ReopenDecoder(data_ctx, flag_buffer) != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Reopen decoder failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    video_decoder = std::static_pointer_cast<NvcodecVideoDecoder>(\n        data_ctx->GetPrivate(DECODER_CTX));\n    if (video_decoder == nullptr) {\n      MBLOG_ERROR << \"Video decoder is not init\";\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  std::vector<std::shared_ptr<NvcodecFrame>> frame_list;\n  modelbox::Status decode_ret = modelbox::STATUS_SUCCESS;\n\n  for (auto &pkt : pkt_list) {\n    try {\n      decode_ret = video_decoder->Decode(pkt, frame_list);\n    } catch (NVDECException &e) {\n      MBLOG_ERROR << \"Nvcodec decode frame failed, detail: \" << e.what();\n      if (skip_err_frame_) {\n        MBLOG_WARN << \"Skip error frame\";\n        continue;\n      }\n      return modelbox::STATUS_FAULT;\n    }\n    if (decode_ret == modelbox::STATUS_FAULT) {\n      MBLOG_ERROR << \"Video decoder failed\";\n      // TODO: Process decoder fault\n      return modelbox::STATUS_FAULT;\n    }\n\n    ret = WriteData(data_ctx, frame_list, decode_ret == modelbox::STATUS_NODATA,\n                    video_decoder->GetFileUrl());\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Send frame data failed\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    frame_list.clear();\n  }\n\n  if (decode_ret == modelbox::STATUS_NODATA) {\n    MBLOG_INFO << \"Video decoder finish\";\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status 
VideoDecoderFlowUnit::ReadData(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<std::shared_ptr<NvcodecPacket>> &pkt_list,\n    std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  bool reset_flag = false;\n  auto video_packet_input = data_ctx->Input(VIDEO_PACKET_INPUT);\n  if (video_packet_input == nullptr) {\n    MBLOG_ERROR << \"video packet input is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (video_packet_input->Size() == 0) {\n    MBLOG_ERROR << \"video packet input size is 0\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < video_packet_input->Size(); ++i) {\n    auto packet_buffer = video_packet_input->At(i);\n\n    if (reset_flag == false) {\n      packet_buffer->Get(\"reset_flag\", reset_flag);\n      if (reset_flag == true) {\n        flag_buffer = packet_buffer;\n      }\n    }\n\n    std::shared_ptr<NvcodecPacket> pkt;\n    auto ret = ReadNvcodecPacket(packet_buffer, pkt);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    pkt_list.push_back(pkt);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::ReadNvcodecPacket(\n    const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n    std::shared_ptr<NvcodecPacket> &pkt) {\n  auto size = packet_buffer->GetBytes();\n  if (size == 1) {\n    pkt = std::make_shared<NvcodecPacket>();\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  const auto *data = (const uint8_t *)packet_buffer->ConstData();\n  if (data == nullptr) {\n    MBLOG_ERROR << \"video_packet data is nullptr\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int64_t pts = 0;\n  packet_buffer->Get(\"pts\", pts);\n  pkt = std::make_shared<NvcodecPacket>(size, data, pts);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::WriteData(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<std::shared_ptr<NvcodecFrame>> &frame_list, bool eos,\n    const std::string 
&file_url) {\n  auto last_frame = std::static_pointer_cast<modelbox::Buffer>(\n      data_ctx->GetPrivate(LAST_FRAME));\n  data_ctx->SetPrivate(LAST_FRAME, nullptr);\n  auto color_cvt = std::static_pointer_cast<NppiColorConverter>(\n      data_ctx->GetPrivate(CVT_CTX));\n  auto frame_buff_list = data_ctx->Output(FRAME_INFO_OUTPUT);\n  if (last_frame != nullptr) {\n    frame_buff_list->PushBack(last_frame);  // Send last frame in cache\n  }\n\n  if (frame_list.size() == 0) {\n    if (last_frame != nullptr && eos) {\n      last_frame->Set(\"eos\", true);  // Set eos for last frame\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto frame_index =\n      std::static_pointer_cast<int64_t>(data_ctx->GetPrivate(FRAME_INDEX_CTX));\n  auto pack_buff_list = data_ctx->Input(VIDEO_PACKET_INPUT);\n  auto pack_buff = pack_buff_list->At(0);\n  int32_t rate_num = 0;\n  int32_t rate_den = 0;\n  int32_t rotate_angle = 0;\n  int64_t duration = 0;\n  pack_buff->Get(\"rate_num\", rate_num);\n  pack_buff->Get(\"rate_den\", rate_den);\n  pack_buff->Get(\"rotate_angle\", rotate_angle);\n  pack_buff->Get(\"duration\", duration);\n  double time_base = 0;\n  pack_buff->Get(\"time_base\", time_base);\n  size_t buffer_size;\n  for (auto &frame : frame_list) {\n    videodecode::UpdateStatsInfo(data_ctx, frame->width, frame->height);\n    auto frame_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n    auto ret = videodecode::GetBufferSize(frame->width, frame->height,\n                                          out_pix_fmt_str_, buffer_size);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return ret;\n    }\n\n    frame_buffer->Build(buffer_size);\n    ret = color_cvt->CvtColor(frame->data_ref, frame->width, frame->height,\n                              (uint8_t *)frame_buffer->MutableData(),\n                              out_pix_fmt_str_);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      return ret;\n    }\n\n    frame_buffer->Set(\"index\", *frame_index);\n    
*frame_index = *frame_index + 1;\n    frame_buffer->Set(\"width\", frame->width);\n    frame_buffer->Set(\"height\", frame->height);\n    frame_buffer->Set(\"height_stride\", frame->height);\n    frame_buffer->Set(\"rate_num\", rate_num);\n    frame_buffer->Set(\"rate_den\", rate_den);\n    frame_buffer->Set(\"rotate_angle\", rotate_angle);\n    frame_buffer->Set(\"duration\", duration);\n    frame_buffer->Set(\"eos\", false);\n    frame_buffer->Set(\"pix_fmt\", out_pix_fmt_str_);\n    auto width_stride = frame->width;\n    if (out_pix_fmt_str_ == \"rgb\" || out_pix_fmt_str_ == \"bgr\") {\n      width_stride *= 3;\n      int32_t channel = 3;\n      frame_buffer->Set(\"channel\", channel);\n      frame_buffer->Set(\"shape\",\n                        std::vector<size_t>({static_cast<size_t>(frame->height),\n                                             static_cast<size_t>(frame->width),\n                                             static_cast<size_t>(channel)}));\n      frame_buffer->Set(\"layout\", std::string(\"hwc\"));\n    }\n    frame_buffer->Set(\"width_stride\", width_stride);\n\n    frame_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    frame_buffer->Set(\"timestamp\", (int64_t)(frame->timestamp * time_base));\n    frame_buffer->Set(\"url\", file_url);\n    if (frame != frame_list.back()) {\n      frame_buff_list->PushBack(frame_buffer);\n    } else {\n      // try save last frame in data_ctx, when demuxe end, we could set last\n      // frame eos to 'true'\n      if (eos) {\n        frame_buffer->Set(\"eos\", true);\n        frame_buff_list->PushBack(frame_buffer);\n      } else {\n        data_ctx->SetPrivate(LAST_FRAME, frame_buffer);\n      }\n    }\n  }\n\n  std::dynamic_pointer_cast<modelbox::CudaMemory>(\n      frame_buff_list->GetDeviceMemory())\n      ->BindStream();\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::ReopenDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const 
std::shared_ptr<modelbox::Buffer> &flag_buffer) {\n  auto old_source_url = std::static_pointer_cast<std::string>(\n      data_ctx->GetPrivate(SOURCE_URL_META));\n  auto old_codec_id =\n      std::static_pointer_cast<AVCodecID>(data_ctx->GetPrivate(CODEC_ID_META));\n\n  if (old_source_url == nullptr || old_codec_id == nullptr) {\n    MBLOG_ERROR << \"Reopen decoder failed, source url or codec id is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string source_url;\n  AVCodecID codec_id;\n  if (flag_buffer->Get(SOURCE_URL_META, source_url) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (flag_buffer->Get(CODEC_ID_META, codec_id) == false) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  if (source_url == *old_source_url && codec_id == *old_codec_id) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  MBLOG_WARN << \"Reopen decoder, source url or codec id changed\";\n  auto ret = CloseDecoder(data_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Close decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return NewDecoder(data_ctx, source_url, codec_id);\n}\n\nmodelbox::Status VideoDecoderFlowUnit::CloseDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  data_ctx->SetPrivate(DECODER_CTX, nullptr);\n  data_ctx->SetPrivate(CVT_CTX, nullptr);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, nullptr);\n  data_ctx->SetOutputMeta(FRAME_INFO_OUTPUT, nullptr);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::NewDecoder(\n    std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &source_url, AVCodecID codec_id) {\n  auto video_decoder = std::make_shared<NvcodecVideoDecoder>();\n  // when concurrency limit set, no delay must be true to avoid gpu cache\n  auto no_delay = concurrency_limit_ != 0;\n  auto ret = video_decoder->Init(GetBindDevice()->GetDeviceID(), codec_id,\n                                 source_url, skip_err_frame_, no_delay);\n  if (ret != 
modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Video decoder init failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto color_cvt = std::make_shared<NppiColorConverter>();\n  auto frame_index = std::make_shared<int64_t>();\n  *frame_index = 0;\n  data_ctx->SetPrivate(DECODER_CTX, video_decoder);\n  data_ctx->SetPrivate(CVT_CTX, color_cvt);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index);\n  data_ctx->SetPrivate(SOURCE_URL_META,\n                       std::make_shared<std::string>(source_url));\n  data_ctx->SetPrivate(CODEC_ID_META, std::make_shared<AVCodecID>(codec_id));\n  auto meta = std::make_shared<modelbox::DataMeta>();\n  meta->SetMeta(SOURCE_URL_META, std::make_shared<std::string>(source_url));\n  data_ctx->SetOutputMeta(FRAME_INFO_OUTPUT, meta);\n  MBLOG_INFO << \"Video decoder init success\";\n  MBLOG_INFO << \"Video decoder output pix fmt \" << out_pix_fmt_str_;\n  MBLOG_INFO << \"Video decoder skip error frame  \" << skip_err_frame_;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_INFO << \"Video Decode Start\";\n  auto in_meta = data_ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  auto codec_id =\n      std::static_pointer_cast<AVCodecID>(in_meta->GetMeta(CODEC_META));\n  if (codec_id == nullptr) {\n    MBLOG_ERROR << \"Stream codec id is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto source_url =\n      std::static_pointer_cast<std::string>(in_meta->GetMeta(SOURCE_URL_META));\n  if (source_url == nullptr) {\n    MBLOG_ERROR << \"Stream source url is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return NewDecoder(data_ctx, *source_url, *codec_id);\n}\n\nmodelbox::Status VideoDecoderFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  data_ctx->SetPrivate(DECODER_CTX, nullptr);\n  data_ctx->SetPrivate(CVT_CTX, nullptr);\n  
data_ctx->SetPrivate(FRAME_INDEX_CTX, nullptr);\n  data_ctx->SetPrivate(SOURCE_URL_META, nullptr);\n  data_ctx->SetPrivate(CODEC_ID_META, nullptr);\n  data_ctx->SetOutputMeta(FRAME_INFO_OUTPUT, nullptr);\n  return modelbox::STATUS_SUCCESS;\n}\n\nMODELBOX_FLOWUNIT(VideoDecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  desc.AddFlowUnitInput({VIDEO_PACKET_INPUT, \"cpu\"});\n  desc.AddFlowUnitOutput({FRAME_INFO_OUTPUT});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetInputContiguous(false);\n  desc.SetResourceNice(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  std::map<std::string, std::string> pix_fmt_list;\n\n  for (const auto &item : videodecode::g_supported_pix_fmt) {\n    pix_fmt_list[item] = item;\n  }\n\n  desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"pix_fmt\", \"list\", true, \"nv12\",\n                               \"the video decoder pixel format\", pix_fmt_list));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"skip_error_frame\", \"bool\", true, \"false\",\n      \"whether the video decoder skip the error frame\"));\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"concurrency_limit\", \"int\", false, \"0\",\n      \"limit gpu decode concurrency to avoid decode stuck\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/video_decoder_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include \"modelbox/flowunit.h\"\n#include \"nvcodec_video_decoder.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"cuda\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video decoder flowunit on cpu. 
\\n\"\n    \"\\t@Port parameter: The input port buffer type is video_packet, the output \"\n    \"port buffer type is video_frame.\\n\"\n    \"\\t  The video_packet buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: pts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: dts,           Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: time_base,     Type: double\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t  The video_frame buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: index,         Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: url,           Type: string\\n\"\n    \"\\t\\tField Name: timestamp,     Type: int64_t\\n\"\n    \"\\t\\tField Name: eos,           Type: bool\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The flowunit 'video_decoder' must be used in pair \"\n    \"with 'video_demuxer'. 
the output buffer meta fields 'pix_fmt' is \"\n    \"'bgr_packed' or 'rgb_packed', 'layout' is 'hcw'.\";\nconstexpr const char *CODEC_META = \"codec_meta\";\nconstexpr const char *DECODER_CTX = \"decoder_ctx\";\nconstexpr const char *CVT_CTX = \"converter_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const char *VIDEO_PACKET_INPUT = \"in_video_packet\";\nconstexpr const char *FRAME_INFO_OUTPUT = \"out_video_frame\";\nconstexpr const char *SOURCE_URL_META = \"source_url\";\nconstexpr const char *CODEC_ID_META = \"codec_id\";\nconstexpr const char *LAST_FRAME = \"last_frame\";\n\nclass VideoDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoDecoderFlowUnit();\n  ~VideoDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status ReadData(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<std::shared_ptr<NvcodecPacket>> &pkt_list,\n      std::shared_ptr<modelbox::Buffer> &flag_buffer);\n  modelbox::Status ReadNvcodecPacket(\n      const std::shared_ptr<modelbox::Buffer> &packet_buffer,\n      std::shared_ptr<NvcodecPacket> &pkt);\n  modelbox::Status WriteData(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      
std::vector<std::shared_ptr<NvcodecFrame>> &frame_list, bool eos,\n      const std::string &file_url);\n  modelbox::Status CreateCudaContext(CUcontext &cu_ctx, std::string &device_id);\n\n  modelbox::Status CloseDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status NewDecoder(std::shared_ptr<modelbox::DataContext> &data_ctx,\n                              const std::string &source_url,\n                              AVCodecID codec_id);\n  modelbox::Status ReopenDecoder(\n      std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::shared_ptr<modelbox::Buffer> &flag_buffer);\n\n  std::string out_pix_fmt_str_;\n  bool skip_err_frame_{false};\n  std::string device_id_;\n  // limit decode concurrency to avoid decoder stuck bug in gpu driver\n  uint32_t concurrency_limit_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/cuda/flowunit/video_decoder/video_decoder_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <functional>\n#include <future>\n#include <thread>\n#include <cuda_runtime.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"common/video_decoder/video_decoder_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\nclass VideoDecoderCudaFlowUnitTest : public testing::Test {\n public:\n  VideoDecoderCudaFlowUnitTest() = default;\n\n protected:\n  void SetUp() override {\n    int count = 0;\n    cudaGetDeviceCount(&count);\n    if (count <= 0) {\n      MBLOG_INFO << \"no cuda device, skip test suit\";\n      GTEST_SKIP();\n    }\n  };\n\n  void TearDown() override{};\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n\n  void StartFlow(const std::string& toml_content, uint64_t millisecond);\n};\n\nvoid VideoDecoderCudaFlowUnitTest::StartFlow(const std::string& toml_content,\n                                             const uint64_t millisecond) {\n  flow_ = std::make_shared<MockFlow>();\n  auto ret = videodecoder::AddMockFlowUnit(flow_);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  ret = flow_->BuildAndRun(\"decoder\", toml_content, millisecond);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n}\n\nTEST_F(VideoDecoderCudaFlowUnitTest, cudaDecoderNv12Test) {\n  
auto toml_content = videodecoder::GetTomlConfig(\"cuda\", \"nv12\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderCudaFlowUnitTest, cudaDecoderRgbTest) {\n  auto toml_content = videodecoder::GetTomlConfig(\"cuda\", \"rgb\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderCudaFlowUnitTest, cudaDecoderBgrTest) {\n  auto toml_content = videodecoder::GetTomlConfig(\"cuda\", \"bgr\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-rockchip)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${ROCKCHIP_INCLUDE_DIR})\n\nadd_subdirectory(core)\nadd_subdirectory(common)\nadd_subdirectory(flowunit)\n"
  },
  {
    "path": "src/drivers/devices/rockchip/common/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-common-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/devices/rockchip/common/video_out/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${INCLUDE})\ninclude_directories(${MODELBOX_COMMON_VIDEO_DECODE_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBRARY modelbox-rochchip-common-ffmpeg-video-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_ROCKCHIP_COMMON_FFMPEG_VIDEO_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_ROCKCHIP_COMMON_FFMPEG_VIDEO_INCLUDE ${INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/common/video_out/ffmpeg_video_muxer.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_video_muxer.h\"\n\n#include <modelbox/base/log.h>\n\n#include \"video_decode_common.h\"\n\nmodelbox::Status FfmpegVideoMuxer::Init(\n    const std::shared_ptr<AVCodecContext> &codec_ctx,\n    const std::shared_ptr<FfmpegWriter> &writer) {\n  destination_url_ = writer->GetDestinationURL();\n  format_ctx_ = writer->GetCtx();\n  if (format_ctx_ == nullptr) {\n    const auto *msg = \"FfmpegVideoMuxer.Init fail format ctx is nullptr\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto ret = SetupStreamParam(codec_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    auto msg =\n        \"FfmpegVideoMuxer.Init SetupStreamParam fail reason: \" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoMuxer::SetupStreamParam(\n    const std::shared_ptr<AVCodecContext> &codec_ctx) {\n  stream_ = avformat_new_stream(format_ctx_.get(), codec_ctx->codec);\n  if (stream_ == nullptr) {\n    MBLOG_ERROR << \"Create video stream failed\";\n    return {modelbox::STATUS_FAULT, \"Create video stream failed\"};\n  }\n\n  stream_->time_base = codec_ctx->time_base;\n  auto ret =\n      avcodec_parameters_from_context(stream_->codecpar, codec_ctx.get());\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, 
ffmpeg_err);\n    MBLOG_ERROR << \"avcodec_parameters_from_context err \" << ffmpeg_err;\n    return {modelbox::STATUS_FAULT,\n            \"avcodec_parameters_from_context err \" + std::string(ffmpeg_err)};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoMuxer::Mux(\n    const AVRational &time_base, const std::shared_ptr<AVPacket> &av_packet) {\n  av_packet_rescale_ts(av_packet.get(), time_base, stream_->time_base);\n  av_packet->stream_index = stream_->index;\n  if (!is_header_wrote_) {\n    AVDictionary *format_opts = nullptr;\n    av_dict_set(&format_opts, \"rtsp_transport\", \"tcp\", 0);\n    auto ret = avformat_write_header(format_ctx_.get(), &format_opts);\n    av_dict_free(&format_opts);\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      MBLOG_ERROR << \"avformat_write_header failed, ret \" << ffmpeg_err;\n      return {modelbox::STATUS_FAULT,\n              \"avformat_write_header failed, ret \" + std::string(ffmpeg_err)};\n    }\n\n    is_header_wrote_ = true;\n  }\n\n  auto ret = av_interleaved_write_frame(format_ctx_.get(), av_packet.get());\n  if (ret < 0) {\n    if (ret == AVERROR(EPIPE) || ret == AVERROR_EOF) {\n      MBLOG_ERROR << \"remote end closed the connection\";\n      return modelbox::STATUS_NOSTREAM;\n    }\n\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"av_write_frame failed, ret \" << ffmpeg_err;\n    return {modelbox::STATUS_FAULT,\n            \"av_write_frame failed, ret \" + std::string(ffmpeg_err)};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nFfmpegVideoMuxer::~FfmpegVideoMuxer() {\n  if (is_header_wrote_) {\n    auto ret = av_write_trailer(format_ctx_.get());\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      MBLOG_ERROR << \"av_write_trailer failed, ret \" << ffmpeg_err;\n    }\n  }\n}"
  },
  {
    "path": "src/drivers/devices/rockchip/common/video_out/ffmpeg_video_muxer.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_\n\n#include <modelbox/base/status.h>\n\n#include <memory>\n#include <string>\n\n#include \"ffmpeg_writer.h\"\n\nclass FfmpegVideoMuxer {\n public:\n  modelbox::Status Init(const std::shared_ptr<AVCodecContext> &codec_ctx,\n                        const std::shared_ptr<FfmpegWriter> &writer);\n\n  modelbox::Status Mux(const AVRational &time_base,\n                       const std::shared_ptr<AVPacket> &av_packet);\n\n  virtual ~FfmpegVideoMuxer();\n\n private:\n  modelbox::Status SetupStreamParam(\n      const std::shared_ptr<AVCodecContext> &codec_ctx);\n\n  std::shared_ptr<AVFormatContext> format_ctx_;\n  std::string destination_url_;\n  AVStream *stream_{nullptr};\n  bool is_header_wrote_{false};\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_MUXER_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/common/video_out/ffmpeg_writer.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_writer.h\"\n\n#include <modelbox/base/log.h>\n\n#include \"video_decode_common.h\"\n\nextern \"C\" {\n#include <libavutil/opt.h>\n}\n\nmodelbox::Status FfmpegWriter::Open(const std::string &format_name,\n                                    const std::string &destination_url) {\n#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)\n  av_register_all();\n#endif\n  auto ret = avformat_network_init();\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avformat_network_init, err \" << ffmpeg_err;\n    return {modelbox::STATUS_FAULT,\n            \"avformat_network_init, err \" + std::string(ffmpeg_err)};\n  }\n\n  format_name_ = format_name;\n  destination_url_ = destination_url;\n\n  AVFormatContext *format_ctx = nullptr;\n  ret = avformat_alloc_output_context2(\n      &format_ctx, nullptr, format_name.c_str(), destination_url.c_str());\n  if (ret < 0 || format_ctx == nullptr) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    auto msg = \"avformat_alloc_output_context2 failed, format \" + format_name +\n               \", dest_url \" + destination_url + \", ret \" +\n               std::string(ffmpeg_err);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  format_ctx_.reset(format_ctx,\n                    [](AVFormatContext *ctx) { 
avformat_free_context(ctx); });\n  if (format_name_ != \"rtsp\") {\n    ret = avio_open2(&format_ctx_->pb, destination_url.c_str(), AVIO_FLAG_WRITE,\n                     nullptr, nullptr);\n    if (ret < 0) {\n      GET_FFMPEG_ERR(ret, ffmpeg_err);\n      auto msg = \"avio_open2 failed, url \" + destination_url + \", format \" +\n                 format_name + \", ret \" + std::string(ffmpeg_err);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  }\n\n  MBLOG_INFO << \"Open url \" << destination_url << \", format \" << format_name\n             << \" success\";\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::string FfmpegWriter::GetFormatName() { return format_name_; }\nstd::string FfmpegWriter::GetDestinationURL() { return destination_url_; }\nstd::shared_ptr<AVFormatContext> FfmpegWriter::GetCtx() { return format_ctx_; }"
  },
  {
    "path": "src/drivers/devices/rockchip/common/video_out/ffmpeg_writer.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_\n#define MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_\n\n#include <modelbox/base/status.h>\n\n#include <memory>\n#include <string>\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n}\n\nclass FfmpegWriter {\n public:\n  modelbox::Status Open(const std::string &format_name,\n                        const std::string &destination_url);\n\n  std::string GetFormatName();\n\n  std::string GetDestinationURL();\n\n  std::shared_ptr<AVFormatContext> GetCtx();\n\n private:\n  std::string format_name_;\n  std::string destination_url_;\n  std::shared_ptr<AVFormatContext> format_ctx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_FFMPEG_WRITER_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/core/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(DEVICE_NAME \"rockchip\")\nproject(modelbox-devices-${DEVICE_NAME})\n\nfile(GLOB_RECURSE LIBMODELBOX_DEVICE_SOURCES *.cpp *.cc *.c)\nset(LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${ROCKCHIP_INCLUDE_DIR})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${RKNN_INCLUDE_DIR})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\n\nset(HEADER\n    ${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE}/modelbox\n)\n\nset(LIBMODELBOX_DEVICE_ROCKCHIP_STATIC libmodelbox-device-${DEVICE_NAME}-static)\nset(LIBMODELBOX_DEVICE_ROCKCHIP_SHARED libmodelbox-device-${DEVICE_NAME}-shared)\n\nadd_library(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} STATIC ${LIBMODELBOX_DEVICE_SOURCES})\nadd_library(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} SHARED ${LIBMODELBOX_DEVICE_SOURCES})\n\nset_target_properties(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} PROPERTIES\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION 
${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} ${RKMPP_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} ${RKRGA_LIBRARY})\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} dl)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} ${RKMPP_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} ${RKRGA_LIBRARY})\n\nset_target_properties(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED}\n    PROPERTIES OUTPUT_NAME \"modelbox-device-${DEVICE_NAME}\"\n)\nset_target_properties(${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED}\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${TEST_WORKING_LIB_DIR}\"\n    RUNTIME_OUTPUT_DIRECTORY \"${TEST_WORKING_BIN_DIR}\"\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox-device-${DEVICE_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc @ONLY)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED}\n    COMPONENT rockchip-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC}\n    COMPONENT rockchip-device-flowunit-devel\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL)\n\ninstall(DIRECTORY\n    ${HEADER}\n    DESTINATION 
${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT rockchip-device-flowunit-devel\n)\n\ninstall(FILES\n    ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc\n    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig\n    COMPONENT rockchip-device-flowunit-devel\n)\n\nset(LIBMODELBOX_DEVICE_ROCKCHIP_STATIC ${LIBMODELBOX_DEVICE_ROCKCHIP_STATIC} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ROCKCHIP_SHARED ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE ${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE} CACHE INTERNAL \"\")\n\nset(LIBMODELBOX_DEVICE_LIBMODELBOX_DEVICE_ROCKCHIP_SHARED_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_SOURCES ${LIBMODELBOX_DEVICE_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\n\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/rockchip/core/device_rockchip.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/rockchip/device_rockchip.h\"\n\n#include <dlfcn.h>\n#include <fcntl.h>\n#include <linux/kernel.h>\n#include <linux/unistd.h>\n#include <stdio.h>\n#include <sys/sysinfo.h>\n\n#include <fstream>\n#include <iostream>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"rknn_api.h\"\n\nconst std::string LIB_RKNN_API_PATH = \"librknn_api.so\";\n\nnamespace modelbox {\n\nRockChip::RockChip(const std::shared_ptr<DeviceMemoryManager> &mem_mgr)\n    : Device(mem_mgr) {}\n\nstd::string RockChip::GetType() const { return DEVICE_TYPE; }\n\nbool RockChip::SupportMemContiguous() const { return false; }\n\nStatus RockChip::DeviceExecute(const DevExecuteCallBack &rkfun,\n                               int32_t priority, size_t rkcount) {\n  if (0 == rkcount) {\n    return STATUS_OK;\n  }\n\n  for (size_t i = 0; i < rkcount; ++i) {\n    auto status = rkfun(i);\n    if (!status) {\n      MBLOG_WARN << \"executor rkfunc failed: \" << status\n                 << \" stack trace:\" << GetStackTrace();\n      return status;\n    }\n  }\n\n  return STATUS_OK;\n};\n\nbool RockChip::NeedResourceNice() { return true; }\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>>\nRockChipFactory::ProbeRKNNDevice() {\n  
std::map<std::string, std::shared_ptr<DeviceDesc>> device_desc_map;\n\n  void *handler = dlopen(LIB_RKNN_API_PATH.c_str(), RTLD_LAZY | RTLD_LOCAL);\n  if (handler == nullptr) {\n    MBLOG_ERROR << \"dlopen \" << LIB_RKNN_API_PATH << \" failed.\";\n    return device_desc_map;\n  }\n\n  Defer { dlclose(handler); };\n\n  std::shared_ptr<rknn_devices_id> dev_ids =\n      std::make_shared<rknn_devices_id>();\n  if (dev_ids == nullptr) {\n    MBLOG_ERROR << \"make dev ids fail\";\n    return device_desc_map;\n  }\n\n  typedef int (*find_device_func)(rknn_devices_id *);\n  auto find_device =\n      reinterpret_cast<find_device_func>(dlsym(handler, \"rknn_find_devices\"));\n  if (find_device == nullptr) {\n    MBLOG_ERROR << \"find device is nullptr\";\n    return device_desc_map;\n  }\n\n  if (find_device(dev_ids.get()) != RKNN_SUCC || dev_ids->n_devices == 0) {\n    MBLOG_ERROR << \"find none rknn device\";\n    return device_desc_map;\n  }\n\n  std::vector<std::string> rknn_devs;\n  for (size_t i = 0; i < dev_ids->n_devices; i++) {\n    rknn_devs.emplace_back(std::string(dev_ids->ids[i]));\n  }\n\n  struct sysinfo s_info;\n  auto ret = sysinfo(&s_info);\n  if (ret != 0) {\n    MBLOG_ERROR << \"failed to sysinfo ret = \" << ret;\n  }\n\n  for (size_t i = 0; i < rknn_devs.size(); i++) {\n    auto device_desc = std::make_shared<RockChipDesc>();\n    device_desc->SetDeviceDesc(\"This is a rockchip device description.\");\n    // inference module will bind all rockchip device to one\n    auto id_str = std::to_string(i);\n    device_desc->SetDeviceId(id_str);\n    device_desc->SetDeviceMemory(GetBytesReadable(s_info.totalram));\n    device_desc->SetDeviceType(DEVICE_TYPE);\n    device_desc_map.insert(std::make_pair(id_str, device_desc));\n  }\n\n  RKNNDevs::Instance().SetNames(rknn_devs);\n\n  return device_desc_map;\n}\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>>\nRockChipFactory::DeviceProbe() {\n  RKNNDevs::Instance().UpdateDeviceType();\n\n  std::map<std::string, 
std::shared_ptr<DeviceDesc>> device_desc_map =\n      ProbeRKNNDevice();\n  auto deviceType = RKNNDevs::Instance().GetDeviceType();\n  if (deviceType == RKNNDevs::RKNN_DEVICE_TYPE_RK356X ||\n      deviceType == RKNNDevs::RKNN_DEVICE_TYPE_RK358X ||\n      deviceType == RKNNDevs::RKNN_DEVICE_TYPE_RV110X) {\n    MBLOG_INFO << \"find rknpu2 type inference.\";\n    struct sysinfo s_info;\n    auto ret = sysinfo(&s_info);\n    if (ret != 0) {\n      MBLOG_ERROR << \"failed to sysinfo ret = \" << ret;\n    }\n\n    auto device_desc = std::make_shared<RockChipDesc>();\n    device_desc->SetDeviceDesc(\"This is a rknpu2 device description.\");\n    auto id_str = std::to_string(device_desc_map.size());\n    device_desc->SetDeviceId(id_str);\n    device_desc->SetDeviceMemory(GetBytesReadable(s_info.totalram));\n    device_desc->SetDeviceType(DEVICE_TYPE);\n    device_desc_map.insert(std::make_pair(id_str, device_desc));\n  }\n\n  return device_desc_map;\n}\n\nstd::string RockChipFactory::GetDeviceFactoryType() { return DEVICE_TYPE; }\n\nstd::shared_ptr<Device> RockChipFactory::CreateDevice(\n    const std::string &device_id) {\n  auto mem_mgr = std::make_shared<RockChipMemoryManager>(device_id);\n  auto status = mem_mgr->Init();\n  if (!status) {\n    StatusError = status;\n    return nullptr;\n  }\n\n  return std::make_shared<RockChip>(mem_mgr);\n}\n\nvoid RKNNDevs::SetNames(std::vector<std::string> &dev_names) {\n  dev_names_.swap(dev_names);\n}\n\nconst std::vector<std::string> &RKNNDevs::GetNames() { return dev_names_; }\n\nvoid RKNNDevs::UpdateDeviceType() {\n  type_ = RKNN_DEVICE_TYPE_OTHERS;\n  std::ifstream dev_file(\"/proc/device-tree/compatible\", std::ios::in);\n  if (!dev_file.is_open()) {\n    MBLOG_ERROR << \"failed to open device file\";\n    return;\n  }\n\n  std::string strLine;\n\n  std::unordered_map<std::string, RKNNDevs::RKNN_DEVICE_TYPE> deviceDictionary =\n      {{\"rk3399pro\", RKNN_DEVICE_TYPE_RK3399PRO},\n       {\"rk356\", RKNN_DEVICE_TYPE_RK356X},\n 
      {\"rk358\", RKNN_DEVICE_TYPE_RK358X},\n       {\"rv110\", RKNN_DEVICE_TYPE_RV110X}};\n\n  Defer { dev_file.close(); };\n\n  while (getline(dev_file, strLine)) {\n    for (const auto &item : deviceDictionary) {\n      if (strLine.find(item.first) != std::string::npos) {\n        type_ = item.second;\n        return;\n      }\n    }\n\n    MBLOG_ERROR << strLine << \" type not support\";\n    break;\n  }\n}\n\nRKNNDevs::RKNN_DEVICE_TYPE RKNNDevs::GetDeviceType() { return type_; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/rockchip/core/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"driver_desc.h\"\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/device/rockchip/device_rockchip.h\"\n\nnamespace modelbox {\n\nstd::shared_ptr<Timer> kRKDeviceTimer;\n\nTimer *GetTimer() { return kRKDeviceTimer.get(); }\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<RockChipFactory>();\n  return factory;\n}\n\nvoid DriverDescription(DriverDesc *desc) {\n  desc->SetClass(DRIVER_CLASS_DEVICE);\n  desc->SetType(DEVICE_TYPE);\n  desc->SetName(DEVICE_DRIVER_NAME);\n  desc->SetDescription(DEVICE_DRIVER_DESCRIPTION);\n}\n\nStatus DriverInit() {\n  if (kRKDeviceTimer != nullptr) {\n    return STATUS_OK;\n  }\n\n  kRKDeviceTimer = std::make_shared<Timer>();\n  if (kRKDeviceTimer == nullptr) {\n    auto msg = std::string(\"failed to make timer\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  kRKDeviceTimer->SetName(\"RockChip-Timer\");\n  kRKDeviceTimer->Start();\n  return STATUS_OK;\n}\n\nvoid DriverFini() {\n  if (kRKDeviceTimer == nullptr) {\n    return;\n  }\n\n  // Driver Fini.\n  kRKDeviceTimer->Stop();\n  kRKDeviceTimer = nullptr;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/rockchip/core/driver_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_DESC_ROCKCHIP_H_\n#define MODELBOX_DEVICE_DESC_ROCKCHIP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/status.h>\n\nextern \"C\" {\n\nnamespace modelbox {\n\nstd::shared_ptr<DriverFactory> CreateDriverFactory();\n\nStatus DriverInit();\n\nvoid DriverFini();\n\nvoid DriverDescription(DriverDesc *desc);\n\n}  // namespace modelbox\n\n}  // extern \"C\"\n\n#endif  // MODELBOX_DEVICE_DESC_ROCKCHIP_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/core/include/modelbox/device/rockchip/device_rockchip.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_ROCKCHIP_H_\n#define MODELBOX_DEVICE_ROCKCHIP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/data_context.h>\n#include <modelbox/device/rockchip/rockchip_memory.h>\n#include <modelbox/flow.h>\n\nnamespace modelbox {\n\ntypedef void MppBufHdl;\n\nconstexpr const char *DEVICE_TYPE = \"rockchip\";\nconstexpr const char *DEVICE_DRIVER_NAME = \"device-rockchip\";\nconstexpr const char *DEVICE_DRIVER_DESCRIPTION = \"A rockchip device driver\";\n\nclass RockChip : public Device {\n public:\n  RockChip(const std::shared_ptr<DeviceMemoryManager> &mem_mgr);\n  ~RockChip() override = default;\n  std::string GetType() const override;\n\n  /**\n   * @brief when make mem contiguous, need test whether the device supports\n   * @return whether specify device supports mem contiguous\n   **/\n  bool SupportMemContiguous() const override;\n\n  Status DeviceExecute(const DevExecuteCallBack &rkfun, int32_t priority,\n                       size_t rkcount) override;\n  bool NeedResourceNice() override;\n};\n\nclass RockChipFactory : public DeviceFactory {\n public:\n  RockChipFactory() = default;\n  ~RockChipFactory() override = default;\n\n  std::map<std::string, std::shared_ptr<DeviceDesc>> DeviceProbe() override;\n  std::string GetDeviceFactoryType() override;\n  std::shared_ptr<Device> 
CreateDevice(const std::string &device_id) override;\n\n private:\n  std::map<std::string, std::shared_ptr<DeviceDesc>> ProbeRKNNDevice();\n};\n\nclass RockChipDesc : public DeviceDesc {\n public:\n  RockChipDesc() = default;\n  ~RockChipDesc() override = default;\n};\n\n// use it to store the rknn device names\nclass RKNNDevs {\n public:\n  typedef enum {\n    RKNN_DEVICE_TYPE_OTHERS = 0,\n    RKNN_DEVICE_TYPE_RK3399PRO,\n    RKNN_DEVICE_TYPE_RK356X,\n    RKNN_DEVICE_TYPE_RK358X,\n    RKNN_DEVICE_TYPE_RV110X\n  } RKNN_DEVICE_TYPE;\n\n  static RKNNDevs &Instance() {\n    static RKNNDevs rk_nndevs;\n    return rk_nndevs;\n  }\n\n  void SetNames(std::vector<std::string> &dev_names);\n  const std::vector<std::string> &GetNames();\n  void UpdateDeviceType();\n  RKNN_DEVICE_TYPE GetDeviceType();\n\n private:\n  RKNNDevs() = default;\n  virtual ~RKNNDevs() = default;\n  RKNNDevs(const RKNNDevs &) = delete;\n  RKNNDevs &operator=(const RKNNDevs &) = delete;\n\n  std::vector<std::string> dev_names_;\n  RKNN_DEVICE_TYPE type_{RKNN_DEVICE_TYPE_OTHERS};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_ROCKCHIP_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/core/include/modelbox/device/rockchip/rockchip_api.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ROCKCHIP_API_H_\n#define MODELBOX_ROCKCHIP_API_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n\n#include \"im2d.h\"\n#include \"rga.h\"\n#include \"rk_mpi.h\"\n#include \"rk_type.h\"\n\n#define MPPFRAMETORGA(frame, fmt)                               \\\n  wrapbuffer_fd(mpp_buffer_get_fd(mpp_frame_get_buffer(frame)), \\\n                (int)mpp_frame_get_width(frame),                \\\n                (int)mpp_frame_get_height(frame), fmt,          \\\n                (int)mpp_frame_get_hor_stride(frame),           \\\n                (int)mpp_frame_get_ver_stride(frame));\n\n#define MPP_ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))\n#define MPP_ALIGN_MPP_WH 16\n#define MPP_ALIGN_WIDTH 16\n#define MPP_ALIGN_HEIGHT 2\n#define RK_POLL_TIMEOUT 500\n\nnamespace modelbox {\n\nconstexpr const char *IMG_DEFAULT_FMT = \"bgr\";\n\nstd::shared_ptr<modelbox::Buffer> CreateEmptyMppImg(\n    int w, int h, RgaSURF_FORMAT fmt, const std::shared_ptr<Device> &dev,\n    rga_buffer_t &rga_buf);\n\nRgaSURF_FORMAT GetRGAFormat(const std::string &fmt_str);\nRgaSURF_FORMAT GetRGAFormat(const MppFrameFormat &fmt_mpp);\n\nStatus CopyRGBMemory(uint8_t *psrc, uint8_t *pdst, int w, int h, int ws,\n                     int hs);\nStatus CopyNVMemory(uint8_t *psrc, uint8_t *pdst, int w, int h, int ws, int 
hs);\n\nStatus GetRGAFromImgBuffer(const std::shared_ptr<Buffer> &in_img,\n                           RgaSURF_FORMAT fmt, rga_buffer_t &rgb_buf);\n\nstd::shared_ptr<modelbox::Buffer> ColorChange(\n    MppFrame &frame, RgaSURF_FORMAT fmt, const std::shared_ptr<Device> &device);\nstd::shared_ptr<modelbox::Buffer> MirrorImg(\n    const std::shared_ptr<modelbox::Buffer> &in_buf, RgaSURF_FORMAT fmt);\n\nclass MppJpegDecode {\n public:\n  MppJpegDecode() = default;\n  virtual ~MppJpegDecode();\n  Status Init();\n  MppFrame Decode(void *in_buf, int buf_len, int &w, int &h);\n  MppFrame Decode(MppBuffer &in_buf, int &w, int &h);\n\n private:\n  void GetJpegWH(int &nW, int &nH, const unsigned char *buf, int bufLen);\n  Status DecPkt(MppPacket &packet, int w = 0, int h = 0);\n  Status ShutDown();\n  MppPacket SendBuf(MppBuffer &in_buf, int &w, int &h);\n  MppPacket SendBuf(void *in_buf, int buf_len, int &w, int &h);\n  Status ReceiveFrame(MppFrame &out_frame);\n\n  MppCtx codec_ctx_{nullptr};\n  MppApi *rk_api_{nullptr};\n  MppBufferGroup frm_grp_{nullptr};\n  std::mutex jpeg_mtx_;\n};\n\nclass InferenceRKNPUParams {\n public:\n  InferenceRKNPUParams() = default;\n  virtual ~InferenceRKNPUParams() = default;\n\n  std::vector<std::string> input_name_list_, output_name_list_;\n  std::vector<std::string> input_type_list_, output_type_list_;\n\n  int32_t device_id_{0};\n};\n\nclass InferenceInputParams {\n public:\n  InferenceInputParams() = default;\n  virtual ~InferenceInputParams() = default;\n\n  int32_t in_width_ = 0;\n  int32_t in_height_ = 0;\n  int32_t in_wstride_ = 0;\n  int32_t in_hstride_ = 0;\n  std::string pix_fmt_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_ROCKCHIP_API_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/core/include/modelbox/device/rockchip/rockchip_memory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ROCKCHIP_MEMORY_H_\n#define MODELBOX_ROCKCHIP_MEMORY_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/memory_pool.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <queue>\n#include <thread>\n#include <unordered_map>\n\n#include \"rk_mpi.h\"\n#include \"rk_type.h\"\n\nnamespace modelbox {\n\nTimer *GetTimer();\n\nclass RockChipMemory : public DeviceMemory {\n public:\n  RockChipMemory(const std::shared_ptr<Device> &device,\n                 const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                 void *device_mem_ptr, size_t size);\n\n  RockChipMemory(const std::shared_ptr<Device> &device,\n                 const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                 const std::shared_ptr<void> &device_mem_ptr, size_t size);\n\n  ~RockChipMemory() override = default;\n};\n\nclass RockChipMemoryManager;\nclass RockChipMemoryPool : public MemoryPoolBase {\n public:\n  RockChipMemoryPool(RockChipMemoryManager *mem_manager);\n  ~RockChipMemoryPool() override;\n  Status Init();\n  void *MemAlloc(size_t size) override;\n  void MemFree(void *ptr) override;\n  virtual void OnTimer();\n\n  size_t CalSlabSize(size_t object_size) override;\n\n private:\n  RockChipMemoryManager *mem_manager_;\n  std::shared_ptr<TimerTask> 
flush_timer_;\n};\n\nclass RockChipMemoryManager : public DeviceMemoryManager {\n public:\n  RockChipMemoryManager(const std::string &device_id);\n  ~RockChipMemoryManager() override;\n\n  Status Init();\n\n  /* *\n   * @brief Create a rockchip memory container\n   * @param device pointer to device\n   * @param mem_ptr shared pointer to memory\n   * @param size memory size\n   * @return Empty memory container\n   */\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override;\n\n  /* *\n   * @brief Implement by rockchip device, alloc memory\n   * @param size Memory size to allocate.\n   * @return Device memory.\n   */\n  void *Malloc(size_t size, uint32_t mem_flags) override;\n\n  /* *\n   * @brief Implement by rockchip device, alloc memory\n   * @param size Memory size to allocate\n   * @return Device memory in shared ptr\n   *   */\n  std::shared_ptr<void> AllocSharedPtr(size_t size,\n                                       uint32_t mem_flags) override;\n\n  /**\n   * @brief Implement by rockchip device, copy data from src to dest\n   * @param dest dest buffer to write\n   * @param dest_size dest buffer size\n   * @param src_buffer src buffer to read\n   * @param src_size read data size\n   * @param kind data copy kind\n   * @return Status\n   */\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override;\n  /* *\n   * @brief Copy memory between rockchip device and host\n   * @param dest_memory Destination memory\n   * @param dest_offset Destination memory offset\n   * @param src_memory Source memory\n   * @param src_offset Source offset\n   * @param src_size Source memory size\n   * @param copy_kind Memory copy mode\n   * @return Status\n   */\n  Status DeviceMemoryCopy(\n      const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n      const std::shared_ptr<const 
DeviceMemory> &src_memory, size_t src_offset,\n      size_t src_size,\n      DeviceMemoryCopyKind copy_kind = DeviceMemoryCopyKind::FromHost) override;\n\n  /* *\n   * @brief Get device memory info\n   * @return Status\n   */\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override;\n\n  /* *\n   * @brief Implement by rockchip device, free memory\n   * @param mem_ptr Memory to free\n   */\n  void Free(void *mem_ptr, uint32_t mem_flags) override;\n\n private:\n  RockChipMemoryPool mem_pool_;\n  MppBufferGroup buf_grp_ = nullptr;\n  std::mutex malloc_mtx_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_ROCKCHIP_MEMORY_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/core/libmodelbox-device-rockchip.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox/device/rockchip\n\nName: libmodelbox-device-rockchip\nDescription: modelbox rockchip device SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox-device-rockchip\nCflags: -I${includedir}"
  },
  {
    "path": "src/drivers/devices/rockchip/core/rockchip_api.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n\n#include \"modelbox/device/rockchip/device_rockchip.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\n#define JPEG_DEC_TIMEOUT 1000\n\nconst static std::map<MppFrameFormat, RgaSURF_FORMAT> mpp_rgb_fmt_map = {\n    {MPP_FMT_YUV420SP, RK_FORMAT_YCbCr_420_SP},\n    {MPP_FMT_YUV420SP_VU, RK_FORMAT_YCrCb_420_SP},\n    {MPP_FMT_YUV422SP, RK_FORMAT_YCbCr_422_SP},\n    {MPP_FMT_YUV422SP_VU, RK_FORMAT_YCrCb_422_SP},\n    {MPP_FMT_YUV422P, RK_FORMAT_YCbCr_422_P},\n    {MPP_FMT_RGB888, RK_FORMAT_RGB_888},\n    {MPP_FMT_BGR888, RK_FORMAT_BGR_888}};\n\nstd::shared_ptr<modelbox::Buffer> CreateEmptyMppImg(\n    int w, int h, RgaSURF_FORMAT fmt, const std::shared_ptr<Device> &dev,\n    rga_buffer_t &rga_buf) {\n  int div_num = 1;\n  if (fmt == RK_FORMAT_YCbCr_420_SP || fmt == RK_FORMAT_YCrCb_420_SP) {\n    div_num = 2;\n  }\n\n  int ws = MPP_ALIGN(w, MPP_ALIGN_WIDTH);\n  int hs = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);\n  auto buffer_ptr = std::make_shared<Buffer>(dev);\n  int buf_size = ws * hs * 3 / div_num;\n\n  auto ret = buffer_ptr->Build(buf_size);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"Create buffer fail, size=\" << buf_size;\n    return nullptr;\n  }\n\n  auto *mpp_buf = (MppBuffer)(buffer_ptr->MutableData());\n  if (mpp_buf == nullptr) {\n    MBLOG_ERROR 
<< \"MppBuffer is invalid.\";\n    return nullptr;\n  }\n\n  rga_buf = wrapbuffer_fd(mpp_buffer_get_fd(mpp_buf), w, h, fmt, ws, hs);\n\n  return buffer_ptr;\n}\n\nRgaSURF_FORMAT GetRGAFormat(const std::string &fmt_str) {\n  const std::map<std::string, RgaSURF_FORMAT> fmt_map = {\n      {\"nv21\", RK_FORMAT_YCbCr_420_SP},\n      {\"nv12\", RK_FORMAT_YCrCb_420_SP},\n      {\"rgb\", RK_FORMAT_RGB_888},\n      {\"bgr\", RK_FORMAT_BGR_888}};\n\n  auto iter = fmt_map.find(fmt_str);\n  if (iter == fmt_map.end()) {\n    MBLOG_ERROR << \"Not support fmt: \" << fmt_str;\n    return RK_FORMAT_UNKNOWN;\n  }\n\n  return iter->second;\n}\n\nRgaSURF_FORMAT GetRGAFormat(const MppFrameFormat &fmt_mpp) {\n  auto iter = mpp_rgb_fmt_map.find(fmt_mpp);\n  if (iter == mpp_rgb_fmt_map.end()) {\n    MBLOG_ERROR << \"Not support mpp fmt: \" << fmt_mpp;\n    return RK_FORMAT_UNKNOWN;\n  }\n\n  return iter->second;\n}\n\nStatus CopyNVMemory(uint8_t *psrc, uint8_t *pdst, int w, int h, int ws,\n                    int hs) {\n  // copy y\n  uint8_t *ysrc = psrc;\n  uint8_t *ydst = pdst;\n  for (int i = 0; i < h; i++) {\n    if (0 != memcpy_s(ydst, w, ysrc, w)) {\n      MBLOG_ERROR << \"memcpy_s fail\";\n      return STATUS_FAULT;\n    }\n\n    ysrc += ws;\n    ydst += w;\n  }\n  uint8_t *uvsrc = psrc + ws * hs;\n  uint8_t *uvdst = pdst + w * h;\n  for (int i = 0; i < h / 2; i++) {\n    if (0 != memcpy_s(uvdst, w, uvsrc, w)) {\n      MBLOG_ERROR << \"memcpy_s fail\";\n      return STATUS_FAULT;\n    }\n\n    uvsrc += ws;\n    uvdst += w;\n  }\n  return STATUS_SUCCESS;\n}\n\nStatus CopyRGBMemory(uint8_t *psrc, uint8_t *pdst, int w, int h, int ws,\n                     int hs) {\n  uint8_t *rgbsrc = psrc;\n  uint8_t *rgbdst = pdst;\n\n  for (int i = 0; i < h; i++) {\n    if (0 != memcpy_s(rgbdst, w * 3, rgbsrc, w * 3)) {\n      MBLOG_ERROR << \"memcpy_s fail\";\n      return STATUS_FAULT;\n    }\n\n    rgbsrc += ws * 3;\n    rgbdst += w * 3;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus 
GetRGAFromImgBuffer(const std::shared_ptr<Buffer> &in_img,\n                           RgaSURF_FORMAT fmt, rga_buffer_t &rgb_buf) {\n  int32_t in_width = 0;\n  int32_t in_height = 0;\n  int32_t in_wstride = 0;\n  int32_t in_hstride = 0;\n\n  in_img->Get(\"width\", in_width);\n  in_img->Get(\"height\", in_height);\n  in_img->Get(\"width_stride\", in_wstride);\n  in_img->Get(\"height_stride\", in_hstride);\n  if (RK_FORMAT_RGB_888 == fmt || RK_FORMAT_BGR_888 == fmt) {\n    in_wstride = in_wstride / 3;\n  }\n\n  if (in_width == 0 || in_height == 0) {\n    MBLOG_ERROR << \"can not get input width or heigh\";\n    return STATUS_FAULT;\n  }\n\n  if (in_wstride == 0) {\n    in_wstride = in_width;\n  }\n\n  if (in_hstride == 0) {\n    in_hstride = in_height;\n  }\n\n  if (in_img->GetDeviceMemory()->IsHost()) {\n    rgb_buf = wrapbuffer_virtualaddr(\n        mpp_buffer_get_ptr((MppBuffer)(in_img->ConstData())), in_width,\n        in_height, fmt, in_wstride, in_hstride);\n  } else {\n    if (in_wstride % MPP_ALIGN_WIDTH != 0 ||\n        in_hstride % MPP_ALIGN_HEIGHT != 0) {\n      MBLOG_ERROR << \"mpp buffer not align\";\n      return {STATUS_FAULT, \"mpp buffer not align\"};\n    }\n\n    rgb_buf = wrapbuffer_fd(mpp_buffer_get_fd((MppBuffer)(in_img->ConstData())),\n                            in_width, in_height, fmt, in_wstride, in_hstride);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<modelbox::Buffer> ColorChange(\n    MppFrame &frame, RgaSURF_FORMAT fmt,\n    const std::shared_ptr<Device> &device) {\n  std::shared_ptr<Buffer> buffer = nullptr;\n  auto w = (int32_t)mpp_frame_get_width(frame);\n  auto h = (int32_t)mpp_frame_get_height(frame);\n  auto ws = (int32_t)mpp_frame_get_hor_stride(frame);\n  int32_t hs = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);  // frame allign too large\n  int32_t height = h;\n  int32_t channel = 3;\n\n  bool needRelease = true;\n\n  Defer {\n    if (needRelease) {\n      mpp_frame_deinit(&frame);\n    }\n  };\n\n  auto iter = 
mpp_rgb_fmt_map.find(mpp_frame_get_fmt(frame));\n  if (iter == mpp_rgb_fmt_map.end()) {\n    MBLOG_ERROR << \"fmt not support: \" << mpp_frame_get_fmt(frame);\n    return nullptr;\n  }\n\n  RgaSURF_FORMAT src_fmt = iter->second;\n  if ((h % 2 != 0 || w % 2 != 0) &&\n      (RK_FORMAT_RGB_888 != src_fmt && RK_FORMAT_BGR_888 != src_fmt)) {\n    // mpp may give an odd height even in yuv format, fix it\n    mpp_frame_set_height(frame, (h + 1) / 2 * 2);\n    mpp_frame_set_width(frame, (w + 1) / 2 * 2);\n  }\n\n  if (src_fmt == fmt) {\n    buffer = std::make_shared<Buffer>(device);\n    if (buffer == nullptr) {\n      MBLOG_ERROR << \"make buffer failed.\";\n      return nullptr;\n    }\n\n    MppBuffer mppbuf = mpp_frame_get_buffer(frame);\n    auto ret = buffer->Build((void *)(mppbuf), mpp_buffer_get_size(mppbuf),\n                             [frame](void *p) {\n                               MppFrame tmp = frame;\n                               mpp_frame_deinit(&tmp);\n                             });\n    if (ret != STATUS_OK) {\n      MBLOG_ERROR << \"failed to build buffer reason:\" << ret.Errormsg();\n      return nullptr;\n    }\n\n    needRelease = false;\n\n    if (RK_FORMAT_RGB_888 != fmt && RK_FORMAT_BGR_888 != fmt) {\n      height = h * 3 / 2;\n      hs = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);\n      channel = 1;\n    }\n  } else {\n    // others format need colorspace change\n    rga_buffer_t src_buf = MPPFRAMETORGA(frame, src_fmt);\n    rga_buffer_t dst_buf;\n    buffer = CreateEmptyMppImg(w, h, fmt, device, dst_buf);\n    if (buffer == nullptr) {\n      MBLOG_ERROR << \"create mpp img failed.\";\n      return nullptr;\n    }\n\n    IM_STATUS status = imcvtcolor(src_buf, dst_buf, src_fmt, fmt);\n    if (status != IM_STATUS_SUCCESS) {\n      MBLOG_ERROR << \"rga convert color failed: \" << status;\n      return nullptr;\n    }\n  }\n\n  buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  buffer->Set(\"channel\", channel);\n  
buffer->Set(\"shape\",\n              std::vector<size_t>{(size_t)height, (size_t)w, (size_t)channel});\n  buffer->Set(\"width\", w);\n  buffer->Set(\"height\", h);\n  if (RK_FORMAT_RGB_888 == fmt || RK_FORMAT_BGR_888 == fmt) {\n    buffer->Set(\"width_stride\", ws * 3);\n  } else {\n    buffer->Set(\"width_stride\", ws);\n  }\n\n  buffer->Set(\"height_stride\", hs);\n  buffer->Set(\"layout\", std::string(\"hwc\"));\n\n  return buffer;\n}\n\nstd::shared_ptr<modelbox::Buffer> MirrorImg(\n    const std::shared_ptr<modelbox::Buffer> &in_buf, RgaSURF_FORMAT fmt) {\n  if (in_buf == nullptr) {\n    MBLOG_ERROR << \"in_buf is invalid\";\n    return nullptr;\n  }\n\n  int32_t w = 0;\n  int32_t h = 0;\n  int32_t ws = 0;\n  int32_t hs = 0;\n  in_buf->Get(\"width\", w);\n  in_buf->Get(\"height\", h);\n  in_buf->Get(\"width_stride\", ws);\n  in_buf->Get(\"height_stride\", hs);\n  if (RK_FORMAT_RGB_888 == fmt || RK_FORMAT_BGR_888 == fmt) {\n    ws = ws / 3;\n  }\n\n  auto *mpp_buf = (MppBuffer)(in_buf->MutableData());\n  rga_buffer_t src_buf =\n      wrapbuffer_fd(mpp_buffer_get_fd(mpp_buf), w, h, (int)fmt, ws, hs);\n  rga_buffer_t dst_buf;\n  auto out_buf = CreateEmptyMppImg(w, h, fmt, in_buf->GetDevice(), dst_buf);\n  if (out_buf == nullptr) {\n    MBLOG_ERROR << \"create mpp img failed.\";\n    return nullptr;\n  }\n\n  IM_STATUS status = imflip(src_buf, dst_buf, IM_HAL_TRANSFORM_FLIP_H);\n  if (status != IM_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"rga flip failed: \" << status;\n    return nullptr;\n  }\n\n  out_buf->CopyMeta(in_buf);\n  return out_buf;\n}\n\n#define FFD8_OFFSET 2\n#define SOFO_OFFSET 3\n#define SEG_OFFSET 2\n#define CHAR_SHORT 256\nvoid MppJpegDecode::GetJpegWH(int &nW, int &nH, const unsigned char *buf,\n                              int bufLen) {\n  nH = 0;\n  nW = 0;\n  if (bufLen < 3) {\n    MBLOG_ERROR << \"bufLen is invalid \" << bufLen;\n    return;\n  }\n\n  int offset = FFD8_OFFSET;  // jump FFD8\n  unsigned char type = 0xff;\n  do {\n    while 
(offset < bufLen && buf[offset] != 0xff) {\n      offset++;\n    }\n\n    offset++;\n    while (offset < bufLen && buf[offset] == 0xff) {\n      offset++;\n    }\n\n    offset++;\n    if (offset > bufLen) {\n      MBLOG_ERROR << \"offset is invalid offset:\" << offset\n                  << \" bufLen:\" << bufLen;\n      return;\n    }\n\n    type = (unsigned char)buf[offset - 1];\n    switch (type) {\n      case 0x00:\n      case 0x01:\n      case 0xd0:\n      case 0xd1:\n      case 0xd2:\n      case 0xd3:\n      case 0xd4:\n      case 0xd5:\n      case 0xd6:\n      case 0xd7:\n        break;\n      case 0xc0:  // SOF0 segment (basic image info)\n      case 0xc2:  // JFIF format SOF0 segment\n      {\n        // find SOFO segment, parse height and width info\n        offset += SOFO_OFFSET;\n        if (offset > bufLen) {\n          MBLOG_ERROR << \"offset is invalid offset:\" << offset\n                      << \" bufLen:\" << bufLen;\n          return;\n        }\n\n        // height 2 bytes low high swap\n        nH = buf[offset++] * CHAR_SHORT;\n        nH += buf[offset++];\n        // width 2 bytes low high swap\n        nW = buf[offset++] * CHAR_SHORT;\n        nW += buf[offset++];\n        return;\n      }\n      default: {\n        // other segment jump\n        // get segment length, break\n        if (offset > bufLen) {\n          MBLOG_ERROR << \"offset is invalid offset:\" << offset\n                      << \" bufLen:\" << bufLen;\n          return;\n        }\n\n        int offsetTmp = buf[offset++] * CHAR_SHORT;\n        offsetTmp += offset + buf[offset] - SEG_OFFSET;\n        offset = offsetTmp;\n        break;\n      }\n    }\n  } while (type != 0xda && offset < bufLen);  // scan rows begin\n}\n\nMppJpegDecode::~MppJpegDecode() {\n  ShutDown();\n  if (rk_api_) {\n    rk_api_->reset(codec_ctx_);\n  }\n\n  if (codec_ctx_) {\n    mpp_destroy(codec_ctx_);\n    codec_ctx_ = nullptr;\n  }\n\n  if (frm_grp_) {\n    mpp_buffer_group_put(frm_grp_);\n    
frm_grp_ = nullptr;\n  }\n}\n\nStatus MppJpegDecode::Init() {\n  auto ret = mpp_create(&codec_ctx_, &rk_api_);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to run mpp_create: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  RK_U32 timeout = JPEG_DEC_TIMEOUT;\n  ret = rk_api_->control(codec_ctx_, MPP_SET_OUTPUT_TIMEOUT, &timeout);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"Failed to set output timeout 0 fail: \") +\n               std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  ret = mpp_init(codec_ctx_, MPP_CTX_DEC, MPP_VIDEO_CodingMJPEG);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to run mpp_init: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  ret = mpp_buffer_group_get_internal(&frm_grp_, MPP_BUFFER_TYPE_DRM);\n  if (ret != MPP_OK) {\n    auto msg =\n        std::string(\"failed to get buffer group: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  return STATUS_OK;\n}\n\nStatus MppJpegDecode::ShutDown() {\n  MppPacket eos_packet = nullptr;\n  MppBuffer eos_buf = nullptr;\n\n  /* Prepare EOS packet */\n  mpp_buffer_get(frm_grp_, &eos_buf, 1);\n  mpp_packet_init_with_buffer(&eos_packet, eos_buf);\n  mpp_buffer_put(eos_buf);\n  mpp_packet_set_size(eos_packet, 0);\n  mpp_packet_set_length(eos_packet, 0);\n  mpp_packet_set_eos(eos_packet);\n\n  DecPkt(eos_packet);\n  mpp_packet_deinit(&eos_packet);\n  return STATUS_OK;\n}\n\nStatus MppJpegDecode::DecPkt(MppPacket &packet, int w, int h) {\n  MppTask task = nullptr;\n  if (packet == nullptr) {\n    auto msg = std::string(\"packet is null\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  if (codec_ctx_ == nullptr) {\n    auto msg = std::string(\"codec ctx is null,please call init\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  if 
(rk_api_->poll(codec_ctx_, MPP_PORT_INPUT, MPP_POLL_BLOCK) != MPP_OK) {\n    return STATUS_CONTINUE;\n  }\n\n  auto ret = rk_api_->dequeue(codec_ctx_, MPP_PORT_INPUT, &task);\n  if (ret != MPP_OK) {\n    auto msg =\n        std::string(\"mpp task input dequeue failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  if (!task) {\n    auto msg = std::string(\"SendBuf get task fail\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  MppBuffer frm_buf = nullptr;\n  MppFrame frame = nullptr;\n  ret = mpp_frame_init(&frame);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_frame_init failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  if (w > 0 && h > 0) {\n    ret = mpp_buffer_get(\n        frm_grp_, &frm_buf,\n        MPP_ALIGN(w, MPP_ALIGN_MPP_WH) * MPP_ALIGN(h, MPP_ALIGN_MPP_WH) * 2);\n    if (ret != MPP_OK) {\n      auto msg = std::string(\"mpp_buffer_get failed: \") + std::to_string(ret);\n      MBLOG_ERROR << msg;\n      return {STATUS_FAULT, msg};\n    }\n\n    mpp_frame_set_buffer(frame, frm_buf);\n    mpp_buffer_put(frm_buf);\n  }\n\n  mpp_task_meta_set_packet(task, KEY_INPUT_PACKET, packet);\n  auto *meta = mpp_frame_get_meta(frame);\n  if (meta == nullptr) {\n    auto msg = std::string(\"failed to get meta\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  mpp_meta_set_packet(meta, KEY_INPUT_PACKET, packet);\n  mpp_task_meta_set_frame(task, KEY_OUTPUT_FRAME, frame);\n\n  if ((ret = rk_api_->enqueue(codec_ctx_, MPP_PORT_INPUT, task)) != MPP_OK) {\n    MBLOG_ERROR << \"mpp task input enqueue failed: \" << ret;\n    mpp_frame_deinit(&frame);\n    return {STATUS_FAULT, \"mpp task input enqueue failed\"};\n  }\n\n  return STATUS_OK;\n}\n\nMppPacket MppJpegDecode::SendBuf(void *in_buf, int buf_len, int &w, int &h) {\n  MppPacket packet = nullptr;\n  if (w == 0 || h == 0) {\n    GetJpegWH(w, h, (unsigned char *)in_buf, 
buf_len);\n  }\n\n  if (w == 0 || h == 0) {\n    MBLOG_WARN << \"get jpeg w or h fail\";\n    return nullptr;\n  }\n\n  MppBuffer mpp_buf = nullptr;\n  auto ret = mpp_buffer_get(frm_grp_, &mpp_buf, buf_len);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_buffer_get failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  ret = mpp_buffer_write(mpp_buf, 0, in_buf, buf_len);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_buffer_write failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  ret = mpp_packet_init_with_buffer(&packet, mpp_buf);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_packet_init_with_buffer failed: \") +\n               std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  mpp_buffer_put(mpp_buf);\n\n  if (STATUS_OK != DecPkt(packet, w, h)) {\n    mpp_packet_deinit(&packet);\n    packet = nullptr;\n  }\n\n  return packet;\n}\n\nMppPacket MppJpegDecode::SendBuf(MppBuffer &in_buf, int &w, int &h) {\n  MppPacket packet = nullptr;\n  if (w == 0 || h == 0) {\n    GetJpegWH(w, h, (unsigned char *)mpp_buffer_get_ptr(in_buf),\n              mpp_buffer_get_size(in_buf));\n  }\n\n  if (w == 0 || h == 0) {\n    MBLOG_ERROR << \"get jpeg w or h fail\";\n    return nullptr;\n  }\n\n  auto ret = mpp_packet_init_with_buffer(&packet, in_buf);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_packet_init_with_buffer failed: \") +\n               std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  auto ret_dec = DecPkt(packet, w, h);\n  if (STATUS_OK != ret_dec) {\n    mpp_packet_deinit(&packet);\n    MBLOG_ERROR << \"failed to decpkt reason: \" << ret_dec.Errormsg();\n    packet = nullptr;\n  }\n\n  return packet;\n}\n\nStatus MppJpegDecode::ReceiveFrame(MppFrame &out_frame) {\n  MppTask task = nullptr;\n  if (codec_ctx_ == nullptr) {\n    auto msg = std::string(\"codec ctx is null,please call init\");\n    
return {STATUS_FAULT, msg};\n  }\n\n  /* poll and wait here */\n  auto ret = rk_api_->poll(codec_ctx_, MPP_PORT_OUTPUT, MPP_POLL_BLOCK);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp output poll failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_NODATA, msg};\n  }\n\n  ret = rk_api_->dequeue(codec_ctx_, MPP_PORT_OUTPUT, &task);\n  if (ret != MPP_OK) {\n    auto msg =\n        std::string(\"mpp task output dequeue failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  if (!task) {\n    auto msg = std::string(\"ReceiveFrame get task fail\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  mpp_task_meta_get_frame(task, KEY_OUTPUT_FRAME, &out_frame);\n  auto *meta = mpp_frame_get_meta(out_frame);\n  if (meta == nullptr) {\n    auto msg = std::string(\"meta is null\");\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  MppPacket packet = nullptr;\n  mpp_meta_get_packet(meta, KEY_INPUT_PACKET, &packet);\n  if (packet) {\n    mpp_packet_deinit(&packet);\n  }\n\n  /* output queue */\n  rk_api_->enqueue(codec_ctx_, MPP_PORT_OUTPUT, task);\n  return STATUS_OK;\n}\n\nMppFrame MppJpegDecode::Decode(void *in_buf, int buf_len, int &w, int &h) {\n  std::lock_guard<std::mutex> lk(jpeg_mtx_);\n  MppPacket packet = SendBuf(in_buf, buf_len, w, h);\n  if (packet == nullptr) {\n    MBLOG_ERROR << \"failed to decode jpg reason: sendbuf fail\";\n    return nullptr;\n  }\n\n  MppFrame out_frame = nullptr;\n  auto ret = ReceiveFrame(out_frame);\n  if (STATUS_OK != ret) {\n    mpp_packet_deinit(&packet);\n    MBLOG_ERROR << \"failed to decode jpg reason: ReceiveFrame fail\";\n    return nullptr;\n  }\n\n  return out_frame;\n}\n\nMppFrame MppJpegDecode::Decode(MppBuffer &in_buf, int &w, int &h) {\n  std::lock_guard<std::mutex> lk(jpeg_mtx_);\n  MppPacket packet = SendBuf(in_buf, w, h);\n  if (packet == nullptr) {\n    return nullptr;\n  }\n\n  MppFrame out_frame = 
nullptr;\n  auto ret = ReceiveFrame(out_frame);\n  if (STATUS_OK != ret) {\n    mpp_packet_deinit(&packet);\n    return nullptr;\n  }\n\n  return out_frame;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/rockchip/core/rockchip_memory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n\n#include <securec.h>\n\n#include \"modelbox/device/rockchip/device_rockchip.h\"\n\n// -- only linux: get free memory\n#include <linux/kernel.h>\n#include <linux/unistd.h>\n#include <sys/sysinfo.h>\n\nnamespace modelbox {\n\nRockChipMemoryPool::RockChipMemoryPool(RockChipMemoryManager *mem_manager) {\n  mem_manager_ = mem_manager;\n}\n\nStatus RockChipMemoryPool::Init() {\n  auto status = InitSlabCache();\n  if (!status) {\n    return {status, \"init mempool failed.\"};\n  }\n  auto timer = std::make_shared<TimerTask>();\n  timer->Callback(&RockChipMemoryPool::OnTimer, this);\n  flush_timer_ = timer;\n\n  // flush slab every 10s\n  GetTimer()->Schedule(flush_timer_, 1000, 10000);\n  return STATUS_OK;\n}\n\nRockChipMemoryPool::~RockChipMemoryPool() {\n  if (flush_timer_) {\n    flush_timer_->Stop();\n    flush_timer_ = nullptr;\n  }\n}\n\nvoid RockChipMemoryPool::OnTimer() {\n  // TODO support config shrink time.\n}\n\nvoid *RockChipMemoryPool::MemAlloc(size_t size) {\n  return mem_manager_->Malloc(size, 0);\n}\n\nvoid RockChipMemoryPool::MemFree(void *ptr) { mem_manager_->Free(ptr, 0); }\n\nsize_t RockChipMemoryPool::CalSlabSize(size_t object_size) {\n  return object_size;\n}\n\nRockChipMemory::RockChipMemory(\n    const std::shared_ptr<Device> 
&device,\n    const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n    const std::shared_ptr<void> &device_mem_ptr, size_t size)\n    : DeviceMemory(device, mem_mgr, device_mem_ptr, size, false) {}\n\nRockChipMemoryManager::RockChipMemoryManager(const std::string &device_id)\n    : DeviceMemoryManager(device_id), mem_pool_(this) {}\n\nRockChipMemoryManager::~RockChipMemoryManager() {\n  if (buf_grp_ != nullptr) {\n    mpp_buffer_group_put(buf_grp_);\n  }\n  mem_pool_.DestroySlabCache();\n}\n\nStatus RockChipMemoryManager::Init() {\n  auto ret = mpp_buffer_group_get_internal(&buf_grp_, MPP_BUFFER_TYPE_DRM);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to get buffer group, MemoryManager init\");\n    MBLOG_ERROR << msg << ret;\n    return {STATUS_FAULT, msg};\n  }\n\n  return mem_pool_.Init();\n}\n\nstd::shared_ptr<DeviceMemory> RockChipMemoryManager::MakeDeviceMemory(\n    const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n    size_t size) {\n  return std::make_shared<RockChipMemory>(device, shared_from_this(), mem_ptr,\n                                          size);\n}\n\nvoid *RockChipMemoryManager::Malloc(size_t size, uint32_t mem_flags) {\n  if (size == 0) {\n    return nullptr;\n  }\n\n  std::lock_guard<std::mutex> lock(malloc_mtx_);\n  MppBuffer buf = nullptr;\n  auto ret = mpp_buffer_get(buf_grp_, &buf, size);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"Malloc mpp buffer fail, size = \" << size;\n    return nullptr;\n  }\n\n  return (void *)buf;\n}\n\nstd::shared_ptr<void> RockChipMemoryManager::AllocSharedPtr(\n    size_t size, uint32_t mem_flags) {\n  return mem_pool_.AllocSharedPtr(size);\n}\n\nvoid RockChipMemoryManager::Free(void *mem_ptr, uint32_t mem_flags) {\n  std::lock_guard<std::mutex> lock(malloc_mtx_);\n  if (mem_ptr != nullptr) {\n    mpp_buffer_put(mem_ptr);\n  }\n}\n\nStatus RockChipMemoryManager::Copy(void *dest, size_t dest_size,\n                                   const void *src_buffer, size_t 
src_size,\n                                   DeviceMemoryCopyKind kind) {\n  if (dest == nullptr || src_buffer == nullptr) {\n    MBLOG_ERROR << \"RockChip copy src \" << src_buffer << \" to dest \" << dest\n                << \"failed\";\n    return STATUS_INVALID;\n  }\n\n  if (dest_size < src_size) {\n    MBLOG_ERROR << \"RockChip memcpy failed, dest size[\" << dest_size\n                << \"] < src size[\" << src_size << \"]\";\n    return STATUS_RANGE;\n  }\n\n  void *cp_dest = (void *)dest;\n  void *cp_src = (void *)src_buffer;\n\n  if (kind == DeviceMemoryCopyKind::FromHost ||\n      kind == DeviceMemoryCopyKind::SameDeviceType) {\n    cp_dest = mpp_buffer_get_ptr((MppBuffer)dest);\n  } else if (kind == DeviceMemoryCopyKind::ToHost ||\n             kind == DeviceMemoryCopyKind::SameDeviceType) {\n    cp_src = mpp_buffer_get_ptr((MppBuffer)src_buffer);\n  }\n\n  int ret = memcpy_s(cp_dest, dest_size, cp_src, src_size);\n  if (ret != EOK) {\n    MBLOG_ERROR << \"RockChip Copy memcpy failed\";\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus RockChipMemoryManager::GetDeviceMemUsage(size_t *free,\n                                                size_t *total) const {\n  struct sysinfo s_rkinfo;\n  // todo npu memory share cpu\n  auto ret = sysinfo(&s_rkinfo);\n  if (ret == 0) {\n    if (free != nullptr) {\n      *free = s_rkinfo.freeram;\n    }\n\n    if (total != nullptr) {\n      *total = s_rkinfo.totalram;\n    }\n\n    return STATUS_SUCCESS;\n  }\n\n  auto msg = \"failed to sysinfo ret = \" + std::to_string(ret);\n  MBLOG_ERROR << msg;\n\n  return {STATUS_FAULT, msg};\n}\n\nStatus RockChipMemoryManager::DeviceMemoryCopy(\n    const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, DeviceMemoryCopyKind copy_kind) {\n  auto src_device = src_memory->GetDevice();\n  auto dest_device = dest_memory->GetDevice();\n  if 
(copy_kind == DeviceMemoryCopyKind::SameDeviceType &&\n      src_device != dest_device) {\n    return STATUS_NOTSUPPORT;\n  }\n\n  uint8_t *dest_ptr = nullptr;\n  if (dest_memory->IsHost()) {\n    dest_ptr = dest_memory->GetPtr<uint8_t>().get();\n  } else {\n    MppBuffer dest_devp = dest_memory->GetPtr<MppBufHdl>().get();\n    if (dest_devp) {\n      dest_ptr = (uint8_t *)(mpp_buffer_get_ptr(dest_devp));\n    }\n  }\n\n  const uint8_t *src_ptr = nullptr;\n  if (src_memory->IsHost()) {\n    src_ptr = src_memory->GetConstPtr<uint8_t>().get();\n  } else {\n    const MppBufHdl *src_devp = src_memory->GetConstPtr<MppBufHdl>().get();\n    if (src_devp) {\n      src_ptr = (const uint8_t *)(mpp_buffer_get_ptr((MppBuffer)src_devp));\n    }\n  }\n\n  if (memcpy_s(dest_ptr + dest_offset, src_size, src_ptr + src_offset,\n               src_size) != EOK) {\n    MBLOG_ERROR << \"DeviceMemoryCopy memcpy_s fail \";\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-rockchip-flowunit)\n\nadd_definitions(-DMODELBOX_VERSION_STR_MACRO=\"${MODELBOX_VERSION_STRING}\")\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/crop/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"crop\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_IMAGE_PROCESS_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_MPP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_RGA_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/crop/crop_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"crop_flowunit.h\"\n\n#include \"image_process.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"securec.h\"\n\nRockchipCropFlowUnit::RockchipCropFlowUnit() = default;\nRockchipCropFlowUnit::~RockchipCropFlowUnit() = default;\n\nmodelbox::Status RockchipCropFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RockchipCropFlowUnit::Close() {\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::shared_ptr<modelbox::Buffer> RockchipCropFlowUnit::ProcessOneImage(\n    const std::shared_ptr<modelbox::Buffer> &in_img, const im_rect &region) {\n  std::string pix_fmt;\n  RgaSURF_FORMAT rga_fmt = RK_FORMAT_UNKNOWN;\n  in_img->Get(\"pix_fmt\", pix_fmt);\n  rga_fmt = modelbox::GetRGAFormat(pix_fmt);\n  if (rga_fmt == RK_FORMAT_UNKNOWN) {\n    MBLOG_ERROR << \"unsupport pix format, pix_fmt: \" << pix_fmt;\n    return nullptr;\n  }\n\n  rga_buffer_t in_buf;\n  if (GetRGAFromImgBuffer(in_img, rga_fmt, in_buf) !=\n      modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"input img can not change to rga buffer\";\n    return nullptr;\n  }\n\n  auto device = this->GetBindDevice();\n  rga_buffer_t out_buf;\n  auto 
out_img =\n      CreateEmptyMppImg(region.width, region.height, rga_fmt, device, out_buf);\n  if (out_img == nullptr) {\n    MBLOG_ERROR << \"failed to create mpp img\";\n    return nullptr;\n  }\n\n  IM_STATUS status = imcrop(in_buf, out_buf, region);\n  if (status != IM_STATUS_SUCCESS) {\n    MBLOG_ERROR << \"rga crop failed: \" << status;\n    return nullptr;\n  }\n\n  out_img->CopyMeta(in_img);\n  out_img->Set(\"width\", (int32_t)region.width);\n  out_img->Set(\"height\", (int32_t)region.height);\n  auto ws = (int32_t)MPP_ALIGN(region.width, MPP_ALIGN_WIDTH);\n  if (RK_FORMAT_BGR_888 == rga_fmt || RK_FORMAT_RGB_888 == rga_fmt) {\n    out_img->Set(\"width_stride\", ws * 3);\n  } else {\n    out_img->Set(\"width_stride\", ws);\n  }\n\n  out_img->Set(\"height_stride\",\n               (int32_t)MPP_ALIGN(region.height, MPP_ALIGN_HEIGHT));\n\n  return out_img;\n}\n\nmodelbox::Status RockchipCropFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_img_bufs = data_ctx->Input(IN_IMG);\n  auto input_box_bufs = data_ctx->Input(IN_REGION);\n  if (input_img_bufs->Size() != input_box_bufs->Size()) {\n    auto msg = \"in_img and in_region mismatch: \" +\n               std::to_string(input_img_bufs->Size()) + \":\" +\n               std::to_string(input_box_bufs->Size());\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto output_bufs = data_ctx->Output(OUT_IMG);\n\n  for (size_t i = 0; i < input_img_bufs->Size(); ++i) {\n    const auto *const bbox = static_cast<const imageprocess::RoiBox *>(\n        input_box_bufs->At(i)->ConstData());\n    if (bbox == nullptr) {\n      MBLOG_ERROR << \"input region is invalid.\";\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageCrop.CropFailed\", \"input region is invalid.\");\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    im_rect region;\n    region.x = bbox->x;\n    region.y = bbox->y;\n    region.width = 
bbox->w;\n    region.height = bbox->h;\n\n    int32_t in_width = 0;\n    int32_t in_height = 0;\n\n    input_img_bufs->At(i)->Get(\"width\", in_width);\n    input_img_bufs->At(i)->Get(\"height\", in_height);\n    if (in_width <= 0 || in_height <= 0) {\n      MBLOG_ERROR << \"input size is invalid.\";\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageCrop.CropFailed\", \"input size is invalid\");\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    if (bbox->x < 0 || bbox->x > in_width || bbox->y < 0 ||\n        bbox->y > in_height || bbox->w < 0 || bbox->w > in_width ||\n        bbox->h < 0 || bbox->h > in_height) {\n      MBLOG_ERROR << \"bbox region is invalid.\";\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageCrop.CropFailed\", \"bbox region is invalid.\");\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    auto ret = ProcessOneImage(input_img_bufs->At(i), region);\n    if (ret == nullptr) {\n      auto msg = \"crop image failed, index is \" + std::to_string(i);\n      MBLOG_ERROR << msg;\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"ImageCrop.CropFailed\", msg);\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    output_bufs->PushBack(ret);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(RockchipCropFlowUnit, rk_crop_desc) {\n  rk_crop_desc.SetFlowUnitName(FLOWUNIT_NAME);\n  rk_crop_desc.SetFlowUnitGroupType(\"Image\");\n  rk_crop_desc.AddFlowUnitInput({IN_IMG, modelbox::DEVICE_TYPE});\n  rk_crop_desc.AddFlowUnitInput({IN_REGION, \"cpu\"});\n  rk_crop_desc.AddFlowUnitOutput({OUT_IMG, modelbox::DEVICE_TYPE});\n  rk_crop_desc.SetFlowType(modelbox::NORMAL);\n  rk_crop_desc.SetInputContiguous(false);\n  rk_crop_desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(rk_crop_desc) {\n  rk_crop_desc.Desc.SetName(FLOWUNIT_NAME);\n  
rk_crop_desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  rk_crop_desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  rk_crop_desc.Desc.SetDescription(FLOWUNIT_DESC);\n  rk_crop_desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/crop/crop_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ROCKCHIP_CROP_H_\n#define MODELBOX_FLOWUNIT_ROCKCHIP_CROP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"rockchip\";\nconstexpr const char *FLOWUNIT_NAME = \"crop\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A crop flowunit on rockchip device. \\n\"\n    \"\\t@Port parameter: The input port 'in_image' and the output port \"\n    \"'out_image' buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t  The other input port 'in_region' buffer type is rectangle, the memory \"\n    \"arrangement is [x,y,w,h].\\n\"\n    \"\\t  it contain the following meta fields: \\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit support: 'pix_fmt': \"\n    \"[nv12, rgb, bgr], 'layout': [hwc]. One image can only be cropped with \"\n    \"one \";\n\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *OUT_IMG = \"out_image\";\nconstexpr const char *IN_REGION = \"in_region\";\n\nclass RockchipCropFlowUnit : public modelbox::FlowUnit {\n public:\n  RockchipCropFlowUnit();\n  ~RockchipCropFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::shared_ptr<modelbox::Buffer> ProcessOneImage(\n      const std::shared_ptr<modelbox::Buffer> &in_img, const im_rect &region);\n};\n\n#endif  // MODELBOX_FLOWUNIT_ROCKCHIP_CROP_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/crop/crop_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include <opencv2/opencv.hpp>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass RockchipCropFlowUnitTest : public testing::Test {\n public:\n  RockchipCropFlowUnitTest()\n      : crop_driver_flow_(std::make_shared<DriverFlowTest>()),\n        jpeg_decode_(std::make_shared<modelbox::MppJpegDecode>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = jpeg_decode_->Init();\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_INFO << \"no rockchip device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    MBLOG_INFO << \"jpeg_decode:\" << ret;\n  }\n\n  void TearDown() override { crop_driver_flow_ = nullptr; };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  std::shared_ptr<DriverFlowTest> crop_driver_flow_;\n  
std::shared_ptr<modelbox::MppJpegDecode> jpeg_decode_;\n};\n\nstd::shared_ptr<DriverFlowTest> RockchipCropFlowUnitTest::GetDriverFlow() {\n  return crop_driver_flow_;\n}\n\nTEST_F(RockchipCropFlowUnitTest, RunUnit) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input1[type=input]\n          input2[type=input]\n          output[type=output]\n          crop[type=flowunit, flowunit=crop, device=rockchip, deviceid=0]\n\n          input1 -> crop:in_image\n          input2 -> crop:in_region\n          crop:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  driver_flow->BuildAndRun(\"RunUnit\", toml_content, 10);\n\n  auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  // in img\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n  in_img_buffer_list->Build({img.total() * img.elemSize()});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n  in_img_buffer->Set(\"width\", img.cols);\n  in_img_buffer->Set(\"height\", img.rows);\n  in_img_buffer->Set(\"width_stride\", img.cols * 3);\n  in_img_buffer->Set(\"height_stride\", img.rows);\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  auto e_ret = memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                        img.data, img.total() * img.elemSize());\n  EXPECT_EQ(e_ret, 0);\n  auto status = extern_data->Send(\"input1\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  // in box\n  auto in_box_buffer_list = extern_data->CreateBufferList();\n  in_box_buffer_list->Build({sizeof(int32_t) * 4});\n  auto in_box_buffer = in_box_buffer_list->At(0);\n  auto *data_ptr = (int32_t *)in_box_buffer->MutableData();\n  
data_ptr[0] = 30;\n  data_ptr[1] = 0;\n  data_ptr[2] = 128;\n  data_ptr[3] = 128;\n  status = extern_data->Send(\"input2\", in_box_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n\n  int32_t out_width = 0;\n  int32_t out_height = 0;\n  int32_t out_width_stride = 0;\n  int32_t out_height_stride = 0;\n  std::string out_pix_fmt;\n  output_buffer->Get(\"width\", out_width);\n  output_buffer->Get(\"height\", out_height);\n  output_buffer->Get(\"pix_fmt\", out_pix_fmt);\n  output_buffer->Get(\"width_stride\", out_width_stride);\n  output_buffer->Get(\"height_stride\", out_height_stride);\n  ASSERT_EQ(out_width, 128);\n  ASSERT_EQ(out_height, 128);\n  ASSERT_EQ(out_pix_fmt, std::string(\"bgr\"));\n  ASSERT_EQ(out_width_stride, 128 * 3);\n  ASSERT_EQ(out_height_stride, 128);\n\n  int32_t total_out_size = 128 * 128 * 3;\n  std::shared_ptr<unsigned char> out_img_buf(\n      new (std::nothrow) unsigned char[total_out_size],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_img_buf.get(), total_out_size, 0, total_out_size);\n  EXPECT_EQ(e_ret, 0);\n\n  auto *mpp_buffer = (MppBuffer)output_buffer->ConstData();\n\n  auto *rgbsrc = (uint8_t *)mpp_buffer_get_ptr(mpp_buffer);\n  auto *rgbdst = (uint8_t *)out_img_buf.get();\n\n  // copy to memory\n  for (int i = 0; i < out_height; i++) {\n    e_ret = memcpy_s(rgbdst, out_width * 3, rgbsrc, out_width * 3);\n    EXPECT_EQ(e_ret, 0);\n    rgbsrc += out_width * 3;\n    rgbdst += out_width * 3;\n  }\n\n  std::string out_file_name = std::string(TEST_ASSETS) + \"/rockchip_crop_bgr\";\n  struct stat out_statbuf = {0};\n  stat(out_file_name.c_str(), &out_statbuf);\n  EXPECT_EQ(out_statbuf.st_size, total_out_size);\n\n  FILE *fp_out = 
fopen(out_file_name.c_str(), \"rb\");\n  ASSERT_NE(fp_out, nullptr);\n\n  std::shared_ptr<unsigned char> out_file_img_buf(\n      new (std::nothrow) unsigned char[out_statbuf.st_size],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_file_img_buf.get(), out_statbuf.st_size, 0,\n                   out_statbuf.st_size);\n  EXPECT_EQ(e_ret, 0);\n\n  auto out_size = fread(out_file_img_buf.get(), 1, out_statbuf.st_size, fp_out);\n  EXPECT_EQ(out_size, total_out_size);\n  fclose(fp_out);\n\n  // cmp memory\n  EXPECT_EQ(\n      memcmp(out_img_buf.get(), out_file_img_buf.get(), out_statbuf.st_size),\n      0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/image_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"image_decoder\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\ninclude_directories(${OpenCV_INCLUDE_DIRS})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODER_RKNN_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nlist(APPEND TEST_INCLUDE ${NLOHMANN_INCLUDE_DIR})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/image_decoder/image_decoder.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"image_decoder.h\"\n\n#include <modelbox/base/crypto.h>\n#include <securec.h>\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nImageDecoderFlowUnit::ImageDecoderFlowUnit() = default;\nImageDecoderFlowUnit::~ImageDecoderFlowUnit() = default;\n\nstd::vector<std::string> CvImgPixelFormat{\"bgr\", \"rgb\", \"nv12\"};\n\nmodelbox::Status ImageDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  pixel_format_ = opts->GetString(\"pix_fmt\", modelbox::IMG_DEFAULT_FMT);\n  if (find(CvImgPixelFormat.begin(), CvImgPixelFormat.end(), pixel_format_) ==\n      CvImgPixelFormat.end()) {\n    auto errMsg = \"pixel_format is invalid, configure is :\" + pixel_format_;\n    MBLOG_ERROR << errMsg;\n    std::string valid_format;\n    for (const auto &iter : CvImgPixelFormat) {\n      if (valid_format.length() > 0) {\n        valid_format += \", \";\n      }\n\n      valid_format += iter;\n    }\n    MBLOG_ERROR << \"Valid pixel_format is: \" << valid_format;\n    return {modelbox::STATUS_BADCONF, errMsg};\n  }\n\n  MBLOG_DEBUG << \"pixel_format \" << pixel_format_;\n  out_pix_fmt_ = modelbox::GetRGAFormat(pixel_format_);\n\n  return jpeg_dec_.Init();\n}\n\nMppFrame ImageDecoderFlowUnit::JpegDec(\n    std::shared_ptr<modelbox::Buffer> &buffer, int &w, int &h) {\n  auto *input_data = 
(u_char *)(buffer->ConstData());\n  auto data_len = buffer->GetBytes();\n  if (data_len <= 0) {\n    MBLOG_ERROR << \"buffer size is invalid\";\n    return nullptr;\n  }\n\n  return jpeg_dec_.Decode(input_data, data_len, w, h);\n}\n\nstd::shared_ptr<modelbox::Buffer> ImageDecoderFlowUnit::DecodeFromCPU(\n    std::shared_ptr<modelbox::Buffer> &in_buffer) {\n  const auto *input_data_ptr =\n      static_cast<const u_char *>(in_buffer->ConstData());\n  std::vector<u_char> input_data(\n      input_data_ptr, input_data_ptr + in_buffer->GetBytes() / sizeof(u_char));\n\n  cv::Mat img_bgr = cv::imdecode(input_data, cv::IMREAD_COLOR);\n  if (img_bgr.data == nullptr) {\n    std::string error_msg = \"input image buffer is invalid, imdecode failed.\";\n    MBLOG_ERROR << error_msg;\n    auto buffer = std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  cv::Mat img_dest;\n  if (pixel_format_ == \"bgr\") {\n    img_dest = img_bgr;\n  } else if (pixel_format_ == \"rgb\") {\n    cv::cvtColor(img_bgr, img_dest, cv::COLOR_BGR2RGB);\n  } else if (pixel_format_ == \"nv12\") {\n    img_dest = BGR2YUV_NV12(img_bgr);\n  } else {\n    std::string error_msg = \"no support pixel format:\" + pixel_format_;\n    MBLOG_ERROR << error_msg;\n    auto buffer = std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  if (!modelbox::StatusError) {\n    std::string error_msg =\n        \"input image decode success, but transform nv12 format failed.\";\n    MBLOG_ERROR << error_msg;\n    auto buffer = std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  auto output_buffer = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  if (output_buffer == nullptr) {\n    std::string error_msg = \"create output buffer fail.\";\n    MBLOG_ERROR << error_msg;\n    auto buffer = 
std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  auto img_size = img_dest.total() * img_dest.elemSize();\n  MBLOG_DEBUG << \"decoded image size: \" << img_size;\n\n  auto ret = output_buffer->Build(img_size);\n  if (ret != modelbox::STATUS_OK) {\n    auto error_msg = \"Create buffer fail, size=\" + std::to_string(img_size);\n    MBLOG_ERROR << error_msg;\n    auto buffer = std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  auto *mpp_buf = (MppBuffer)(output_buffer->MutableData());\n  auto *cpu_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_buf);\n\n  auto e_ret =\n      memcpy_s(cpu_buf, output_buffer->GetBytes(), img_dest.data, img_size);\n  if (e_ret != EOK) {\n    auto error_msg = \"memcpy_s fail, e_ret=\" + std::to_string(e_ret);\n    MBLOG_ERROR << error_msg;\n    auto buffer = std::make_shared<modelbox::Buffer>();\n    buffer->SetError(\"ImageDecoder.DecodeFailed\", error_msg);\n    return buffer;\n  }\n\n  output_buffer->Set(\"width\", (int32_t)img_bgr.cols);\n  output_buffer->Set(\"height\", (int32_t)img_bgr.rows);\n  auto width_stride = (int32_t)img_bgr.cols;\n  if (pixel_format_ == \"rgb\" || pixel_format_ == \"bgr\") {\n    width_stride *= 3;\n  }\n\n  output_buffer->Set(\"width_stride\", width_stride);\n  output_buffer->Set(\"height_stride\", (int32_t)img_bgr.rows);\n  output_buffer->Set(\"channel\", (int32_t)img_dest.channels());\n  output_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n  output_buffer->Set(\n      \"shape\", std::vector<size_t>{(size_t)img_dest.rows, (size_t)img_dest.cols,\n                                   (size_t)img_dest.channels()});\n  output_buffer->Set(\"layout\", std::string(\"hwc\"));\n  return output_buffer;\n}\n\ncv::Mat ImageDecoderFlowUnit::BGR2YUV_NV12(const cv::Mat &src_bgr) {\n  modelbox::StatusError = modelbox::STATUS_OK;\n  cv::Mat dst_nv12(src_bgr.rows * 1.5, 
src_bgr.cols, CV_8UC1, cv::Scalar(0));\n  cv::Mat src_yuv_i420;\n  cv::cvtColor(src_bgr, src_yuv_i420, cv::COLOR_BGR2YUV_I420);\n\n  size_t len_y = src_bgr.rows * src_bgr.cols;\n  size_t len_u = len_y / 4;\n  auto ret = memcpy_s(dst_nv12.data, len_y, src_yuv_i420.data, len_y);\n  if (ret != EOK) {\n    MBLOG_ERROR << \"Cpu memcpy failed, ret \" << ret << \", size \" << len_y;\n    dst_nv12.release();\n    modelbox::StatusError = {modelbox::STATUS_FAULT};\n    return dst_nv12;\n  }\n  for (size_t i = 0; i < len_u; ++i) {\n    dst_nv12.data[len_y + 2 * i] = src_yuv_i420.data[len_y + i];\n    dst_nv12.data[len_y + 2 * i + 1] = src_yuv_i420.data[len_y + len_u + i];\n  }\n\n  return dst_nv12;\n}\n\nmodelbox::Status ImageDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  // get input\n  auto input_bufs = ctx->Input(\"in_encoded_image\");\n  auto output_bufs = ctx->Output(\"out_image\");\n  if (input_bufs->Size() <= 0) {\n    auto msg = \"input images batch is \" + std::to_string(input_bufs->Size());\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  std::vector<size_t> output_shape;\n  for (auto &buffer : *input_bufs) {\n    int w = 0;\n    int h = 0;\n    std::shared_ptr<modelbox::Buffer> out_buf = nullptr;\n    MppFrame frame = JpegDec(buffer, w, h);\n    if (frame == nullptr) {\n      const auto *msg = \"failed to MppJpegDec\";\n      MBLOG_ERROR << msg;\n      out_buf = DecodeFromCPU(buffer);\n    } else {\n      out_buf = modelbox::ColorChange(frame, out_pix_fmt_, GetBindDevice());\n      if (out_buf == nullptr) {\n        const auto *msg = \"failed to color change\";\n        MBLOG_ERROR << msg;\n        out_buf = DecodeFromCPU(buffer);\n      }\n    }\n\n    // build out_buf\n    out_buf->Set(\"pix_fmt\", pixel_format_);\n    output_bufs->PushBack(out_buf);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(ImageDecoderFlowUnit, rk_imgdec_desc) {\n  rk_imgdec_desc.SetFlowUnitName(FLOWUNIT_NAME);\n  
rk_imgdec_desc.SetFlowUnitGroupType(\"Image\");\n  rk_imgdec_desc.AddFlowUnitInput({\"in_encoded_image\", \"cpu\"});\n  rk_imgdec_desc.AddFlowUnitOutput({\"out_image\"});\n\n  rk_imgdec_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"string\", true, modelbox::IMG_DEFAULT_FMT,\n      \"the output pixel format\"));\n\n  rk_imgdec_desc.SetFlowType(modelbox::NORMAL);\n  rk_imgdec_desc.SetInputContiguous(false);\n  rk_imgdec_desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(rk_imgdec_desc) {\n  rk_imgdec_desc.Desc.SetName(FLOWUNIT_NAME);\n  rk_imgdec_desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  rk_imgdec_desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  rk_imgdec_desc.Desc.SetDescription(FLOWUNIT_DESC);\n  rk_imgdec_desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/image_decoder/image_decoder.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_H_\n#define MODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n#include <modelbox/device/rockchip/rockchip_memory.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include <opencv2/opencv.hpp>\n\nconstexpr const char *FLOWUNIT_NAME = \"image_decoder\";\nconstexpr const char *FLOWUNIT_TYPE = \"rockchip\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: jpeg decoder flowunit on rockchip. \\n\"\n    \"\\t@Port parameter: The input port buffer type is image file binary, the \"\n    \"output port buffer type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,               Type: int32_t\\n\"\n    \"\\t\\tField Name: height,              Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,        Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride,       Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,             Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,             Type: string\\n\"\n    \"\\t\\tField Name: layout,              Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,               Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,                Type: \"\n    \"ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint:\";\n\nclass ImageDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  ImageDecoderFlowUnit();\n  ~ImageDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> ct) override;\n\n private:\n  MppFrame JpegDec(std::shared_ptr<modelbox::Buffer> &buffer, int &w, int &h);\n  std::shared_ptr<modelbox::Buffer> DecodeFromCPU(\n      std::shared_ptr<modelbox::Buffer> &in_buffer);\n  cv::Mat BGR2YUV_NV12(const cv::Mat &src_bgr);\n\n private:\n  std::string pixel_format_{modelbox::IMG_DEFAULT_FMT};\n  modelbox::MppJpegDecode jpeg_dec_;\n  RgaSURF_FORMAT out_pix_fmt_{RK_FORMAT_YCbCr_420_SP};\n};\n\n#endif  // MODELBOX_FLOWUNIT_IMAGE_DECODER_ROCKCHIP_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/image_decoder/image_decoder_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n#include <opencv2/opencv.hpp>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/crypto.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass RockchipImageDecoderFlowUnitTest : public testing::Test {\n public:\n  RockchipImageDecoderFlowUnitTest()\n      : driver_flow_(std::make_shared<DriverFlowTest>()),\n        jpeg_decode_(std::make_shared<modelbox::MppJpegDecode>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = jpeg_decode_->Init();\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_INFO << \"no rockchip device, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    MBLOG_INFO << \"jpeg_decode:\" << ret;\n  }\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<DriverFlowTest> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n private:\n  
std::shared_ptr<DriverFlowTest> driver_flow_;\n  std::shared_ptr<modelbox::MppJpegDecode> jpeg_decode_;\n};\n\nstd::shared_ptr<DriverFlowTest>\nRockchipImageDecoderFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(RockchipImageDecoderFlowUnitTest, DecodeTest) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          image_decoder[type=flowunit, flowunit=image_decoder, device=rockchip, deviceid=0, batch_size=3]\n          input -> image_decoder:in_encoded_image\n          image_decoder:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"DecodeTest\", toml_content, -1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  MBLOG_INFO << toml_content;\n\n  auto in_file_name = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n  // load file\n  FILE *fp_in_image = fopen(in_file_name.c_str(), \"rb\");\n  ASSERT_NE(fp_in_image, nullptr);\n\n  struct stat in_image_statbuf = {0};\n  stat(in_file_name.c_str(), &in_image_statbuf);\n  EXPECT_EQ(in_image_statbuf.st_size > 0, true);\n\n  auto img = cv::imread(in_file_name);\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n  in_img_buffer_list->Build({(size_t)in_image_statbuf.st_size});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n\n  auto in_image_size = fread(in_img_buffer->MutableData(), 1,\n                             in_image_statbuf.st_size, fp_in_image);\n\n  EXPECT_EQ(in_image_size, in_image_statbuf.st_size);\n  fclose(fp_in_image);\n\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n\n  int32_t total_out_size = img.cols * img.rows * 3;\n\n  auto status = 
extern_data->Send(\"input\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n\n  ASSERT_EQ(output_buffer->GetBytes(), total_out_size);\n\n  auto *mpp_buffer = (MppBuffer)output_buffer->ConstData();\n\n  int32_t out_width = 0;\n  int32_t out_height = 0;\n  int32_t out_width_stride = 0;\n  int32_t out_height_stride = 0;\n  std::string out_pix_fmt;\n  output_buffer->Get(\"width\", out_width);\n  output_buffer->Get(\"height\", out_height);\n  output_buffer->Get(\"pix_fmt\", out_pix_fmt);\n  output_buffer->Get(\"width_stride\", out_width_stride);\n  output_buffer->Get(\"height_stride\", out_height_stride);\n  ASSERT_EQ(out_width, img.cols);\n  ASSERT_EQ(out_height, img.rows);\n  ASSERT_EQ(out_pix_fmt, std::string(\"bgr\"));\n  ASSERT_EQ(out_width_stride, img.cols * 3);\n  ASSERT_EQ(out_height_stride, img.rows);\n\n  std::shared_ptr<unsigned char> out_img_buf(\n      new (std::nothrow) unsigned char[total_out_size],\n      std::default_delete<unsigned char[]>());\n  auto e_ret = memset_s(out_img_buf.get(), total_out_size, 0, total_out_size);\n  EXPECT_EQ(e_ret, EOK);\n\n  auto *rgbsrc = (uint8_t *)mpp_buffer_get_ptr(mpp_buffer);\n  auto *rgbdst = (uint8_t *)out_img_buf.get();\n\n  // copy to memory\n  for (int i = 0; i < out_height; i++) {\n    e_ret = memcpy_s(rgbdst, out_width * 3, rgbsrc, out_width * 3);\n    EXPECT_EQ(e_ret, 0);\n    rgbsrc += out_width * 3;\n    rgbdst += out_width * 3;\n  }\n\n  auto out_file_name = std::string(TEST_ASSETS) + \"/rockchip_decoder_test_bgr\";\n  FILE *fp_out_image = fopen(out_file_name.c_str(), \"rb\");\n  ASSERT_NE(fp_out_image, nullptr);\n\n  struct stat out_image_statbuf = {0};\n  stat(out_file_name.c_str(), &out_image_statbuf);\n  
EXPECT_EQ(out_image_statbuf.st_size, total_out_size);\n\n  std::shared_ptr<unsigned char> out_img_file_buf(\n      new (std::nothrow) unsigned char[total_out_size],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_img_file_buf.get(), total_out_size, 0, total_out_size);\n  EXPECT_EQ(e_ret, 0);\n\n  auto out_image_size =\n      fread(out_img_file_buf.get(), 1, total_out_size, fp_out_image);\n  EXPECT_EQ(out_image_size, total_out_size);\n  fclose(fp_out_image);\n\n  // cmp memory\n  EXPECT_EQ(memcmp(out_img_buf.get(), out_img_file_buf.get(), total_out_size),\n            0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\nTEST_F(RockchipImageDecoderFlowUnitTest, DecodeBase64Test) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                             test_data_dir + \"\\\"]\\n\" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          base64_decoder[type=flowunit, flowunit=base64_decoder, device=cpu, deviceid=0, batch_size=3, data_format=json, key=image_base64]\n          image_decoder[type=flowunit, flowunit=image_decoder, device=rockchip, deviceid=0, batch_size=3, key=image_base64, pix_fmt=bgr]\n          input -> base64_decoder:in_data\n          base64_decoder:out_data -> image_decoder:in_encoded_image\n          image_decoder:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(\"DecodeTest\", toml_content, 10);\n\n  MBLOG_INFO << toml_content;\n\n  auto in_file_name = std::string(TEST_ASSETS) + \"/test.jpg\";\n\n  // load file\n  FILE *fp_in_image = fopen(in_file_name.c_str(), \"rb\");\n  ASSERT_NE(fp_in_image, nullptr);\n\n  struct stat in_image_statbuf = {0};\n  stat(in_file_name.c_str(), &in_image_statbuf);\n  EXPECT_EQ(in_image_statbuf.st_size > 0, true);\n\n  auto 
img = cv::imread(in_file_name);\n  auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n  auto in_img_buffer_list = extern_data->CreateBufferList();\n\n  std::shared_ptr<unsigned char> in_img_file_buf(\n      new (std::nothrow) unsigned char[in_image_statbuf.st_size],\n      std::default_delete<unsigned char[]>());\n  auto e_ret = memset_s(in_img_file_buf.get(), in_image_statbuf.st_size, 0,\n                        in_image_statbuf.st_size);\n  EXPECT_EQ(e_ret, EOK);\n\n  auto in_image_size =\n      fread(in_img_file_buf.get(), 1, in_image_statbuf.st_size, fp_in_image);\n  EXPECT_EQ(in_image_size, in_image_statbuf.st_size);\n  fclose(fp_in_image);\n\n  std::string base64_image;\n  auto en_ret = modelbox::Base64Encode(in_img_file_buf.get(),\n                                       in_image_statbuf.st_size, &base64_image);\n  EXPECT_EQ(en_ret, STATUS_OK);\n\n  nlohmann::json base64_image_json;\n  base64_image_json[\"image_base64\"] = base64_image;\n  std::string base64_image_json_str = base64_image_json.dump();\n\n  in_img_buffer_list->Build({(size_t)base64_image_json_str.size()});\n  auto in_img_buffer = in_img_buffer_list->At(0);\n\n  in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n  in_img_buffer->Set(\"key\", std::string(\"image_base64\"));\n\n  e_ret = memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                   base64_image_json_str.c_str(), base64_image_json_str.size());\n  EXPECT_EQ(e_ret, EOK);\n\n  int32_t total_out_size = img.cols * img.rows * 3;\n\n  auto status = extern_data->Send(\"input\", in_img_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n\n  // check output\n  OutputBufferList map_buffer_list;\n  status = extern_data->Recv(map_buffer_list);\n  EXPECT_EQ(status, STATUS_OK);\n  auto output_buffer_list = map_buffer_list[\"output\"];\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n  auto output_buffer = output_buffer_list->At(0);\n\n  ASSERT_EQ(output_buffer->GetBytes(), total_out_size);\n\n  auto 
*mpp_buffer = (MppBuffer)output_buffer->ConstData();\n\n  int32_t out_width = 0;\n  int32_t out_height = 0;\n  int32_t out_width_stride = 0;\n  int32_t out_height_stride = 0;\n  std::string out_pix_fmt;\n  output_buffer->Get(\"width\", out_width);\n  output_buffer->Get(\"height\", out_height);\n  output_buffer->Get(\"pix_fmt\", out_pix_fmt);\n  output_buffer->Get(\"width_stride\", out_width_stride);\n  output_buffer->Get(\"height_stride\", out_height_stride);\n  ASSERT_EQ(out_width, img.cols);\n  ASSERT_EQ(out_height, img.rows);\n  ASSERT_EQ(out_pix_fmt, std::string(\"bgr\"));\n  ASSERT_EQ(out_width_stride, img.cols * 3);\n  ASSERT_EQ(out_height_stride, img.rows);\n\n  std::shared_ptr<unsigned char> out_img_buf(\n      new (std::nothrow) unsigned char[total_out_size],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_img_buf.get(), total_out_size, 0, total_out_size);\n  EXPECT_EQ(e_ret, EOK);\n\n  auto *rgbsrc = (uint8_t *)mpp_buffer_get_ptr(mpp_buffer);\n  auto *rgbdst = (uint8_t *)out_img_buf.get();\n\n  // copy to memory\n  for (int i = 0; i < out_height; i++) {\n    e_ret = memcpy_s(rgbdst, out_width * 3, rgbsrc, out_width * 3);\n    EXPECT_EQ(e_ret, 0);\n    rgbsrc += out_width * 3;\n    rgbdst += out_width * 3;\n  }\n\n  auto out_file_name = std::string(TEST_ASSETS) + \"/rockchip_decoder_test_bgr\";\n  FILE *fp_out_image = fopen(out_file_name.c_str(), \"rb\");\n  ASSERT_NE(fp_out_image, nullptr);\n\n  struct stat out_image_statbuf = {0};\n  stat(out_file_name.c_str(), &out_image_statbuf);\n  EXPECT_EQ(out_image_statbuf.st_size, total_out_size);\n\n  std::shared_ptr<unsigned char> out_img_file_buf(\n      new (std::nothrow) unsigned char[total_out_size],\n      std::default_delete<unsigned char[]>());\n  e_ret = memset_s(out_img_file_buf.get(), total_out_size, 0, total_out_size);\n  EXPECT_EQ(e_ret, 0);\n\n  auto out_image_size =\n      fread(out_img_file_buf.get(), 1, total_out_size, fp_out_image);\n  EXPECT_EQ(out_image_size, 
total_out_size);\n  fclose(fp_out_image);\n\n  // cmp memory\n  EXPECT_EQ(memcmp(out_img_buf.get(), out_img_file_buf.get(), total_out_size),\n            0);\n\n  driver_flow->GetFlow()->Wait(3 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"inference_rknpu2\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/modelbox.test.rknpu2.inference.in ${CMAKE_BINARY_DIR}/test/test-working-dir/data/virtual_rknpu_infer_test.toml @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\ninclude_directories(${RKNPU2_INCLUDE_DIR})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n       
 VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${RKNPU2_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_MPP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_RGA_INCLUDE})\nlist(APPEND TEST_INCLUDE 
${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/flowunit_desc.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/rockchip/device_rockchip.h\"\n#include \"modelbox/flowunit.h\"\n#include \"rknpu2_inference_flowunit.h\"\n\nconstexpr const char *RKNPU2_FLOWUNIT_NAME = \"rknpu2_inference\";\nconstexpr const char *RKNPU2_FLOWUNIT_DESC = \"A rknpu2 inference flowunit\";\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<RKNPU2InferenceFlowUnitFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(RKNPU2_FLOWUNIT_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_INFERENCE);\n  desc->SetType(modelbox::DEVICE_TYPE);\n  desc->SetDescription(RKNPU2_FLOWUNIT_DESC);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/modelbox.test.rknpu2.inference.in",
    "content": "[base]\nname = \"rknpu2_inference\"\ndevice = \"rockchip\"\nversion = \"@MODELBOX_VERSION_STRING@\"\ndescription = \"an rknpu2 inference flowunit\"\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/sdc_inference/pool_inst.wk\"\ntype = \"inference\"\nvirtual_type = \"rknpu2\"\nis_input_contiguous = \"false\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\ndevice = \"rockchip\"\n\n[output]\n[output.output1]\nname = \"output:0\"\ntype = \"float\"\n\n[output.output2]\nname = \"output:1\"\ntype = \"float\""
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/rknpu2_inference.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"rknpu2_inference.h\"\n\n#include <model_decrypt.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n\n#include <algorithm>\n\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"securec.h\"\n\n#pragma GCC diagnostic ignored \"-Wunused-but-set-variable\"\n\nstatic std::map<std::string, rknn_tensor_type> type_map = {\n    {\"FLOAT\", RKNN_TENSOR_FLOAT32},   {\"INT\", RKNN_TENSOR_INT32},\n    {\"FLOAT32\", RKNN_TENSOR_FLOAT32}, {\"FLOAT16\", RKNN_TENSOR_FLOAT16},\n    {\"INT8\", RKNN_TENSOR_INT8},       {\"UINT8\", RKNN_TENSOR_UINT8},\n    {\"INT16\", RKNN_TENSOR_INT16},     {\"UINT16\", RKNN_TENSOR_UINT16},\n    {\"INT32\", RKNN_TENSOR_INT32},     {\"UINT32\", RKNN_TENSOR_UINT32},\n    {\"INT64\", RKNN_TENSOR_INT64}};\n\nstatic std::map<rknn_tensor_type, size_t> type_size_map = {\n    {RKNN_TENSOR_FLOAT32, 4}, {RKNN_TENSOR_FLOAT16, 2}, {RKNN_TENSOR_INT8, 1},\n    {RKNN_TENSOR_UINT8, 1},   {RKNN_TENSOR_INT16, 2},   {RKNN_TENSOR_UINT16, 2},\n    {RKNN_TENSOR_INT32, 4},   {RKNN_TENSOR_UINT32, 4},  {RKNN_TENSOR_INT64, 8}};\n\nmodelbox::Status modelbox::RKNPU2Inference::LoadModel(\n    const std::string &model_file,\n    const std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  ModelDecryption 
rknpu2_model_decrypt;\n  if (modelbox::STATUS_SUCCESS !=\n      rknpu2_model_decrypt.Init(model_file, drivers_ptr, config)) {\n    MBLOG_ERROR << \"init model fail\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int64_t model_len = 0;\n  std::shared_ptr<uint8_t> modelBuf =\n      rknpu2_model_decrypt.GetModelSharedBuffer(model_len);\n  if (!modelBuf) {\n    MBLOG_ERROR << \"GetDecryptModelBuffer fail\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int ret = rknn_init(&ctx_, modelBuf.get(), model_len, 0, nullptr);\n  if (ret != RKNN_SUCC) {\n    MBLOG_ERROR << \"rknn_init fail:\" << ret;\n    ctx_ = 0;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::ConvertType(\n    const std::string &type, rknn_tensor_type &rk_type) {\n  auto tmp_type = type;\n  std::transform(tmp_type.begin(), tmp_type.end(), tmp_type.begin(), ::toupper);\n  auto iter = type_map.find(tmp_type);\n  if (iter == type_map.end()) {\n    MBLOG_ERROR << \"Not support type: \" << type;\n    return modelbox::STATUS_FAULT;\n  }\n  rk_type = iter->second;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::GetModelAttr() {\n  inputs_type_.resize(npu2model_input_list_.size());\n  inputs_size_.resize(npu2model_input_list_.size());\n  // rknn_tensor_attr use new to avoid stack crash\n  std::shared_ptr<rknn_tensor_attr> tmp_attr =\n      std::make_shared<rknn_tensor_attr>();\n  for (size_t i = 0; i < npu2model_input_list_.size(); i++) {\n    tmp_attr->index = (unsigned int)i;\n    auto ret = rknn_query(ctx_, RKNN_QUERY_INPUT_ATTR, tmp_attr.get(),\n                          sizeof(rknn_tensor_attr));\n    if (ret != RKNN_SUCC) {\n      MBLOG_ERROR << \"query input attrs error\";\n      return {modelbox::STATUS_FAULT, \"query input attrs error\"};\n    }\n\n    rknn_tensor_type rk_type;\n    auto status = ConvertType(npu2model_type_list_[i], rk_type);\n    if (status != modelbox::STATUS_OK) {\n      
MBLOG_ERROR << \"input type convert failed. \" << status.WrapErrormsgs();\n      return {status, \"input type convert failed.\"};\n    }\n    inputs_type_[i] = rk_type;\n    inputs_size_[i] = tmp_attr->n_elems * type_size_map[rk_type];\n  }\n\n  outputs_size_.resize(npu2model_output_list_.size());\n  for (size_t i = 0; i < npu2model_output_list_.size(); i++) {\n    tmp_attr->index = (unsigned int)i;\n    auto ret = rknn_query(ctx_, RKNN_QUERY_OUTPUT_ATTR, tmp_attr.get(),\n                          sizeof(rknn_tensor_attr));\n    if (ret != RKNN_SUCC) {\n      MBLOG_ERROR << \"query output attrs error\";\n      return {modelbox::STATUS_FAULT, \"query output attrs error\"};\n    }\n\n    rknn_tensor_type rk_type;\n    auto status = ConvertType(npu2model_type_list_output_[i], rk_type);\n    if (status != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"output type convert failed. \" << status.WrapErrormsgs();\n      return {status, \"output type convert failed.\"};\n    }\n    outputs_size_[i] = tmp_attr->n_elems * type_size_map[rk_type];\n  }\n  return STATUS_SUCCESS;\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::Init(\n    const std::string &model_file,\n    const std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n    const std::shared_ptr<modelbox::Configuration> &config,\n    const std::shared_ptr<modelbox::InferenceRKNPUParams> &params) {\n  batch_size_ = config->GetInt32(\"batch_size\", 1);\n\n  if (LoadModel(model_file, drivers_ptr, config) != STATUS_SUCCESS) {\n    return modelbox::STATUS_FAULT;\n  }\n  // just use input name without check\n  npu2model_input_list_ = params->input_name_list_;\n  npu2model_type_list_ = params->input_type_list_;\n  npu2model_output_list_ = params->output_name_list_;\n  npu2model_type_list_output_ = params->output_type_list_;\n\n  rknn_input_output_num rknpu2_io_num;\n  auto ret = rknn_query(ctx_, RKNN_QUERY_IN_OUT_NUM, &rknpu2_io_num,\n                        sizeof(rknpu2_io_num));\n  if (ret != RKNN_SUCC) {\n    MBLOG_ERROR << 
\"query input_output error\";\n    return {modelbox::STATUS_FAULT, \"query input_output error\"};\n  }\n\n  if (npu2model_input_list_.size() != rknpu2_io_num.n_input ||\n      npu2model_output_list_.size() != rknpu2_io_num.n_output) {\n    MBLOG_ERROR << \"model input output num mismatch: input num in graph is \"\n                << npu2model_input_list_.size()\n                << \", the real model input num is \" << rknpu2_io_num.n_input\n                << \", output num in graph is \" << npu2model_output_list_.size()\n                << \"the real model output num is \" << rknpu2_io_num.n_output;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return GetModelAttr();\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::Build_Outputs(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto out_cnt = npu2model_output_list_.size();\n  std::vector<rknn_output> rknpu2_outputs;\n  rknpu2_outputs.reserve(out_cnt);\n\n  for (size_t i = 0; i < out_cnt; ++i) {\n    auto &name = npu2model_output_list_[i];\n    auto buffer_list = data_ctx->Output(name);\n\n    std::vector<size_t> shape({outputs_size_[i]});\n    buffer_list->Build(shape, false);\n    auto rknpu2_buffer = buffer_list->At(0);\n    auto *mpp_buf = (MppBuffer)(rknpu2_buffer->MutableData());\n    auto *data_buf = (float *)mpp_buffer_get_ptr(mpp_buf);\n\n    // convert outputs to float*\n    rknpu2_outputs.push_back({.want_float = true,\n                              .is_prealloc = true,\n                              .index = (unsigned int)i,\n                              .buf = data_buf,\n                              .size = (uint32_t)outputs_size_[i]});\n    rknpu2_buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_FLOAT);\n    rknpu2_buffer->Set(\"shape\", outputs_size_[i]);\n  }\n\n  auto ret = rknn_outputs_get(ctx_, out_cnt, rknpu2_outputs.data(), nullptr);\n  // reset rknpu2_outputs, avoid buf released\n  for (auto ele : rknpu2_outputs) {\n    ele.is_prealloc = 1;\n    ele.buf = nullptr;\n  
}\n  rknn_outputs_release(ctx_, out_cnt, rknpu2_outputs.data());\n  if (ret != RKNN_SUCC) {\n    MBLOG_ERROR << \"rknn get output error\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nsize_t modelbox::RKNPU2Inference::CopyFromAlignMemory(\n    std::shared_ptr<modelbox::BufferList> &input_buf_list,\n    std::shared_ptr<uint8_t> &pdst,\n    std::shared_ptr<modelbox::InferenceInputParams> &input_params) {\n  int32_t one_size = input_params->in_width_ * input_params->in_height_;\n  RgaSURF_FORMAT rga_fmt = RK_FORMAT_UNKNOWN;\n  rga_fmt = modelbox::GetRGAFormat(input_params->pix_fmt_);\n\n  size_t input_total_size = batch_size_ * one_size;\n  if (rga_fmt == RK_FORMAT_YCbCr_420_SP || rga_fmt == RK_FORMAT_YCrCb_420_SP) {\n    input_total_size = input_total_size * 3 / 2;\n  } else {\n    if (rga_fmt == RK_FORMAT_RGB_888 || rga_fmt == RK_FORMAT_BGR_888) {\n      input_total_size = input_total_size * 3;\n    }\n  }\n\n  if ((batch_size_ == 1) &&\n      ((input_params->in_width_ == input_params->in_wstride_ &&\n        input_params->in_height_ == input_params->in_hstride_) ||\n       (input_params->in_wstride_ == 0 && input_params->in_hstride_ == 0))) {\n    auto in_image = input_buf_list->At(0);\n    auto *mpp_buf = (MppBuffer)(in_image->ConstData());\n    auto *cpu_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_buf);\n    pdst.reset(cpu_buf, [](uint8_t *p) {});\n    return input_total_size;\n  }\n\n  pdst.reset(new u_int8_t[input_total_size],\n             [](const uint8_t *p) { delete[] p; });\n\n  uint8_t *pdst_buf = pdst.get();\n  for (size_t i = 0; i < batch_size_; i++) {\n    auto in_image = input_buf_list->At(i);\n    auto *mpp_buf = (MppBuffer)(in_image->ConstData());\n    auto *cpu_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_buf);\n\n    if (rga_fmt == RK_FORMAT_YCbCr_420_SP ||\n        rga_fmt == RK_FORMAT_YCrCb_420_SP) {\n      modelbox::CopyNVMemory(\n          cpu_buf, pdst_buf, input_params->in_width_, input_params->in_height_,\n    
      input_params->in_wstride_, input_params->in_hstride_);\n      pdst_buf += one_size * 3 / 2;\n    } else if (rga_fmt == RK_FORMAT_RGB_888 || rga_fmt == RK_FORMAT_BGR_888) {\n      modelbox::CopyRGBMemory(\n          cpu_buf, pdst_buf, input_params->in_width_, input_params->in_height_,\n          input_params->in_wstride_, input_params->in_hstride_);\n      pdst_buf += one_size * 3;\n    } else {\n      auto rc = memcpy_s(pdst_buf, one_size, cpu_buf, one_size);\n      if (rc != EOK) {\n        MBLOG_WARN << \"RKNPUInference2 copy fail\";\n      }\n      pdst_buf += one_size;\n    }\n  }\n\n  return input_total_size;\n}\n\nsize_t modelbox::RKNPU2Inference::GetInputBuffer(\n    std::shared_ptr<uint8_t> &input_buf,\n    std::shared_ptr<modelbox::BufferList> &input_buf_list) {\n  auto in_image = input_buf_list->At(0);\n  auto input_params = std::make_shared<InferenceInputParams>();\n  input_params->pix_fmt_ = \"\";\n\n  in_image->Get(\"width\", input_params->in_width_);\n  in_image->Get(\"height\", input_params->in_height_);\n  in_image->Get(\"width_stride\", input_params->in_wstride_);\n  in_image->Get(\"height_stride\", input_params->in_hstride_);\n  in_image->Get(\"pix_fmt\", input_params->pix_fmt_);\n  if (input_params->pix_fmt_ == \"rgb\" || input_params->pix_fmt_ == \"bgr\") {\n    input_params->in_wstride_ /= 3;\n  } else if (input_params->pix_fmt_.empty()) {\n    input_params->in_height_ = 1;\n    input_params->in_width_ = in_image->GetBytes();\n  }\n\n  if (input_buf_list->GetDevice()->GetType() == \"rknpu\") {\n    return CopyFromAlignMemory(input_buf_list, input_buf, input_params);\n  }\n  input_buf.reset((uint8_t *)input_buf_list->ConstData(), [](uint8_t *p) {});\n  return input_buf_list->GetBytes();\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::Infer(\n    std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  // 构造impl的输入\n  if (ctx_ == 0) {\n    MBLOG_ERROR << \"rk model not load, pass\";\n    return {STATUS_FAULT, \"rk model not load, pass\"};\n 
 }\n\n  std::vector<rknn_input> rknpu2_inputs;\n  rknpu2_inputs.reserve(npu2model_input_list_.size());\n  std::vector<std::shared_ptr<uint8_t>> rknpu2_input_bufs;\n  rknpu2_input_bufs.resize(npu2model_input_list_.size());\n\n  for (size_t i = 0; i < npu2model_input_list_.size(); i++) {\n    auto inputs = data_ctx->Input(npu2model_input_list_[i]);\n    rknn_input one_input;\n    size_t realBatch = inputs->Size();\n    if (realBatch != batch_size_) {\n      auto msg = npu2model_input_list_[i] +\n                 \" batch mismatch:\" + std::to_string(batch_size_) + \" \" +\n                 std::to_string(realBatch);\n      MBLOG_ERROR << msg;\n      return {STATUS_FAULT, msg};\n    }\n\n    size_t ret_size = GetInputBuffer(rknpu2_input_bufs[i], inputs);\n    one_input.index = i;\n    one_input.buf = rknpu2_input_bufs[i].get();\n    one_input.size = ret_size;\n    if (one_input.size != inputs_size_[i]) {\n      MBLOG_ERROR << \"input size mismatch:(yours model) \" << one_input.size\n                  << \" \" << inputs_size_[i];\n      return modelbox::STATUS_FAULT;\n    }\n    one_input.pass_through = false;\n    one_input.type = (rknn_tensor_type)inputs_type_[i];\n    one_input.fmt = RKNN_TENSOR_NHWC;\n    rknpu2_inputs.push_back(one_input);\n  }\n\n  std::lock_guard<std::mutex> lk(rknpu2_infer_mtx_);\n  auto ret = rknn_inputs_set(ctx_, rknpu2_inputs.size(), rknpu2_inputs.data());\n  if (ret != RKNN_SUCC) {\n    MBLOG_ERROR << \"rknn_inputs_set fail: \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  ret = rknn_run(ctx_, nullptr);\n  if (ret != RKNN_SUCC) {\n    MBLOG_ERROR << \"run error fail: \" << ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return Build_Outputs(data_ctx);\n}\n\nmodelbox::Status modelbox::RKNPU2Inference::Deinit() {\n  std::lock_guard<std::mutex> lk(rknpu2_infer_mtx_);\n  if (ctx_ != 0) {\n    // 发现，ctrlc退出的时候 rknn_destroy之前需要等一下，\n    // 有可能3568比较慢，不然会导致下一次推理异常\n    usleep(1000);\n    rknn_destroy(ctx_);\n    ctx_ = 0;\n  }\n\n  
return modelbox::STATUS_SUCCESS;\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/rknpu2_inference.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_RKNPU2_INFERENCE_H_\n#define MODELBOX_RKNPU2_INFERENCE_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/data_context.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n\n#include <string>\n#include <vector>\n\n#include \"rknn_api.h\"\n\nnamespace modelbox {\n\nclass RKNPU2Inference {\n public:\n  Status Deinit();\n  Status Init(const std::string &model_file,\n              const std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n              const std::shared_ptr<modelbox::Configuration> &config,\n              const std::shared_ptr<modelbox::InferenceRKNPUParams> &params);\n\n  Status Infer(std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n private:\n  size_t GetInputBuffer(std::shared_ptr<uint8_t> &input_buf,\n                        std::shared_ptr<modelbox::BufferList> &input_buf_list);\n  size_t CopyFromAlignMemory(\n      std::shared_ptr<modelbox::BufferList> &input_buf_list,\n      std::shared_ptr<uint8_t> &pdst,\n      std::shared_ptr<modelbox::InferenceInputParams> &input_params);\n  Status Build_Outputs(std::shared_ptr<modelbox::DataContext> &data_ctx);\n  Status GetModelAttr();\n  Status LoadModel(const std::string &model_file,\n                   const 
std::shared_ptr<modelbox::Drivers> &drivers_ptr,\n                   const std::shared_ptr<modelbox::Configuration> &config);\n  Status ConvertType(const std::string &type, rknn_tensor_type &rk_type);\n\n  size_t batch_size_{1};\n  std::vector<size_t> outputs_size_;\n  std::vector<size_t> inputs_size_;\n  std::vector<int> inputs_type_;\n  std::vector<std::string> npu2model_input_list_;\n  std::vector<std::string> npu2model_type_list_;\n  std::vector<std::string> npu2model_output_list_;\n  std::vector<std::string> npu2model_type_list_output_;\n  rknn_context ctx_{0};\n  std::mutex rknpu2_infer_mtx_;\n};\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/rknpu2_inference_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"rknpu2_inference_flowunit.h\"\n\n#include \"virtualdriver_inference.h\"\n\nRKNPU2InferenceFlowUnit::RKNPU2InferenceFlowUnit() = default;\nRKNPU2InferenceFlowUnit::~RKNPU2InferenceFlowUnit() = default;\n\nmodelbox::Status RKNPU2InferenceFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  auto unit_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  unit_desc->GetModelEntry();\n  auto config = unit_desc->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n\n  auto params = std::make_shared<modelbox::InferenceRKNPUParams>();\n  auto ret = GetFlowUnitIO(params);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return ret;\n  }\n  params->device_id_ = dev_id_;\n\n  infer_ = std::make_shared<modelbox::RKNPU2Inference>();\n  return infer_->Init(unit_desc->GetModelEntry(),\n                      this->GetBindDevice()->GetDeviceManager()->GetDrivers(),\n                      merge_config, params);\n}\n\nmodelbox::Status RKNPU2InferenceFlowUnit::GetFlowUnitIO(\n    std::shared_ptr<modelbox::InferenceRKNPUParams> &params) {\n  auto unit_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n  
    this->GetFlowUnitDesc());\n  auto input_desc = unit_desc->GetFlowUnitInput();\n  auto output_desc = unit_desc->GetFlowUnitOutput();\n  for (auto &input : input_desc) {\n    params->input_name_list_.push_back(input.GetPortName());\n    params->input_type_list_.push_back(input.GetPortType());\n  }\n\n  for (auto &output : output_desc) {\n    params->output_name_list_.push_back(output.GetPortName());\n    params->output_type_list_.push_back(output.GetPortType());\n  }\n\n  if (params->input_name_list_.empty() || params->output_name_list_.empty()) {\n    MBLOG_ERROR << \"rknpu2 Wrong input[\" << params->input_name_list_.size()\n                << \"] or output[\" << params->output_name_list_.size()\n                << \"] number\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status RKNPU2InferenceFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto ret = infer_->Infer(data_ctx);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Execute infer failed, detail:\" << ret.Errormsg();\n    return {modelbox::STATUS_FAULT, ret.Errormsg()};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status RKNPU2InferenceFlowUnit::Close() {\n  MBLOG_INFO << \"rknn2 inference close\";\n  auto ret = infer_->Deinit();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Deinit inference failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nRKNPU2InferenceFlowUnitDesc::RKNPU2InferenceFlowUnitDesc() = default;\nRKNPU2InferenceFlowUnitDesc::~RKNPU2InferenceFlowUnitDesc() = default;\n\nvoid RKNPU2InferenceFlowUnitDesc::SetModelEntry(\n    const std::string &model_entry) {\n  model_entry_ = model_entry;\n}\n\nstd::string RKNPU2InferenceFlowUnitDesc::GetModelEntry() {\n  return model_entry_;\n}\n\nRKNPU2InferenceFlowUnitFactory::RKNPU2InferenceFlowUnitFactory() = default;\nRKNPU2InferenceFlowUnitFactory::~RKNPU2InferenceFlowUnitFactory() = 
default;\n\nstd::shared_ptr<modelbox::FlowUnit>\nRKNPU2InferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  return std::make_shared<RKNPU2InferenceFlowUnit>();\n};"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/inference_rknpu2/rknpu2_inference_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_RKNPU2_INFERENCE_H_\n#define MODELBOX_FLOWUNIT_RKNPU2_INFERENCE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include \"rknpu2_inference.h\"\n\nconstexpr const char *RKNPU2_FLOWUNIT_TYPE = \"rknpu2\";\nconstexpr const char *RKNPU2_INFERENCE_TYPE = \"rknpu2\";\n\nclass RKNPU2InferenceFlowUnit : public modelbox::FlowUnit {\n public:\n  RKNPU2InferenceFlowUnit();\n  ~RKNPU2InferenceFlowUnit() override;\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status GetFlowUnitIO(\n      std::shared_ptr<modelbox::InferenceRKNPUParams> &params);\n  std::shared_ptr<modelbox::RKNPU2Inference> infer_;\n};\n\nclass RKNPU2InferenceFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  RKNPU2InferenceFlowUnitDesc();\n  ~RKNPU2InferenceFlowUnitDesc() override;\n  void SetModelEntry(const std::string &model_entry);\n  std::string GetModelEntry();\n\n private:\n  std::string model_entry_;\n};\n\nclass 
RKNPU2InferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  RKNPU2InferenceFlowUnitFactory();\n  ~RKNPU2InferenceFlowUnitFactory() override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\n  FlowUnitProbe() {\n    return {};\n  }\n  std::string GetFlowUnitFactoryType() { return RKNPU2_FLOWUNIT_TYPE; };\n  std::string GetVirtualType() { return RKNPU2_INFERENCE_TYPE; };\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_RKNPU2_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/local_camera/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"local_camera\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_MPP_LIBRARY})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/local_camera/local_camera_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"local_camera_flowunit.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"v4l2_camera.h\"\n\n#define RK_CAMERA_MAXRETRY 10\n\nRockChipLocalCameraFlowUnit::RockChipLocalCameraFlowUnit() = default;\nRockChipLocalCameraFlowUnit::~RockChipLocalCameraFlowUnit() = default;\n\nmodelbox::Status RockChipLocalCameraFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  camWidth_ = opts->GetInt32(\"cam_width\", 640);\n  camHeight_ = opts->GetInt32(\"cam_height\", 480);\n  camWidth_ = MPP_ALIGN(camWidth_, MPP_ALIGN_WIDTH);\n  camHeight_ = MPP_ALIGN(camHeight_, MPP_ALIGN_HEIGHT);\n  camera_id_ = (uint32_t)(opts->GetUint32(\"cam_id\", 0));\n  fps_ = (uint32_t)(opts->GetInt32(\"fps\", 30));\n  if (fps_ <= 0 || fps_ > 60) {\n    fps_ = 30;\n  }\n  camera_bus_info_ = opts->GetString(\"bus_info\", \"\");\n  mirror_ = opts->GetBool(\"mirror\", true);\n\n  out_pix_fmt_str_ = opts->GetString(\"pix_fmt\", modelbox::IMG_DEFAULT_FMT);\n  MBLOG_INFO << \"rockchip local-camera with \" << out_pix_fmt_str_;\n\n  out_pix_fmt_ = modelbox::GetRGAFormat(out_pix_fmt_str_);\n  if (out_pix_fmt_ == RK_FORMAT_UNKNOWN) {\n 
   MBLOG_ERROR << \"Not support pix fmt \" << out_pix_fmt_str_;\n    return {modelbox::STATUS_BADCONF,\n            \"Not support pix fmt \" + out_pix_fmt_str_};\n  }\n\n  return jpeg_dec_.Init();\n}\n\nmodelbox::Status RockChipLocalCameraFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::string rk_source_url_ptr;\n  auto input_meta = data_ctx->GetInputMeta(LOCAL_CAMERA_INPUT);\n  if (input_meta != nullptr) {\n    rk_source_url_ptr = *(\n        std::static_pointer_cast<std::string>(input_meta->GetMeta(SOURCE_URL)));\n  }\n\n  // check url is invalid or not\n  if (!rk_source_url_ptr.empty()) {\n    if ((rk_source_url_ptr.at(0) < '0' || rk_source_url_ptr.at(0) > '9') &&\n        rk_source_url_ptr.substr(0, 4) != \"usb-\") {\n      rk_source_url_ptr = \"\";\n    }\n  }\n\n  if (rk_source_url_ptr.empty()) {\n    if (camera_bus_info_.empty() || camera_bus_info_.substr(0, 4) != \"usb-\") {\n      rk_source_url_ptr = std::to_string(camera_id_);\n    } else {\n      rk_source_url_ptr = camera_bus_info_;\n    }\n  }\n\n  auto camhdl_ptr = std::make_shared<V4L2Camera>();\n  bool prefer_rgb =\n      (RK_FORMAT_RGB_888 == out_pix_fmt_ || RK_FORMAT_BGR_888 == out_pix_fmt_);\n  auto ret = camhdl_ptr->Init(rk_source_url_ptr, camWidth_, camHeight_, fps_,\n                              prefer_rgb);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    auto msg = \"camera url:\" + rk_source_url_ptr +\n               \" init fail reason: \" + ret.Errormsg();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto frameindex_ptr = std::make_shared<int64_t>();\n  *(frameindex_ptr.get()) = 0;\n  auto retry_ptr = std::make_shared<int32_t>();\n  *(retry_ptr.get()) = 0;\n\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frameindex_ptr);\n  data_ctx->SetPrivate(LOCAL_CAMERA_CTX, camhdl_ptr);\n  data_ctx->SetPrivate(RETRY_COUNT_CTX, retry_ptr);\n  MBLOG_INFO << \"rknpu open local camera url = \" << rk_source_url_ptr\n             << \" 
(w,h,fmt)=\" << camhdl_ptr->GetWidth() << \",\"\n             << camhdl_ptr->GetHeight() << \",\" << camhdl_ptr->GetFmt();\n\n  return modelbox::STATUS_SUCCESS;\n};\n\nmodelbox::Status RockChipLocalCameraFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  MBLOG_DEBUG << \"rknpu local camera data post.\";\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status RockChipLocalCameraFlowUnit::Close() {\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status RockChipLocalCameraFlowUnit::BuildOutput(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::shared_ptr<modelbox::Buffer> &img_buf, MppFrame &frame,\n    std::shared_ptr<int64_t> &frame_index) {\n  auto output_bufs = data_ctx->Output(FRAME_INFO_OUTPUT);\n  std::shared_ptr<modelbox::Buffer> buffer = nullptr;\n  if (img_buf != nullptr &&\n      out_pix_fmt_ == modelbox::GetRGAFormat(mpp_frame_get_fmt(frame))) {\n    buffer = img_buf;\n    auto w = (int32_t)mpp_frame_get_width(frame);\n    auto h = (int32_t)mpp_frame_get_height(frame);\n    auto ws = (int32_t)mpp_frame_get_hor_stride(frame);\n    auto hs = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);  // frame allign too large\n    buffer->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n    buffer->Set(\"width\", w);\n    buffer->Set(\"height\", h);\n    int32_t channel = 3;\n    if (RK_FORMAT_BGR_888 == out_pix_fmt_ ||\n        RK_FORMAT_RGB_888 == out_pix_fmt_) {\n      buffer->Set(\"width_stride\", ws * 3);\n    } else {\n      buffer->Set(\"width_stride\", ws);\n      h = h * 3 / 2;\n      hs = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);\n      channel = 1;\n    }\n\n    buffer->Set(\"channel\", channel);\n    buffer->Set(\"shape\",\n                std::vector<size_t>{(size_t)h, (size_t)w, (size_t)channel});\n    buffer->Set(\"height_stride\", hs);\n    buffer->Set(\"layout\", std::string(\"hwc\"));\n  } else {\n    buffer = ColorChange(frame, out_pix_fmt_, GetBindDevice());\n  }\n\n  if (buffer != nullptr) {\n    // update 
rk buffer\n    buffer->Set(\"index\", (*frame_index)++);\n    buffer->Set(\"eos\", false);\n    buffer->Set(\"pix_fmt\", out_pix_fmt_str_);\n\n    std::shared_ptr<modelbox::Buffer> flip_buf = buffer;\n    if (mirror_) {\n      flip_buf = MirrorImg(buffer, out_pix_fmt_);\n    }\n\n    output_bufs->PushBack(flip_buf);\n  }\n\n  return modelbox::STATUS_CONTINUE;\n}\n\nMppFrame RockChipLocalCameraFlowUnit::SetMppFrameInfo(size_t w, size_t h,\n                                                      MppFrameFormat fmt,\n                                                      MppBuffer mpp_buf) {\n  MppFrame frame = nullptr;\n  auto ret = mpp_frame_init(&frame);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"failed to mpp frame init ret: \" << ret;\n    return nullptr;\n  }\n\n  mpp_frame_set_width(frame, w);\n  mpp_frame_set_height(frame, h);\n  mpp_frame_set_hor_stride(frame, w);\n  mpp_frame_set_ver_stride(frame, h);\n  mpp_frame_set_fmt(frame, fmt);\n  mpp_frame_set_eos(frame, 0);\n  mpp_frame_set_buffer(frame, mpp_buf);\n\n  return frame;\n}\n\nMppFrame RockChipLocalCameraFlowUnit::ProcessYVY2(\n    const uint8_t *buf, size_t size, size_t w, size_t h,\n    std::shared_ptr<modelbox::Buffer> &img_buf) {\n  auto yuy2_buf = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  auto ret = yuy2_buf->Build(size);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed to build buffer reason: \" << ret.Errormsg();\n    return nullptr;\n  }\n\n  auto *mpp_cam_buf = (MppBuffer)(yuy2_buf->MutableData());\n  auto *cpu_cam_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_cam_buf);\n  auto yuy2_size = w * h;\n  // yuy2 -- yuv422sp\n  for (size_t i = 0; i < yuy2_size; i++) {\n    cpu_cam_buf[i] = buf[i << 1];\n    cpu_cam_buf[i + yuy2_size] = buf[(i << 1) + 1];\n  }\n\n  img_buf = yuy2_buf;\n\n  return SetMppFrameInfo(w, h, MPP_FMT_YUV422SP, mpp_cam_buf);\n}\n\nMppFrame RockChipLocalCameraFlowUnit::ProcessJpg(\n    const uint8_t *buf, size_t size, size_t w, size_t h,\n    
std::shared_ptr<modelbox::Buffer> &img_buf) {\n  // here make sure jpg_dec is locked, mpp jpeg dec is not thread-safe , only 1\n  // jpg dec jpg dec onebyone may even faster\n  std::lock_guard<std::mutex> lock(jpgdec_mtx_);\n  // camera now in mjpg mode\n  auto width = (int)w;\n  auto height = (int)h;\n  auto *frame = jpeg_dec_.Decode((void *)buf, size, width, height);\n  if (frame == nullptr) {\n    MBLOG_WARN << \"local camera jpg decoder error\";\n  }\n\n  return frame;\n}\n\nMppFrame RockChipLocalCameraFlowUnit::ProcessNV12(\n    const uint8_t *buf, size_t size, size_t w, size_t h,\n    std::shared_ptr<modelbox::Buffer> &img_buf) {\n  auto nv12_buf = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  auto mb_ret = nv12_buf->Build(size);\n  if (mb_ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed to build buffer reason: \" << mb_ret.Errormsg();\n    return nullptr;\n  }\n\n  auto *mpp_cam_buf = (MppBuffer)(nv12_buf->MutableData());\n  auto *cpu_cam_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_cam_buf);\n  auto ret = memcpy_s(cpu_cam_buf, size, buf, size);\n  if (ret != 0) {\n    MBLOG_ERROR << \"process nv12 memcpy fail\";\n    return nullptr;\n  }\n\n  img_buf = nv12_buf;\n\n  return SetMppFrameInfo(w, h, MPP_FMT_YUV420SP, mpp_cam_buf);\n}\n\nMppFrame RockChipLocalCameraFlowUnit::ProcessRGB24(\n    const uint8_t *buf, size_t size, size_t w, size_t h,\n    std::shared_ptr<modelbox::Buffer> &img_buf) {\n  auto rgb24_buf = std::make_shared<modelbox::Buffer>(GetBindDevice());\n  auto mb_ret = rgb24_buf->Build(size);\n  if (mb_ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed to build buffer reason: \" << mb_ret.Errormsg();\n    return nullptr;\n  }\n\n  auto *mpp_cam_buf = (MppBuffer)(rgb24_buf->MutableData());\n  auto *cpu_cam_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_cam_buf);\n  auto ret = memcpy_s(cpu_cam_buf, size, buf, size);\n  if (ret != 0) {\n    MBLOG_ERROR << \"process rgb memcpy fail\";\n    return nullptr;\n  }\n\n  img_buf = 
rgb24_buf;\n\n  return SetMppFrameInfo(w, h, MPP_FMT_RGB888, mpp_cam_buf);\n}\n\nmodelbox::Status RockChipLocalCameraFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto camhdl_ptr = std::static_pointer_cast<V4L2Camera>(\n      data_ctx->GetPrivate(LOCAL_CAMERA_CTX));\n  auto frame_index =\n      std::static_pointer_cast<int64_t>(data_ctx->GetPrivate(FRAME_INDEX_CTX));\n  auto retry_count =\n      std::static_pointer_cast<int32_t>(data_ctx->GetPrivate(RETRY_COUNT_CTX));\n  if (camhdl_ptr == nullptr || frame_index == nullptr ||\n      retry_count == nullptr) {\n    MBLOG_ERROR << \"localcamera is not init\";\n    return {modelbox::STATUS_FAULT, \"localcamera is not init\"};\n  }\n\n  auto ret = modelbox::STATUS_CONTINUE;\n\n  Defer {\n    if (ret == modelbox::STATUS_CONTINUE) {\n      auto event = std::make_shared<modelbox::FlowUnitEvent>();\n      data_ctx->SendEvent(event);\n    }\n  };\n\n  using FuncType = std::function<MppFrame(RockChipLocalCameraFlowUnit *,\n                                          uint8_t *, size_t, size_t, size_t,\n                                          std::shared_ptr<modelbox::Buffer> &)>;\n  static const std::map<uint32_t, FuncType> process_funcs = {\n      {V4L2_PIX_FMT_MJPEG, &RockChipLocalCameraFlowUnit::ProcessJpg},\n      {V4L2_PIX_FMT_NV12, &RockChipLocalCameraFlowUnit::ProcessNV12},\n      {V4L2_PIX_FMT_RGB24, &RockChipLocalCameraFlowUnit::ProcessRGB24},\n      {V4L2_PIX_FMT_YUYV, &RockChipLocalCameraFlowUnit::ProcessYVY2},\n  };\n\n  MppFrame frame = nullptr;\n  std::shared_ptr<modelbox::Buffer> img_buf = nullptr;\n  auto cam_buf = camhdl_ptr->GetFrame();\n  if (cam_buf == nullptr) {\n    const auto *msg = \"failed to get frame\";\n    MBLOG_ERROR << msg;\n    ret = modelbox::STATUS_FAULT;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto iter = process_funcs.find(camhdl_ptr->GetFmt());\n  if (cam_buf != nullptr && iter != process_funcs.end()) {\n    frame =\n        iter->second(this, 
(uint8_t *)(cam_buf->start), cam_buf->length,\n                     camhdl_ptr->GetWidth(), camhdl_ptr->GetHeight(), img_buf);\n  }\n  if (frame == nullptr) {\n    // log has been put in ProcessYVY2 or ProcessJpg\n    if ((*retry_count)++ > RK_CAMERA_MAXRETRY) {\n      const auto *msg = \"localcamera get buffer fail\";\n      MBLOG_ERROR << msg;\n      ret = modelbox::STATUS_FAULT;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    MBLOG_WARN << \"local camera get null buffer\";\n    return modelbox::STATUS_CONTINUE;\n  }\n\n  *retry_count = 0;\n  return BuildOutput(data_ctx, img_buf, frame, frame_index);\n}\n\nMODELBOX_FLOWUNIT(RockChipLocalCameraFlowUnit, rk_cam_desc) {\n  rk_cam_desc.SetFlowUnitName(FLOWUNIT_NAME);\n  rk_cam_desc.SetFlowUnitGroupType(\"Video\");\n  rk_cam_desc.AddFlowUnitInput({LOCAL_CAMERA_INPUT, \"cpu\"});\n  rk_cam_desc.AddFlowUnitOutput({FRAME_INFO_OUTPUT, modelbox::DEVICE_TYPE});\n  rk_cam_desc.SetFlowType(modelbox::STREAM);\n  rk_cam_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"cam_width\", \"int\", false, \"0\", \"the camera width\"));\n  rk_cam_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"cam_height\", \"int\", false, \"0\", \"the camera height\"));\n  rk_cam_desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"cam_id\", \"int\", false, \"-1\", \"the camera id\"));\n  rk_cam_desc.AddFlowUnitOption(\n      modelbox::FlowUnitOption(\"fps\", \"int\", false, \"30\", \"the camera fps\"));\n  rk_cam_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"bus_info\", \"string\", false, \"\",\n      \"v4l2 camera bus_info, use v4l2-ctl --list-devices\"));\n  rk_cam_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"string\", false, modelbox::IMG_DEFAULT_FMT, \"the pix format\"));\n  rk_cam_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"mirror\", \"bool\", false, \"true\", \"camera mirror\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(rk_cam_desc) {\n  
rk_cam_desc.Desc.SetName(FLOWUNIT_NAME);\n  rk_cam_desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  rk_cam_desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  rk_cam_desc.Desc.SetDescription(FLOWUNIT_DESC);\n  rk_cam_desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/local_camera/local_camera_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_LOCAL_CAMERA_ROCKCHIP_H_\n#define MODELBOX_FLOWUNIT_LOCAL_CAMERA_ROCKCHIP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n\n#include <algorithm>\n#include <string>\n\n#include \"rga.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"local_camera\";\nconstexpr const char *FLOWUNIT_DESC = \"A rockchip local camera flowunit\";\nconstexpr const char *LOCAL_CAMERA_INPUT = \"in_camera_packet\";\nconstexpr const char *FRAME_INFO_OUTPUT = \"out_camera_frame\";\nconstexpr const char *LOCAL_CAMERA_CTX = \"local_camera_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const char *RETRY_COUNT_CTX = \"retry_count_ctx\";\nconstexpr const char *SOURCE_URL = \"source_url\";\n\nclass RockChipLocalCameraFlowUnit : public modelbox::FlowUnit {\n public:\n  RockChipLocalCameraFlowUnit();\n  ~RockChipLocalCameraFlowUnit() override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  
modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status BuildOutput(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::shared_ptr<modelbox::Buffer> &img_buf, MppFrame &frame,\n      std::shared_ptr<int64_t> &frame_index);\n  MppFrame ProcessJpg(const uint8_t *buf, size_t size, size_t w, size_t h,\n                      std::shared_ptr<modelbox::Buffer> &img_buf);\n  MppFrame ProcessNV12(const uint8_t *buf, size_t size, size_t w, size_t h,\n                       std::shared_ptr<modelbox::Buffer> &img_buf);\n  MppFrame ProcessRGB24(const uint8_t *buf, size_t size, size_t w, size_t h,\n                        std::shared_ptr<modelbox::Buffer> &img_buf);\n  MppFrame ProcessYVY2(const uint8_t *buf, size_t size, size_t w, size_t h,\n                       std::shared_ptr<modelbox::Buffer> &img_buf);\n  MppFrame SetMppFrameInfo(size_t w, size_t h, MppFrameFormat fmt,\n                           MppBuffer mpp_buf);\n\n  uint32_t camWidth_{0};\n  uint32_t camHeight_{0};\n  uint32_t camera_id_{0};\n  uint32_t fps_{30};\n  bool mirror_{true};\n  std::string camera_bus_info_;\n  modelbox::MppJpegDecode jpeg_dec_;\n  std::string out_pix_fmt_str_;\n  RgaSURF_FORMAT out_pix_fmt_{RK_FORMAT_YCbCr_420_SP};\n  std::mutex jpgdec_mtx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_LOCAL_CAMERA_ROCKCHIP_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/local_camera/v4l2_camera.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"v4l2_camera.h\"\n\n#include <fcntl.h>\n#include <glob.h>\n#include <securec.h>\n#include <sys/ioctl.h>\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include \"modelbox/base/log.h\"\n\nconstexpr const char *CAM_DEV = \"/dev/video\";\n#define FMT_NUM_PLANES 1\n#define SKIP_COUNT 10\n\nV4L2Camera::V4L2Camera() = default;\n\nV4L2Camera::~V4L2Camera() {\n  if (fd_ < 0) {\n    return;\n  }\n\n  // Stop capturing\n  CamIoCtl(fd_, VIDIOC_STREAMOFF, &type_);\n\n  // un-mmap buffers\n  for (size_t i = 0; i < RK_CAMERA_BUFCNT; i++) {\n    struct v4l2_buffer buf = {0};\n    buf.type = type_;\n    buf.memory = V4L2_MEMORY_MMAP;\n    buf.index = i;\n    CamIoCtl(fd_, VIDIOC_QUERYBUF, &buf);\n    // no mpp_buffer , not need put\n    munmap(fbuf_[i].start, buf.length);\n  }\n\n  // Close v4l2 device\n  close(fd_);\n  fd_ = -1;\n}\n\nmodelbox::Status V4L2Camera::CamIoCtl(int32_t fd, int32_t req, void *arg) {\n  int32_t ret;\n\n  while ((ret = ioctl(fd, req, arg))) {\n    if (ret == -1 && (EINTR != errno && EAGAIN != errno)) {\n      break;\n    }\n    // 10 milliseconds\n    usleep(1000 * 10);\n  }\n\n  if (ret == 0) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  auto msg = std::string(\"ioctl fail errno: \") + modelbox::StrError(errno);\n  MBLOG_ERROR << msg;\n  return 
{modelbox::STATUS_FAULT, msg};\n}\n\nint32_t V4L2Camera::GetCamfd(int32_t id, const std::string &bus_info) {\n  int32_t cam_index = 0;\n  int32_t fd = -1;\n  struct v4l2_capability cap;\n  glob_t glob_result;\n  struct stat buffer;\n  bool bfound = false;\n\n  auto ret = glob(\"/dev/video*\", GLOB_TILDE, nullptr, &glob_result);\n  if (ret != 0) {\n    return -1;\n  }\n\n  // do not add any return , globfree(&glob_result);\n  for (unsigned int i = 0; i < glob_result.gl_pathc; i++) {\n    if (stat(glob_result.gl_pathv[i], &buffer) == -1) {\n      continue;\n    }\n\n    if (S_ISDIR(buffer.st_mode) != 0) {\n      continue;\n    }\n\n    if (fd >= 0) {\n      close(fd);\n      fd = -1;\n    }\n\n    fd = open(glob_result.gl_pathv[i], O_RDWR, 0);\n    if (fd < 0) {\n      MBLOG_DEBUG << \"Cannot open device:\" << glob_result.gl_pathv[i];\n      continue;\n    }\n\n    Defer { globfree(&glob_result); };\n\n    // detect it is a camera device\n    if (modelbox::STATUS_SUCCESS != CamIoCtl(fd, VIDIOC_QUERYCAP, &cap)) {\n      MBLOG_DEBUG << \"Not v4l2 device:\" << glob_result.gl_pathv[i];\n      continue;\n    }\n\n    if (!IsCamera(fd, cap.capabilities, glob_result.gl_pathv[i])) {\n      continue;\n    }\n\n    cam_index++;\n    if (bus_info.empty()) {\n      if (cam_index == (id + 1)) {\n        // find id camera, return the fd\n        bfound = true;\n        break;\n      }\n    } else {\n      // if bus_info, find the right v4l2 device\n      if (bus_info == (const char *)(cap.bus_info)) {\n        bfound = true;\n        break;\n      }\n    }\n  }\n\n  if (!bfound && fd >= 0) {\n    close(fd);\n    fd = -1;\n  }\n\n  return fd;\n}\n\nbool V4L2Camera::IsCamera(int32_t fd, uint32_t capabilities,\n                          const char *cam_name) {\n  if (!(capabilities & V4L2_CAP_VIDEO_CAPTURE) &&\n      !(capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)) {\n    MBLOG_DEBUG << \"Camera Capture not supported for\" << cam_name;\n    return false;\n  }\n\n  if (!(capabilities 
& V4L2_CAP_STREAMING)) {\n    MBLOG_DEBUG << \"Camera Streaming IO Not Supported for \" << cam_name;\n    return false;\n  }\n\n  if (capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) {\n    type_ = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;\n  } else {\n    type_ = V4L2_BUF_TYPE_VIDEO_CAPTURE;\n  }\n\n  struct v4l2_format vfmt = {0};\n  vfmt.type = type_;\n  if (modelbox::STATUS_SUCCESS != CamIoCtl(fd, VIDIOC_G_FMT, &vfmt)) {\n    MBLOG_DEBUG << \"Camera VIDIOC_G_FMT fail for \" << cam_name;\n    return false;\n  }\n\n  if (vfmt.fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG &&\n      vfmt.fmt.pix.pixelformat != V4L2_PIX_FMT_YUYV) {\n    MBLOG_DEBUG << \"Camera fmt:\" << vfmt.fmt.pix.pixelformat\n                << \" not support for \" << cam_name;\n    return false;\n  }\n\n  return true;\n}\n\nmodelbox::Status V4L2Camera::SetFmt(uint32_t cam_width, uint32_t cam_height,\n                                    bool prefer_rgb) {\n  struct v4l2_format vfmt = {0};\n  vfmt.type = type_;\n  vfmt.fmt.pix.width = cam_width;\n  vfmt.fmt.pix.height = cam_height;\n\n  std::vector<uint32_t> try_fmts = {V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_NV12,\n                                    V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_YUYV};\n  if (prefer_rgb) {\n    try_fmts[1] = V4L2_PIX_FMT_RGB24;\n  }\n\n  // 有优先级顺序，一定要优先使用MJPEG\n  uint32_t i = 0;\n  for (i = 0; i < try_fmts.size(); i++) {\n    vfmt.fmt.pix.pixelformat = try_fmts[i];\n    if (modelbox::STATUS_SUCCESS == CamIoCtl(fd_, VIDIOC_S_FMT, &vfmt)) {\n      break;\n    }\n  }\n\n  if (modelbox::STATUS_SUCCESS != CamIoCtl(fd_, VIDIOC_G_FMT, &vfmt)) {\n    MBLOG_ERROR << \"VIDIOC_G_FMT fail\";\n    return {modelbox::STATUS_FAULT, \"VIDIOC_G_FMT fail\"};\n  }\n\n  // 设置失败，使用默认值\n  if (i == try_fmts.size()) {\n    MBLOG_WARN << \"VIDIOC_S_FMT fail, use the default value\";\n    if (CamIoCtl(fd_, VIDIOC_S_FMT, &vfmt) != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"failed to cam io ctl\";\n      return {modelbox::STATUS_FAULT, \"failed to cam io ctl\"};\n  
  }\n  }\n\n  cam_fmt_ = vfmt.fmt.pix.pixelformat;\n  width_ = vfmt.fmt.pix.width;\n  height_ = vfmt.fmt.pix.height;\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status V4L2Camera::SetFps(uint32_t fps) {\n  struct v4l2_streamparm setfps = {0};\n  setfps.type = type_;\n  setfps.parm.capture.timeperframe.numerator = 1;\n  setfps.parm.capture.timeperframe.denominator = fps;\n  if (modelbox::STATUS_SUCCESS != CamIoCtl(fd_, VIDIOC_S_PARM, &setfps)) {\n    MBLOG_WARN << \"VIDIOC_S_PARM set fps fail\";\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status V4L2Camera::RequestBuf() {\n  struct v4l2_requestbuffers req = {0};\n  req.count = RK_CAMERA_BUFCNT;\n  req.type = type_;\n  req.memory = V4L2_MEMORY_MMAP;\n  auto ret = CamIoCtl(fd_, VIDIOC_REQBUFS, &req);\n  if (modelbox::STATUS_SUCCESS != ret) {\n    MBLOG_ERROR << \"Device does not support mmap\";\n    return {modelbox::STATUS_FAULT,\n            \"Device does not support mmap reason: \" + ret.Errormsg()};\n  }\n\n  if (req.count != RK_CAMERA_BUFCNT) {\n    MBLOG_ERROR << \"Device buffer count mismatch\";\n    return {modelbox::STATUS_FAULT, \"Device buffer count mismatch\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status V4L2Camera::MapMemory() {\n  uint32_t buf_len = 0;\n  uint32_t offset = 0;\n  // mmap the v4l2 buf into userspace memory\n  for (uint32_t i = 0; i < RK_CAMERA_BUFCNT; i++) {\n    struct v4l2_buffer buf = {0};\n    buf.type = type_;\n    buf.memory = V4L2_MEMORY_MMAP;\n    buf.index = i;\n    struct v4l2_plane planes[FMT_NUM_PLANES];\n    buf.memory = V4L2_MEMORY_MMAP;\n    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type_) {\n      buf.m.planes = planes;\n      buf.length = FMT_NUM_PLANES;\n    }\n\n    auto ret = CamIoCtl(fd_, VIDIOC_QUERYBUF, &buf);\n    if (modelbox::STATUS_SUCCESS != ret) {\n      MBLOG_ERROR << \"VIDIOC_QUERYBUF fail\";\n      return {modelbox::STATUS_FAULT,\n              \"VIDIOC_QUERYBUF fail reason: \" + ret.Errormsg()};\n    }\n\n    
if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == buf.type) {\n      buf_len = buf.m.planes[0].length;\n      offset = buf.m.planes[0].m.mem_offset;\n    } else {\n      buf_len = buf.length;\n      offset = buf.m.offset;\n    }\n\n    fbuf_[i].start =\n        mmap(nullptr, buf_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, offset);\n    shared_fbuf_[i].start = fbuf_[i].start;\n    if (MAP_FAILED == fbuf_[i].start) {\n      MBLOG_ERROR << \"Failed to map device frame buffers\";\n      return {modelbox::STATUS_FAULT, \"Failed to map device frame buffers\"};\n    }\n    // do not map to mpp_buffer , dma_buffer seems fail in jpg_dec\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status V4L2Camera::QBufAndRun() {\n  // qbuf into v4l2\n  for (size_t i = 0; i < RK_CAMERA_BUFCNT; i++) {\n    struct v4l2_plane planes[FMT_NUM_PLANES];\n    struct v4l2_buffer buf = {0};\n    buf.type = type_;\n    buf.memory = V4L2_MEMORY_MMAP;\n    buf.index = i;\n    buf.memory = V4L2_MEMORY_MMAP;\n\n    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type_) {\n      buf.m.planes = planes;\n      buf.length = FMT_NUM_PLANES;\n    }\n\n    auto ret = CamIoCtl(fd_, VIDIOC_QBUF, &buf);\n    if (modelbox::STATUS_SUCCESS != ret) {\n      MBLOG_ERROR << \"VIDIOC_QBUF fail at :\" << i\n                  << \" reason: \" << ret.Errormsg();\n      return {modelbox::STATUS_FAULT,\n              \"VIDIOC_QBUF fail at :\" + std::to_string(i) +\n                  \" reason: \" + ret.Errormsg()};\n    }\n  }\n\n  // Start capturing\n  enum v4l2_buf_type type = type_;\n  auto ret = CamIoCtl(fd_, VIDIOC_STREAMON, &type);\n  if (modelbox::STATUS_SUCCESS != ret) {\n    MBLOG_ERROR << \"VIDIOC_STREAMON fail reason: \" << ret.Errormsg();\n    return {modelbox::STATUS_FAULT,\n            \"VIDIOC_STREAMON fail reason: \" + ret.Errormsg()};\n  }\n\n  // skip some frames at start\n  for (size_t i = 0; i < SKIP_COUNT; i++) {\n    GetFrame();\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status 
V4L2Camera::Init(const std::string &cam_url,\n                                  uint32_t cam_width, uint32_t cam_height,\n                                  uint32_t fps, bool prefer_rgb) {\n  int32_t id = -1;\n  std::string cam_name = cam_url;\n  try {\n    // string -> integer\n    id = std::stoi(cam_name);\n    // set empty\n    cam_name = \"\";\n  } catch (const std::exception &e) {\n    auto msg =\n        \"stoi exception v4l2 camera name: \" + cam_name + \" reason: \" + e.what();\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  fd_ = GetCamfd(id, cam_name);\n  if (fd_ < 0) {\n    auto msg = \"can not find v4l2 camera name: \" + cam_name;\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto ret = SetFmt(cam_width, cam_height, prefer_rgb);\n  if (modelbox::STATUS_SUCCESS != ret) {\n    return {modelbox::STATUS_FAULT,\n            \"failed to SetFmt reason: \" + ret.Errormsg()};\n  }\n\n  ret = SetFps(fps);\n  if (modelbox::STATUS_SUCCESS != ret) {\n    return {modelbox::STATUS_FAULT,\n            \"failed to SetFps reason: \" + ret.Errormsg()};\n  }\n\n  ret = RequestBuf();\n  if (modelbox::STATUS_SUCCESS != ret) {\n    return {modelbox::STATUS_FAULT,\n            \"failed to RequestBuf reason: \" + ret.Errormsg()};\n  }\n\n  ret = MapMemory();\n  if (modelbox::STATUS_SUCCESS != ret) {\n    return {modelbox::STATUS_FAULT,\n            \"failed to MapMemory reason: \" + ret.Errormsg()};\n  }\n\n  ret = QBufAndRun();\n  if (modelbox::STATUS_SUCCESS != ret) {\n    return {modelbox::STATUS_FAULT,\n            \"failed to QBufAndRun reason: \" + ret.Errormsg()};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::shared_ptr<CamFrame> V4L2Camera::GetFrame() {\n  struct v4l2_buffer buf = {0};\n  buf.type = type_;\n  buf.memory = V4L2_MEMORY_MMAP;\n\n  struct v4l2_plane planes[FMT_NUM_PLANES];\n  if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type_) {\n    buf.m.planes = planes;\n    buf.length = FMT_NUM_PLANES;\n 
 }\n\n  if (modelbox::STATUS_SUCCESS != CamIoCtl(fd_, VIDIOC_DQBUF, &buf)) {\n    MBLOG_ERROR << \"GetFrame VIDIOC_DQBUF fail\";\n    return nullptr;\n  }\n\n  if (buf.index >= RK_CAMERA_BUFCNT) {\n    MBLOG_ERROR << \"GetFrame buffer index out of bounds\";\n    return nullptr;\n  }\n\n  if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type_) {\n    buf.bytesused = buf.m.planes[0].bytesused;\n  }\n\n  shared_fbuf_[buf.index].length = buf.bytesused;\n  shared_fbuf_[buf.index].start = fbuf_[buf.index].start;\n  std::shared_ptr<CamFrame> ret(\n      &shared_fbuf_[buf.index],\n      std::bind(&V4L2Camera::PutFrame, this, buf.index, std::placeholders::_1));\n\n  return ret;\n}\n\n// It's OK to capture into this framebuffer now\nvoid V4L2Camera::PutFrame(uint32_t idx, CamFrame *p) {\n  // do not delete p, it's class local var\n  struct v4l2_buffer buf = {0};\n  buf.type = type_;\n  buf.memory = V4L2_MEMORY_MMAP;\n  buf.index = idx;\n\n  struct v4l2_plane planes[FMT_NUM_PLANES];\n  if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type_) {\n    buf.m.planes = planes;\n    buf.length = FMT_NUM_PLANES;\n  }\n\n  auto ret = CamIoCtl(fd_, VIDIOC_QBUF, &buf);\n  if (modelbox::STATUS_SUCCESS != ret) {\n    MBLOG_ERROR << \"PutFrame VIDIOC_QBUF fail reason: \" << ret.Errormsg();\n  }\n}"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/local_camera/v4l2_camera.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef _V4L2_CAMERA_H_\n#define _V4L2_CAMERA_H_\n\n#include <linux/videodev2.h>\n#include <modelbox/base/status.h>\n\n#include <string>\n\n#define RK_CAMERA_BUFCNT 4\n\ntypedef struct CamFrame_t {\n  void *start;\n  size_t length;\n} CamFrame;\n\nclass V4L2Camera {\n public:\n  V4L2Camera();\n  virtual ~V4L2Camera();\n\n  modelbox::Status Init(const std::string &cam_url, uint32_t cam_width,\n                        uint32_t cam_height, uint32_t fps, bool prefer_rgb);\n  std::shared_ptr<CamFrame> GetFrame();\n  inline uint32_t GetWidth() { return width_; }\n  inline uint32_t GetHeight() { return height_; }\n  inline uint32_t GetFmt() { return cam_fmt_; }\n\n private:\n  modelbox::Status CamIoCtl(int32_t fd, int32_t req, void *arg);\n  modelbox::Status SetFmt(uint32_t cam_width, uint32_t cam_height,\n                          bool prefer_rgb);\n  modelbox::Status SetFps(uint32_t fps);\n  modelbox::Status RequestBuf();\n  modelbox::Status MapMemory();\n  modelbox::Status QBufAndRun();\n  void PutFrame(uint32_t idx, CamFrame *p);\n  int32_t GetCamfd(int32_t id, const std::string &bus_info);\n  bool IsCamera(int32_t fd, uint32_t capabilities, const char *cam_name);\n\n private:\n  int32_t fd_{-1};\n  uint32_t cam_fmt_{V4L2_PIX_FMT_MJPEG};\n  enum v4l2_buf_type type_ { V4L2_BUF_TYPE_VIDEO_CAPTURE };\n  uint32_t 
width_{0};\n  uint32_t height_{0};\n  CamFrame fbuf_[RK_CAMERA_BUFCNT];         // frame buffers\n  CamFrame shared_fbuf_[RK_CAMERA_BUFCNT];  // frame buffers\n};\n\n#endif"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/resize/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"resize\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_MPP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_RGA_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/resize/resize_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"resize_flowunit.h\"\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"securec.h\"\n\n#define MIN_SIZE 32\n\nmodelbox::Status ResizeFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  dest_width_ = opts->GetUint32(\"image_width\", 0);\n\n  dest_height_ = opts->GetUint32(\"image_height\", 0);\n\n  if (dest_width_ < MIN_SIZE || dest_height_ < MIN_SIZE) {\n    std::string msg =\n        \"Dest width or dest height must great equal than 32, dest_width: \" +\n        std::to_string(dest_width_) +\n        \" dest_height: \" + std::to_string(dest_height_);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_BADCONF, msg};\n  }\n\n  align_width_ = MPP_ALIGN(dest_width_, MPP_ALIGN_WIDTH);\n  align_height_ = MPP_ALIGN(dest_height_, MPP_ALIGN_HEIGHT);\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid ResizeFlowUnit::WriteData(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &pix_fmt, RgaSURF_FORMAT rga_fmt,\n    std::shared_ptr<modelbox::Buffer> &out_image) {\n  auto output_bufs = data_ctx->Output(OUT_IMG);\n  out_image->Set(\"pix_fmt\", pix_fmt);\n  out_image->Set(\"width\", (int32_t)dest_width_);\n  
out_image->Set(\"height\", (int32_t)dest_height_);\n  if (RK_FORMAT_RGB_888 == rga_fmt || RK_FORMAT_BGR_888 == rga_fmt) {\n    out_image->Set(\"width_stride\", (int32_t)(align_width_ * 3));\n  } else {\n    out_image->Set(\"width_stride\", (int32_t)align_width_);\n  }\n\n  out_image->Set(\"height_stride\", (int32_t)align_height_);\n  out_image->Set(\"layout\", std::string(\"hwc\"));\n  size_t height = dest_height_;\n  size_t channel = 3;\n  if (rga_fmt == RK_FORMAT_YCbCr_420_SP || rga_fmt == RK_FORMAT_YCrCb_420_SP) {\n    height = dest_height_ * 3 / 2;\n    channel = 1;\n  }\n\n  out_image->Set(\"channel\", (int32_t)channel);\n  out_image->Set(\"shape\",\n                 std::vector<size_t>{height, (size_t)dest_width_, channel});\n  out_image->Set(\"type\", modelbox::ModelBoxDataType::MODELBOX_UINT8);\n\n  output_bufs->PushBack(out_image);\n}\n\nmodelbox::Status ResizeFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_img_buffer_list = data_ctx->Input(IN_IMG);\n  auto img_count = input_img_buffer_list->Size();\n  if (img_count == 0) {\n    MBLOG_ERROR << \"input img buffer list is empty\";\n    return {modelbox::STATUS_INVALID, \"input img buffer list is empty\"};\n  }\n\n  for (size_t i = 0; i < img_count; ++i) {\n    auto in_image = input_img_buffer_list->At(i);\n\n    std::string pix_fmt;\n    RgaSURF_FORMAT rga_fmt = RK_FORMAT_UNKNOWN;\n    in_image->Get(\"pix_fmt\", pix_fmt);\n    rga_fmt = modelbox::GetRGAFormat(pix_fmt);\n    if (rga_fmt == RK_FORMAT_UNKNOWN) {\n      MBLOG_ERROR << \"rga fmt unknow\";\n      return {modelbox::STATUS_NOTSUPPORT, \"rga fmt unknow\"};\n    }\n\n    rga_buffer_t in_buf;\n    if (modelbox::GetRGAFromImgBuffer(in_image, rga_fmt, in_buf) !=\n        modelbox::STATUS_SUCCESS) {\n      MBLOG_WARN << \"input img can not change to rga buffer\";\n      return {modelbox::STATUS_NOTSUPPORT,\n              \"input img can not change to rga buffer\"};\n    }\n\n    auto device = 
this->GetBindDevice();\n    rga_buffer_t out_buf;\n    auto out_image = modelbox::CreateEmptyMppImg(dest_width_, dest_height_,\n                                                 rga_fmt, device, out_buf);\n    if (out_image == nullptr) {\n      MBLOG_ERROR << \"failed to create mpp img\";\n      return {modelbox::STATUS_NOTSUPPORT, \"failed to create mpp img\"};\n    }\n\n    IM_STATUS status = imresize(in_buf, out_buf);\n    if (status != IM_STATUS_SUCCESS) {\n      MBLOG_ERROR << \"rga resize failed: \" << status;\n      return {modelbox::STATUS_NOTSUPPORT,\n              \"rga resize failed: \" + std::to_string(status)};\n    }\n\n    WriteData(data_ctx, pix_fmt, rga_fmt, out_image);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ResizeFlowUnit::Close() { return modelbox::STATUS_SUCCESS; }\n\nMODELBOX_FLOWUNIT(ResizeFlowUnit, rk_resize_desc) {\n  rk_resize_desc.SetFlowUnitName(FLOWUNIT_NAME);\n  rk_resize_desc.SetFlowUnitGroupType(\"Image\");\n  rk_resize_desc.AddFlowUnitInput({IN_IMG});\n  rk_resize_desc.AddFlowUnitOutput({OUT_IMG});\n  rk_resize_desc.SetFlowType(modelbox::NORMAL);\n  rk_resize_desc.SetInputContiguous(false);\n  rk_resize_desc.SetDescription(FLOWUNIT_DESC);\n  rk_resize_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"image_width\", \"int\", true, \"0\", \"the resize width\"));\n  rk_resize_desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"image_height\", \"int\", true, \"0\", \"the resize height\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(rk_resize_desc) {\n  rk_resize_desc.Desc.SetName(FLOWUNIT_NAME);\n  rk_resize_desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  rk_resize_desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  rk_resize_desc.Desc.SetDescription(FLOWUNIT_DESC);\n  rk_resize_desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/resize/resize_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ROCKCHIP_RESIZE_H_\n#define MODELBOX_FLOWUNIT_ROCKCHIP_RESIZE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"rockchip\";\nconstexpr const char *FLOWUNIT_NAME = \"resize\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A resize flowunit on rockchip device. \\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: \"\n    \"'pix_fmt': \"\n    \"[rgb_packed,bgr_packed], 'layout': [hwc]. \";\n\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *OUT_IMG = \"out_image\";\n\nclass ResizeFlowUnit : public modelbox::FlowUnit {\n public:\n  ResizeFlowUnit() = default;\n  ~ResizeFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  void WriteData(const std::shared_ptr<modelbox::DataContext> &data_ctx,\n                 const std::string &pix_fmt, RgaSURF_FORMAT rga_fmt,\n                 std::shared_ptr<modelbox::Buffer> &out_image);\n\n  uint32_t dest_width_{0};\n  uint32_t dest_height_{0};\n  uint32_t align_width_{0};\n  uint32_t align_height_{0};\n};\n\n#endif  // MODELBOX_FLOWUNIT_ROCKCHIP_RESIZE_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/resize/resize_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass RockchipResizeFlowUnitTest : public testing::Test {\n public:\n  RockchipResizeFlowUnitTest()\n      : driver_flow_(std::make_shared<MockFlow>()),\n        jpeg_decode_(std::make_shared<modelbox::MppJpegDecode>()) {}\n\n protected:\n  void SetUp() override {\n    // Test rockchip runtime\n    auto ret = jpeg_decode_->Init();\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_INFO << \"no rockchip device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n  std::shared_ptr<modelbox::MppJpegDecode> GetJpegDecode() {\n    return jpeg_decode_;\n 
 }\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n  std::shared_ptr<modelbox::MppJpegDecode> jpeg_decode_;\n};\n\nstd::shared_ptr<MockFlow> RockchipResizeFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(RockchipResizeFlowUnitTest, RunUnit) {\n  std::map<int, int> size_map = {{112, 110}, {160, 120}, {640, 480}};\n  for (auto &it : size_map) {\n    std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                               test_data_dir + \"\\\"]\\n\" +\n                               R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          resize[type=flowunit, flowunit=resize, device=rockchip, deviceid=0, image_width=)\" +\n                               std::to_string(it.first) +\n                               \", image_height=\" + std::to_string(it.second) +\n                               R\"(]\n\n          input -> resize:in_image\n          resize:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n    MBLOG_INFO << toml_content;\n\n    auto driver_flow = GetDriverFlow();\n    driver_flow->BuildAndRun(\"RunUnit\", toml_content, 10);\n\n    int w = 0;\n    int h = 0;\n\n    struct stat statbuf = {0};\n    stat((std::string(TEST_ASSETS) + \"/test.jpg\").c_str(), &statbuf);\n    FILE *fp_jpg =\n        fopen((std::string(TEST_ASSETS) + \"/test.jpg\").c_str(), \"rb\");\n    EXPECT_EQ(fp_jpg == nullptr, false);\n    std::shared_ptr<unsigned char> img_buf(\n        new (std::nothrow) unsigned char[statbuf.st_size + 1],\n        std::default_delete<unsigned char[]>());\n    EXPECT_EQ(img_buf == nullptr, false);\n    auto s_ret =\n        memset_s(img_buf.get(), statbuf.st_size + 1, 0, statbuf.st_size + 1);\n    EXPECT_EQ(s_ret, EOK);\n    auto jpg_size = fread(img_buf.get(), 1, statbuf.st_size, fp_jpg);\n    EXPECT_EQ(jpg_size, statbuf.st_size);\n    fclose(fp_jpg);\n    MppFrame 
frame = GetJpegDecode()->Decode(img_buf.get(), jpg_size, w, h);\n    EXPECT_EQ(frame == nullptr, false);\n    EXPECT_EQ(w, 400);\n    EXPECT_EQ(h, 300);\n\n    auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n    auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n    auto in_img_buffer_list = extern_data->CreateBufferList();\n    in_img_buffer_list->Build({img.total() * img.elemSize()});\n    auto in_img_buffer = in_img_buffer_list->At(0);\n    in_img_buffer->Set(\"width\", img.cols);\n    in_img_buffer->Set(\"height\", img.rows);\n    in_img_buffer->Set(\"width_stride\", img.cols * 3);\n    in_img_buffer->Set(\"height_stride\", img.rows);\n    in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n    auto e_ret =\n        memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                 img.data, img.total() * img.elemSize());\n\n    EXPECT_EQ(e_ret, 0);\n    auto status = extern_data->Send(\"input\", in_img_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n    // check output\n    OutputBufferList map_buffer_list;\n    status = extern_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n    auto output_buffer_list = map_buffer_list[\"output\"];\n    ASSERT_EQ(output_buffer_list->Size(), 1);\n    auto output_buffer = output_buffer_list->At(0);\n    ASSERT_EQ(output_buffer->GetBytes(), it.first * it.second * 3);\n\n    auto *mpp_buffer = (MppBuffer)output_buffer->ConstData();\n\n    int32_t out_width = 0;\n    int32_t out_height = 0;\n    int32_t out_width_stride = 0;\n    int32_t out_height_stride = 0;\n    std::string out_pix_fmt;\n    output_buffer->Get(\"width\", out_width);\n    output_buffer->Get(\"height\", out_height);\n    output_buffer->Get(\"pix_fmt\", out_pix_fmt);\n    output_buffer->Get(\"width_stride\", out_width_stride);\n    output_buffer->Get(\"height_stride\", out_height_stride);\n    ASSERT_EQ(out_width, it.first);\n    ASSERT_EQ(out_height, it.second);\n    
ASSERT_EQ(out_pix_fmt, std::string(\"bgr\"));\n    ASSERT_EQ(out_width_stride, it.first * 3);\n    ASSERT_EQ(out_height_stride, it.second);\n\n    int32_t total_out_size = it.first * it.second * 3;\n    std::shared_ptr<unsigned char> out_img_buf(\n        new (std::nothrow) unsigned char[total_out_size],\n        std::default_delete<unsigned char[]>());\n    e_ret = memset_s(out_img_buf.get(), total_out_size, 0, total_out_size);\n    EXPECT_EQ(e_ret, 0);\n\n    auto *rgbsrc = (uint8_t *)mpp_buffer_get_ptr(mpp_buffer);\n    auto *rgbdst = (uint8_t *)out_img_buf.get();\n\n    // copy to memory\n    for (int i = 0; i < out_height; i++) {\n      e_ret = memcpy_s(rgbdst, out_width * 3, rgbsrc, out_width * 3);\n      EXPECT_EQ(e_ret, 0);\n      rgbsrc += out_width * 3;\n      rgbdst += out_width * 3;\n    }\n\n    std::string out_file_name = std::string(TEST_ASSETS) + \"/rockchip_\" +\n                                std::to_string(it.first) + \"x\" +\n                                std::to_string(it.second) + \"_bgr\";\n    struct stat out_statbuf = {0};\n    stat(out_file_name.c_str(), &out_statbuf);\n    EXPECT_EQ(out_statbuf.st_size, total_out_size);\n\n    // load file\n    FILE *fp_out = fopen(out_file_name.c_str(), \"rb\");\n    EXPECT_EQ(fp_out == nullptr, false);\n\n    std::shared_ptr<unsigned char> out_file_img_buf(\n        new (std::nothrow) unsigned char[out_statbuf.st_size],\n        std::default_delete<unsigned char[]>());\n    e_ret = memset_s(out_file_img_buf.get(), out_statbuf.st_size, 0,\n                     out_statbuf.st_size);\n    EXPECT_EQ(e_ret, 0);\n\n    auto out_size =\n        fread(out_file_img_buf.get(), 1, out_statbuf.st_size, fp_out);\n\n    EXPECT_EQ(out_size, out_statbuf.st_size);\n    fclose(fp_out);\n\n    // cmp memory\n    EXPECT_EQ(\n        memcmp(out_img_buf.get(), out_file_img_buf.get(), out_statbuf.st_size),\n        0);\n\n    driver_flow->GetFlow()->Wait(3 * 1000);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/to_cpuimg/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"rk_cpuimg\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_RESIZE_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_RK_CPUIMG_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RK_CPUIMG_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RK_CPUIMG_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_RK_CPUIMG_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_MPP_INCLUDE})\nlist(APPEND TEST_INCLUDE ${ROCKCHIP_RGA_INCLUDE})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/to_cpuimg/mpp_to_cpu_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mpp_to_cpu_flowunit.h\"\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nMppToCpuFlowUnit::MppToCpuFlowUnit() = default;\nMppToCpuFlowUnit::~MppToCpuFlowUnit() = default;\n\nmodelbox::Status MppToCpuFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status MppToCpuFlowUnit::Close() { return modelbox::STATUS_SUCCESS; }\n\nstd::shared_ptr<modelbox::Buffer> MppToCpuFlowUnit::ProcessOneImage(\n    const std::shared_ptr<modelbox::Buffer> &in_img, std::string &pix_fmt,\n    int32_t w, int32_t h, int32_t ws, int32_t hs) {\n  RgaSURF_FORMAT rga_fmt = RK_FORMAT_UNKNOWN;\n  rga_fmt = modelbox::GetRGAFormat(pix_fmt);\n  if (rga_fmt == RK_FORMAT_UNKNOWN) {\n    MBLOG_ERROR << \"unsupport pix format, pix_fmt: \" << pix_fmt;\n    return nullptr;\n  }\n\n  auto *mpp_buf = (MppBuffer)(in_img->ConstData());\n  auto *cpu_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_buf);\n\n  auto device = this->GetBindDevice();\n  auto out_img = std::make_shared<modelbox::Buffer>(device);\n  out_img->CopyMeta(in_img);\n\n  if ((w == ws && h == hs) || (ws == 0 && hs == 0)) {\n    out_img->Build((void *)cpu_buf, in_img->GetBytes(), 
[](void *p) {});\n    out_img->Set(\"origin_buf\", in_img);\n    return out_img;\n  }\n\n  size_t total_size = 0;\n  int32_t div = 1;\n  auto ret = modelbox::STATUS_OK;\n  if (rga_fmt == RK_FORMAT_YCbCr_420_SP || rga_fmt == RK_FORMAT_YCrCb_420_SP) {\n    total_size = w * h * 3 / 2;\n    out_img->Build(total_size);\n    ret = modelbox::CopyNVMemory(cpu_buf, (uint8_t *)out_img->MutableData(), w,\n                                 h, ws, hs);\n    div = 2;\n  } else {\n    total_size = w * h * 3;\n    out_img->Build(total_size);\n    ret = modelbox::CopyRGBMemory(cpu_buf, (uint8_t *)out_img->MutableData(), w,\n                                  h, ws, hs);\n  }\n\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"copy image fail, reason: \" << ret.Errormsg();\n    return nullptr;\n  }\n\n  out_img->Set(\"width\", (int32_t)w);\n  out_img->Set(\"height\", (int32_t)h);\n  if (RK_FORMAT_BGR_888 == rga_fmt || RK_FORMAT_RGB_888 == rga_fmt) {\n    out_img->Set(\"width_stride\", (int32_t)(w * 3));\n  } else {\n    out_img->Set(\"width_stride\", (int32_t)w);\n  }\n  out_img->Set(\"height_stride\", (int32_t)h);\n  int32_t channel = 0;\n  in_img->Get(\"channel\", channel);\n  out_img->Set(\"shape\", std::vector<size_t>{(size_t)h * 3 / div, (size_t)w,\n                                            (size_t)channel});\n  return out_img;\n}\n\nmodelbox::Status MppToCpuFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto input_img_bufs = data_ctx->Input(IN_IMG);\n  auto output_bufs = data_ctx->Output(OUT_IMG);\n\n  for (size_t i = 0; i < input_img_bufs->Size(); ++i) {\n    std::shared_ptr<modelbox::Buffer> in_img = input_img_bufs->At(i);\n    std::string pix_fmt;\n\n    int32_t w = 0;\n    int32_t h = 0;\n    int32_t ws = 0;\n    int32_t hs = 0;\n    in_img->Get(\"pix_fmt\", pix_fmt);\n\n    in_img->Get(\"width\", w);\n    in_img->Get(\"height\", h);\n    in_img->Get(\"width_stride\", ws);\n    in_img->Get(\"height_stride\", hs);\n\n    auto out_img 
= ProcessOneImage(in_img, pix_fmt, w, h, ws, hs);\n    if (out_img == nullptr) {\n      auto msg = \"transfer image to cpu failed, index is \" + std::to_string(i);\n      MBLOG_ERROR << msg;\n      auto buffer = std::make_shared<modelbox::Buffer>();\n      buffer->SetError(\"MppToCpu.Failed\", msg);\n      output_bufs->PushBack(buffer);\n      continue;\n    }\n\n    output_bufs->PushBack(out_img);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(MppToCpuFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Image\");\n  desc.AddFlowUnitInput({IN_IMG, modelbox::DEVICE_TYPE});\n  desc.AddFlowUnitOutput({OUT_IMG, \"cpu\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(\"cpu\");\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/to_cpuimg/mpp_to_cpu_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_ROCKCHIP_MPP_TO_CPU_H_\n#define MODELBOX_FLOWUNIT_ROCKCHIP_MPP_TO_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_NAME = \"rk_cpuimg\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: transfer image from rockchip mpp to cpu image\\n\"\n    \"\\t@Port parameter: The input port buffer type and the output port buffer \"\n    \"type are image. 
\\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: \"\n    \"'pix_fmt': \"\n    \"[rgb_packed,bgr_packed], 'layout': [hwc]. \";\nconstexpr const char *IN_IMG = \"in_image\";\nconstexpr const char *OUT_IMG = \"out_image\";\n\nclass MppToCpuFlowUnit : public modelbox::FlowUnit {\n public:\n  MppToCpuFlowUnit();\n  ~MppToCpuFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  std::shared_ptr<modelbox::Buffer> ProcessOneImage(\n      const std::shared_ptr<modelbox::Buffer> &in_img, std::string &pix_fmt,\n      int32_t w, int32_t h, int32_t ws, int32_t hs);\n};\n\n#endif  // MODELBOX_FLOWUNIT_ROCKCHIP_MPP_TO_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/to_cpuimg/mpp_to_cpu_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <securec.h>\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <opencv2/opencv.hpp>\n#include <random>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass RockchipMppToCpuFlowUnitTest : public testing::Test {\n public:\n  RockchipMppToCpuFlowUnitTest()\n      : driver_flow_(std::make_shared<MockFlow>()),\n        jpeg_decode_(std::make_shared<modelbox::MppJpegDecode>()) {}\n\n protected:\n  void SetUp() override {\n    // Test rockchip runtime\n    auto ret = jpeg_decode_->Init();\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_INFO << \"no rockchip device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override { driver_flow_ = nullptr; };\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS;\n\n  std::shared_ptr<modelbox::MppJpegDecode> GetJpegDecode() {\n    return 
jpeg_decode_;\n  }\n\n private:\n  std::shared_ptr<MockFlow> driver_flow_;\n  std::shared_ptr<modelbox::MppJpegDecode> jpeg_decode_;\n};\n\nstd::shared_ptr<MockFlow> RockchipMppToCpuFlowUnitTest::GetDriverFlow() {\n  return driver_flow_;\n}\n\nTEST_F(RockchipMppToCpuFlowUnitTest, RunUnit) {\n  std::map<int, int> size_map = {{112, 110}, {160, 120}, {640, 480}};\n  for (auto &it : size_map) {\n    std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\",\\\"\" +\n                               test_data_dir + \"\\\"]\\n\" +\n                               R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          output[type=output]\n          resize[type=flowunit, flowunit=resize, device=rockchip, deviceid=0, image_width=)\" +\n                               std::to_string(it.first) +\n                               \", image_height=\" + std::to_string(it.second) +\n                               R\"(]\n          rk_cpuimg[type=flowunit, flowunit=rk_cpuimg, device=cpu, deviceid=0]\n          input -> resize:in_image\n          resize:out_image -> rk_cpuimg:in_image\n          rk_cpuimg:out_image -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n    MBLOG_INFO << toml_content;\n\n    auto driver_flow = GetDriverFlow();\n    driver_flow->BuildAndRun(\"RunUnit\", toml_content, 10);\n\n    int w = 0;\n    int h = 0;\n\n    struct stat statbuf = {0};\n    stat((std::string(TEST_ASSETS) + \"/test.jpg\").c_str(), &statbuf);\n    FILE *fp_jpg =\n        fopen((std::string(TEST_ASSETS) + \"/test.jpg\").c_str(), \"rb\");\n    EXPECT_EQ(fp_jpg == nullptr, false);\n    std::shared_ptr<unsigned char> img_buf(\n        new (std::nothrow) unsigned char[statbuf.st_size + 1],\n        std::default_delete<unsigned char[]>());\n    EXPECT_EQ(img_buf == nullptr, false);\n    auto s_ret =\n        memset_s(img_buf.get(), statbuf.st_size + 1, 0, statbuf.st_size + 1);\n    EXPECT_EQ(s_ret, EOK);\n  
  auto jpg_size = fread(img_buf.get(), 1, statbuf.st_size, fp_jpg);\n    EXPECT_EQ(jpg_size, statbuf.st_size);\n    fclose(fp_jpg);\n    MppFrame frame = GetJpegDecode()->Decode(img_buf.get(), jpg_size, w, h);\n    EXPECT_EQ(frame == nullptr, false);\n    EXPECT_EQ(w, 400);\n    EXPECT_EQ(h, 300);\n\n    auto img = cv::imread(std::string(TEST_ASSETS) + \"/test.jpg\");\n    auto extern_data = driver_flow->GetFlow()->CreateExternalDataMap();\n    auto in_img_buffer_list = extern_data->CreateBufferList();\n    in_img_buffer_list->Build({img.total() * img.elemSize()});\n    auto in_img_buffer = in_img_buffer_list->At(0);\n    in_img_buffer->Set(\"width\", img.cols);\n    in_img_buffer->Set(\"height\", img.rows);\n    in_img_buffer->Set(\"width_stride\", img.cols * 3);\n    in_img_buffer->Set(\"height_stride\", img.rows);\n    in_img_buffer->Set(\"pix_fmt\", std::string(\"bgr\"));\n    auto e_ret =\n        memcpy_s(in_img_buffer->MutableData(), in_img_buffer->GetBytes(),\n                 img.data, img.total() * img.elemSize());\n    EXPECT_EQ(e_ret, 0);\n    auto status = extern_data->Send(\"input\", in_img_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n    // check output\n    OutputBufferList map_buffer_list;\n    status = extern_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_OK);\n    auto output_buffer_list = map_buffer_list[\"output\"];\n    ASSERT_EQ(output_buffer_list->Size(), 1);\n    auto output_buffer = output_buffer_list->At(0);\n    ASSERT_EQ(output_buffer->GetBytes(), it.first * it.second * 3);\n\n    int32_t out_width = 0;\n    int32_t out_height = 0;\n    int32_t out_width_stride = 0;\n    int32_t out_height_stride = 0;\n    std::string out_pix_fmt;\n    output_buffer->Get(\"width\", out_width);\n    output_buffer->Get(\"height\", out_height);\n    output_buffer->Get(\"pix_fmt\", out_pix_fmt);\n    output_buffer->Get(\"width_stride\", out_width_stride);\n    output_buffer->Get(\"height_stride\", out_height_stride);\n    
ASSERT_EQ(out_width, it.first);\n    ASSERT_EQ(out_height, it.second);\n    ASSERT_EQ(out_pix_fmt, std::string(\"bgr\"));\n    ASSERT_EQ(out_width_stride, it.first * 3);\n    ASSERT_EQ(out_height_stride, it.second);\n\n    int32_t total_out_size = it.first * it.second * 3;\n    std::shared_ptr<unsigned char> out_img_buf(\n        new (std::nothrow) unsigned char[total_out_size],\n        std::default_delete<unsigned char[]>());\n    e_ret = memset_s(out_img_buf.get(), total_out_size, 0, total_out_size);\n    EXPECT_EQ(e_ret, 0);\n\n    // copy to memory\n    e_ret = memcpy_s(out_img_buf.get(), output_buffer->GetBytes(),\n                     output_buffer->ConstData(), output_buffer->GetBytes());\n    EXPECT_EQ(e_ret, 0);\n\n    std::string out_file_name = std::string(TEST_ASSETS) + \"/rockchip_\" +\n                                std::to_string(it.first) + \"x\" +\n                                std::to_string(it.second) + \"_bgr\";\n    struct stat out_statbuf = {0};\n    stat(out_file_name.c_str(), &out_statbuf);\n    EXPECT_EQ(out_statbuf.st_size, total_out_size);\n\n    // load file\n    FILE *fp_out = fopen(out_file_name.c_str(), \"rb\");\n    EXPECT_EQ(fp_out == nullptr, false);\n\n    std::shared_ptr<unsigned char> out_file_img_buf(\n        new (std::nothrow) unsigned char[out_statbuf.st_size],\n        std::default_delete<unsigned char[]>());\n    e_ret = memset_s(out_file_img_buf.get(), out_statbuf.st_size, 0,\n                     out_statbuf.st_size);\n    EXPECT_EQ(e_ret, 0);\n\n    auto out_size =\n        fread(out_file_img_buf.get(), 1, out_statbuf.st_size, fp_out);\n\n    EXPECT_EQ(out_size, out_statbuf.st_size);\n    fclose(fp_out);\n\n    // cmp memory\n    EXPECT_EQ(\n        memcmp(out_img_buf.get(), out_file_img_buf.get(), out_statbuf.st_size),\n        0);\n\n    driver_flow->GetFlow()->Wait(3 * 1000);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"video_decoder\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n        SOVERSION ${MODELBOX_VERSION_MAJOR}\n        VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT rockchip-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER}\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT rockchip-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_LOCALCAM_ROCKCHIP_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${RKMPP_LIBRARIES})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(TEST_INCLUDE ${TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/rk_video_decoder.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"rk_video_decoder.h\"\n\n#include <unistd.h>\n\n#include \"modelbox/base/log.h\"\n\nconstexpr uint32_t DEC_RETRYS = 10;\nconstexpr uint32_t DEC_DELAY_TIMES = 100 * 1000;\nconstexpr uint32_t RETRY_DELAY_TIMES = 10 * 1000;\nconstexpr uint32_t ERROR_LOG_TIMES = 100;\n\nRKNPUVideoDecoder::~RKNPUVideoDecoder() {\n  running_ = false;\n\n  if (cfg_) {\n    mpp_enc_cfg_deinit(cfg_);  // todo: here or Init_Config?\n  }\n  if (rk_api_ && codec_ctx_) {\n    rk_api_->reset(codec_ctx_);\n  }\n  if (codec_ctx_) {\n    mpp_destroy(codec_ctx_);\n    codec_ctx_ = nullptr;\n  }\n  if (frm_grp_) {\n    mpp_buffer_group_put(frm_grp_);\n    frm_grp_ = nullptr;\n  }\n}\n\nmodelbox::Status RKNPUVideoDecoder::InitDecoder(MppCodingType codec_type) {\n  auto ret = mpp_create(&codec_ctx_, &rk_api_);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to run mpp_create: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  // rockchip codec init\n  RK_U32 need_split = 1;\n  ret =\n      rk_api_->control(codec_ctx_, MPP_DEC_SET_PARSER_SPLIT_MODE, &need_split);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to set MPP_DEC_SET_PARSER_SPLIT_MODE: \") +\n               std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  
}\n\n  RK_U32 timeout = 0;\n  ret = rk_api_->control(codec_ctx_, MPP_SET_OUTPUT_TIMEOUT, &timeout);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"Failed to set output timeout 0 fail: \") +\n               std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  ret = mpp_init(codec_ctx_, MPP_CTX_DEC, codec_type);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to run mpp_init: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  mpp_dec_cfg_init(&cfg_);\n  /*\n   * split_parse enables the mpp internal frame splitter when the\n   * input packet is not already split into frames.\n   */\n  ret = mpp_dec_cfg_set_u32(cfg_, \"base:split_parse\", need_split);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to set split_parse: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  ret = rk_api_->control(codec_ctx_, MPP_DEC_SET_CFG, cfg_);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"failed to set cfg: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  running_ = true;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RKNPUVideoDecoder::Init(AVCodecID codec_id) {\n  const std::map<AVCodecID, MppCodingType> codectype_map = {\n      {AV_CODEC_ID_H264, MPP_VIDEO_CodingAVC},\n      {AV_CODEC_ID_HEVC, MPP_VIDEO_CodingHEVC}};\n\n  auto iter = codectype_map.find(codec_id);\n  if (iter == codectype_map.end()) {\n    auto msg = std::string(\"Not support codec type: \") +\n               std::to_string(codec_id) +\n               \" support only AV_CODEC_ID_H264 or AV_CODEC_ID_HEVC\";\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_NOTSUPPORT, msg};\n  }\n\n  err_number_ = 0;\n  return InitDecoder(iter->second);\n}\n\nvoid RKNPUVideoDecoder::SetPacket(MppPacket &packet, uint8_t *inData,\n                                  size_t inSize) {\n  
mpp_packet_set_data(packet, inData);\n  mpp_packet_set_size(packet, inSize);\n  mpp_packet_set_pos(packet, inData);\n  mpp_packet_set_length(packet, inSize);\n  if (inSize == 0 || inData == nullptr) {\n    mpp_packet_set_eos(packet);\n  }\n}\n\nmodelbox::Status RKNPUVideoDecoder::InfoChange(MppFrame &frame) {\n  auto buf_size = mpp_frame_get_buf_size(frame);\n  if (buf_size <= 0) {\n    auto msg = std::string(\"get mpp frame get buf size failed: \") +\n               std::to_string(buf_size);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  if (frm_grp_ == nullptr) {\n    /* If buffer group is not set create one and limit it */\n    auto ret = mpp_buffer_group_get_internal(&frm_grp_, MPP_BUFFER_TYPE_ION);\n    if (ret != MPP_OK) {\n      auto msg =\n          std::string(\"get mpp buffer group failed: \") + std::to_string(ret);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n\n    /* Set buffer to mpp decoder */\n    ret = rk_api_->control(codec_ctx_, MPP_DEC_SET_EXT_BUF_GROUP, frm_grp_);\n    if (ret != MPP_OK) {\n      auto msg = std::string(\"set buffer group failed: \") + std::to_string(ret);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  } else {\n    /* If old buffer group exist clear it */\n    auto ret = mpp_buffer_group_clear(frm_grp_);\n    if (ret != MPP_OK) {\n      auto msg =\n          std::string(\"clear buffer group failed: \") + std::to_string(ret);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    }\n  }\n\n  /* Use limit config to limit buffer count to 16~24 with buf_size */\n  auto ret =\n      mpp_buffer_group_limit_config(frm_grp_, buf_size, DEC_BUF_LIMIT * 2);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"limit buffer group failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  /*\n   * All buffer group config done. 
Set info change ready to let\n   * decoder continue decoding\n   */\n  ret = rk_api_->control(codec_ctx_, MPP_DEC_SET_INFO_CHANGE_READY, nullptr);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"info change ready failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  mpp_frame_deinit(&frame);\n  frame = nullptr;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RKNPUVideoDecoder::GetDecFrame(MppFrame &frame) {\n  frame = nullptr;\n  auto ret = rk_api_->decode_get_frame(codec_ctx_, &frame);\n  if (MPP_ERR_TIMEOUT == ret) {\n    MBLOG_DEBUG << \"decode_get_frame failed too much time\";\n    return {modelbox::STATUS_FAULT, \"decode_get_frame failed too much time\"};\n  }\n\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"decode_get_frame failed: \" << ret;\n    return {modelbox::STATUS_FAULT, \"decode_get_frame failed\"};\n  }\n\n  if (ret == MPP_OK && frame == nullptr) {\n    // ok, no more frames; return STATUS_NODATA to exit the caller's loop, not an error\n    return modelbox::STATUS_NODATA;\n  }\n\n  if (mpp_frame_get_info_change(frame)) {\n    return InfoChange(frame);\n  }\n\n  RK_U32 err_info = mpp_frame_get_errinfo(frame);\n  if (err_info != MPP_OK) {\n    err_number_++;\n    if (err_number_ % ERROR_LOG_TIMES == 0) {\n      MBLOG_WARN << \"frame error: \" << err_info;\n    }\n    mpp_frame_deinit(&frame);\n    frame = nullptr;\n    // do not return STATUS_FAULT, just skip\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status RKNPUVideoDecoder::SendDecBuf(MppPacket &packet) {\n  MPP_RET ret = MPP_OK;\n  int times = DEC_RETRYS;\n\n  while (running_ && times-- > 0) {\n    ret = rk_api_->decode_put_packet(codec_ctx_, packet);\n    if (MPP_OK == ret || MPP_ERR_BUFFER_FULL == ret) {\n      break;\n    }\n\n    usleep(DEC_DELAY_TIMES);\n  }\n\n  if (MPP_OK == ret) {\n    return modelbox::STATUS_OK;\n  }\n\n  if (MPP_ERR_BUFFER_FULL == ret) {\n    usleep(RETRY_DELAY_TIMES);\n    return modelbox::STATUS_AGAIN;\n  
}\n\n  MBLOG_ERROR << \"send decode frame fail: \" << ret;\n  return modelbox::STATUS_FAULT;\n}\n\nvoid RKNPUVideoDecoder::GetLimitDecFrame(std::vector<MppFrame> &out_frame,\n                                         size_t max_frames) {\n  MppFrame frame = nullptr;\n  while (running_ && out_frame.size() < max_frames &&\n         GetDecFrame(frame) == modelbox::STATUS_OK) {\n    // inData=null 意味着是最后一帧， 全部获取完， 然后丢弃\n    if (frame != nullptr) {\n      out_frame.push_back(frame);\n    }\n  }\n}\n\nmodelbox::Status RKNPUVideoDecoder::DecodeFrameBuf(\n    const uint8_t *inData, size_t inSize, std::vector<MppFrame> &out_frame,\n    size_t max_frames) {\n  MppPacket packet;\n  modelbox::Status ret = modelbox::STATUS_OK;\n  auto mppret = mpp_packet_init(&packet, nullptr, 0);\n  if (mppret != MPP_OK) {\n    auto msg = std::string(\"mpp_packet_init failed: \") + std::to_string(mppret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  Defer { mpp_packet_deinit(&packet); };\n  SetPacket(packet, (uint8_t *)inData, inSize);\n\n  do {\n    GetLimitDecFrame(out_frame, (inData == nullptr) ? INT_MAX : max_frames);\n  } while ((ret = SendDecBuf(packet)) == modelbox::STATUS_AGAIN);\n\n  // if last data, must get all frames, but send only max_frames to avoid block\n  if (out_frame.size() > max_frames) {\n    out_frame.resize(max_frames);\n  }\n\n  return ret;\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/rk_video_decoder.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_RK_VIDEO_DECODER_H_\n#define MODELBOX_RK_VIDEO_DECODER_H_\n\n#include <libavformat/avformat.h>\n#include <modelbox/base/status.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n\n#include <vector>\n\n#include \"rga.h\"\n#include \"rk_mpi.h\"\n#include \"rk_type.h\"\n\nconstexpr uint32_t DEC_BUF_LIMIT = 8;\n\nclass RKNPUVideoDecoder {\n public:\n  RKNPUVideoDecoder() = default;\n  virtual ~RKNPUVideoDecoder();\n\n  modelbox::Status Init(AVCodecID codec_id);\n  modelbox::Status DecodeFrameBuf(const uint8_t *inData, size_t inSize,\n                                  std::vector<MppFrame> &out_frame,\n                                  size_t max_frames);\n\n private:\n  modelbox::Status InitDecoder(MppCodingType codec_type);\n  void SetPacket(MppPacket &packet, uint8_t *inData, size_t inSize);\n  modelbox::Status InfoChange(MppFrame &frame);\n  modelbox::Status GetDecFrame(MppFrame &frame);\n  modelbox::Status SendDecBuf(MppPacket &packet);\n  void GetLimitDecFrame(std::vector<MppFrame> &out_frame, size_t max_frames);\n\n private:\n  uint32_t err_number_ = 0;\n  bool running_ = false;\n  MppCtx codec_ctx_ = nullptr;\n  MppApi *rk_api_ = nullptr;\n  MppBufferGroup frm_grp_ = nullptr;\n  MppEncCfg cfg_ = nullptr;\n};\n\n#endif  // MODELBOX_RK_VIDEO_DECODER_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/video_decoder_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_decoder_flowunit.h\"\n\n#include <securec.h>\n\n#include <string>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n#include \"rk_video_decoder.h\"\n\nconstexpr uint32_t MAX_PACKAGE_NUM = 2;\n\nVideoDecoderFlowUnit::VideoDecoderFlowUnit() = default;\nVideoDecoderFlowUnit::~VideoDecoderFlowUnit() = default;\n\nmodelbox::Status VideoDecoderFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  out_pix_fmt_str_ = opts->GetString(\"pix_fmt\", modelbox::IMG_DEFAULT_FMT);\n  MBLOG_INFO << \"RKNPU video_decoder with \" << out_pix_fmt_str_;\n\n  out_pix_fmt_ = modelbox::GetRGAFormat(out_pix_fmt_str_);\n  if (out_pix_fmt_ == RK_FORMAT_UNKNOWN) {\n    MBLOG_ERROR << \"Not support pix fmt \" << out_pix_fmt_str_;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  queue_size_ = opts->GetUint64(\"queue_size\", DEC_BUF_LIMIT / 2);\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoDecoderFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  MBLOG_INFO << \"Video Decode DataPre\";\n  auto in_meta = ctx->GetInputMeta(VIDEO_PACKET_INPUT);\n  
auto codec_id =\n      std::static_pointer_cast<AVCodecID>(in_meta->GetMeta(CODEC_META));\n  if (codec_id == nullptr) {\n    MBLOG_ERROR << \"Stream codec id is null, init decoder failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto video_decoder = std::make_shared<RKNPUVideoDecoder>();\n  auto ret = video_decoder->Init(*codec_id);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Video decoder init failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto frame_index = std::make_shared<int64_t>();\n  *frame_index = 0;\n  ctx->SetPrivate(DECODER_CTX, video_decoder);\n  ctx->SetPrivate(FRAME_INDEX_CTX, frame_index);\n  return modelbox::STATUS_OK;\n};\n\nmodelbox::Status VideoDecoderFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  MBLOG_DEBUG << \"rknpu Decode DataPost\";\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoDecoderFlowUnit::WriteData(\n    const std::shared_ptr<modelbox::DataContext> &ctx,\n    std::shared_ptr<modelbox::Buffer> &pack_buff,\n    std::vector<MppFrame> &out_frame) {\n  int32_t rate_num = 0;\n  int32_t rate_den = 0;\n  int64_t duration = 0;\n  pack_buff->Get(\"rate_num\", rate_num);\n  pack_buff->Get(\"rate_den\", rate_den);\n  pack_buff->Get(\"duration\", duration);\n  double time_base = 0;\n  pack_buff->Get(\"time_base\", time_base);\n\n  auto output_bufs = ctx->Output(FRAME_INFO_OUTPUT);\n  auto frame_index =\n      std::static_pointer_cast<int64_t>(ctx->GetPrivate(FRAME_INDEX_CTX));\n\n  for (auto &frame : out_frame) {\n    auto pts = (int64_t)(mpp_frame_get_pts(frame) * time_base);\n\n    auto buffer = modelbox::ColorChange(frame, out_pix_fmt_, GetBindDevice());\n    // out_frame[i] may be deinit after ColorChange\n    if (buffer == nullptr) {\n      MBLOG_ERROR << \"failed to ColorChange\";\n      continue;\n    }\n\n    buffer->Set(\"index\", *frame_index);\n    *frame_index = *frame_index + 1;\n\n    buffer->Set(\"pix_fmt\", out_pix_fmt_str_);\n    
buffer->Set(\"rate_num\", rate_num);\n    buffer->Set(\"rate_den\", rate_den);\n    buffer->Set(\"duration\", duration);\n    buffer->Set(\"timestamp\", pts);\n    buffer->Set(\"eos\", false);\n\n    output_bufs->PushBack(buffer);\n  }\n  return modelbox::STATUS_SUCCESS;\n}\n\n// note: it will block for a while (10s) if buffer not release, so must set\n// enough thread num\nmodelbox::Status VideoDecoderFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  auto video_decoder =\n      std::static_pointer_cast<RKNPUVideoDecoder>(ctx->GetPrivate(DECODER_CTX));\n  if (video_decoder == nullptr) {\n    MBLOG_ERROR << \"Video decoder is not init\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto video_packet_input = ctx->Input(VIDEO_PACKET_INPUT);\n  if (video_packet_input == nullptr) {\n    MBLOG_ERROR << \"video packet input is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (video_packet_input->Size() == 0 ||\n      video_packet_input->Size() > MAX_PACKAGE_NUM) {\n    MBLOG_ERROR << \"input size not right: \" << video_packet_input->Size()\n                << \", set demuxer queue size: 1 ~ \" << MAX_PACKAGE_NUM;\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::lock_guard<std::mutex> lk(rk_dec_mtx_);\n  for (size_t i = 0; i < video_packet_input->Size(); ++i) {\n    auto packet_buffer = video_packet_input->At(i);\n    std::vector<MppFrame> out_frame;\n\n    auto size = packet_buffer->GetBytes();\n    if (size <= 1) {\n      video_decoder->DecodeFrameBuf(nullptr, 0, out_frame, queue_size_);\n    } else {\n      video_decoder->DecodeFrameBuf((const uint8_t *)packet_buffer->ConstData(),\n                                    size, out_frame, queue_size_);\n    }\n\n    if (out_frame.size() > 0) {\n      WriteData(ctx, packet_buffer, out_frame);\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nMODELBOX_FLOWUNIT(VideoDecoderFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  
desc.AddFlowUnitInput({VIDEO_PACKET_INPUT, \"cpu\"});\n  desc.AddFlowUnitOutput({FRAME_INFO_OUTPUT, modelbox::DEVICE_TYPE});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetInputContiguous(false);\n  desc.SetResourceNice(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"pix_fmt\", \"string\", true, modelbox::IMG_DEFAULT_FMT, \"the pix format\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(modelbox::DEVICE_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(MODELBOX_VERSION_STR_MACRO);\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/video_decoder_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_DECODER_ROCKCHIP_H_\n#define MODELBOX_FLOWUNIT_VIDEO_DECODER_ROCKCHIP_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n#include <modelbox/device/rockchip/rockchip_memory.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#include <algorithm>\n#include <atomic>\n#include <thread>\n\n#include \"rga.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"video_decoder\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video decoder flowunit on rockchip. 
\\n\"\n    \"\\t@Port parameter: The input port buffer type is video_packet, the output \"\n    \"port buffer type is video_frame.\\n\"\n    \"\\t  The video_packet buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: time_base,     Type: double\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t  The video_frame buffer contain the following meta fields:\\n\"\n    \"\\t\\tField Name: index,         Type: int64_t\\n\"\n    \"\\t\\tField Name: rate_num,      Type: int32_t\\n\"\n    \"\\t\\tField Name: rate_den,      Type: int32_t\\n\"\n    \"\\t\\tField Name: duration,      Type: int64_t\\n\"\n    \"\\t\\tField Name: timestamp,     Type: int64_t\\n\"\n    \"\\t\\tField Name: eos,           Type: bool\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The flowuint 'video_decoder' must be used pair \"\n    \"with 'video_demuxer. 
the output buffer meta fields 'pix_fmt' is \"\n    \"'brg_packed' or 'rgb_packed', 'layout' is 'hcw'.\";\nconstexpr const char *VIDEO_PACKET_INPUT = \"in_video_packet\";\nconstexpr const char *FRAME_INFO_OUTPUT = \"out_video_frame\";\nconstexpr const char *CODEC_META = \"codec_meta\";\nconstexpr const char *DECODER_CTX = \"decoder_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\n\nclass VideoDecoderFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoDecoderFlowUnit();\n  ~VideoDecoderFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n  modelbox::Status Close() override;\n  modelbox::Status Process(std::shared_ptr<modelbox::DataContext> ctx) override;\n  modelbox::Status DataPre(std::shared_ptr<modelbox::DataContext> ctx) override;\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> ctx) override;\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> ctx) override {\n    return modelbox::STATUS_OK;\n  }\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> ctx) override {\n    return modelbox::STATUS_OK;\n  }\n\n private:\n  modelbox::Status WriteData(const std::shared_ptr<modelbox::DataContext> &ctx,\n                             std::shared_ptr<modelbox::Buffer> &pack_buff,\n                             std::vector<MppFrame> &out_frame);\n\n private:\n  size_t queue_size_{0};\n  std::string out_pix_fmt_str_;\n  RgaSURF_FORMAT out_pix_fmt_{RK_FORMAT_YCbCr_420_SP};\n  std::mutex rk_dec_mtx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_DECODER_ROCKCHIP_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_decoder/video_decoder_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"common/video_decoder/video_decoder_mock.h\"\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/rockchip/rockchip_api.h\"\n\nnamespace modelbox {\nclass VideoDecoderRockchipFlowUnitTest : public testing::Test {\n public:\n  VideoDecoderRockchipFlowUnitTest()\n      : flow_(std::make_shared<MockFlow>()),\n        jpeg_decode_(std::make_shared<modelbox::MppJpegDecode>()) {}\n\n protected:\n  void SetUp() override {\n    auto ret = jpeg_decode_->Init();\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_INFO << \"no rockchip device, skip test suit\";\n      GTEST_SKIP();\n    }\n  }\n\n  void TearDown() override {}\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n  std::shared_ptr<modelbox::MppJpegDecode> jpeg_decode_;\n\n  void StartFlow(const std::string& toml_content, uint64_t millisecond);\n};\n\nvoid VideoDecoderRockchipFlowUnitTest::StartFlow(\n    const std::string& toml_content, const uint64_t millisecond) {\n  auto ret = videodecoder::AddMockFlowUnit(flow_);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  ret = flow_->BuildAndRun(\"decoder\", 
toml_content, millisecond);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n}\n\nTEST_F(VideoDecoderRockchipFlowUnitTest, rockchipDecoderNv12Test) {\n  auto toml_content = videodecoder::GetTomlConfig(\"rockchip\", \"nv12\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderRockchipFlowUnitTest, rockchipDecoderRgbTest) {\n  auto toml_content = videodecoder::GetTomlConfig(\"rockchip\", \"rgb\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\nTEST_F(VideoDecoderRockchipFlowUnitTest, rockchipDecoderBgrTest) {\n  auto toml_content = videodecoder::GetTomlConfig(\"rockchip\", \"bgr\");\n  StartFlow(toml_content, 5 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"rockchip\")\nset(UNIT_NAME \"video_out\")\n\nif(NOT FFMPEG_FOUND)\n    message(STATUS \"Not found ffmpeg, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\nfind_package(FFMPEG)\nfind_package(OpenCV)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ROCKCHIP_INCLUDE})\ninclude_directories(${ROCKCHIP_MPP_INCLUDE})\ninclude_directories(${ROCKCHIP_RGA_INCLUDE})\ninclude_directories(${FFMPEG_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_VIDEO_DECODE_INCLUDE})\ninclude_directories(${MODELBOX_ROCKCHIP_COMMON_FFMPEG_VIDEO_INCLUDE})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_VIDEO_DECODER_CPU_SHARED 
${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES\n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_ROCKCHIP_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${FFMPEG_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_VIDEO_DECODE_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_ROCKCHIP_COMMON_FFMPEG_VIDEO_LIBRARY})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${OpenCV_LIBS})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n    COMPONENT rockchip-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL)\n\ninstall(DIRECTORY ${HEADER}\n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_VIDEO_ENCODER_CPU_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n# driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL 
\"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/ffmpeg_video_encoder.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"ffmpeg_video_encoder.h\"\n\n#include <modelbox/base/log.h>\n\n#include \"modelbox/device/rockchip/rockchip_memory.h\"\n#include \"securec.h\"\n\n#define ENCODER_PROFILE 66\n#define RK_POLL_TIMEOUT 500\n\nmodelbox::Status FfmpegVideoEncoder::Init_PrepConfig() {\n  MppEncPrepCfg prep_cfg;\n  (void)memset_s(&prep_cfg, sizeof(MppEncPrepCfg), 0, sizeof(MppEncPrepCfg));\n\n  prep_cfg.change = MPP_ENC_PREP_CFG_CHANGE_INPUT |\n                    MPP_ENC_PREP_CFG_CHANGE_ROTATION |\n                    MPP_ENC_PREP_CFG_CHANGE_FORMAT;\n  prep_cfg.width = width_;\n  prep_cfg.height = height_;\n  prep_cfg.hor_stride = alignW_;\n  prep_cfg.ver_stride = alignH_;\n  prep_cfg.format = MPP_FMT_YUV420SP;\n  prep_cfg.rotation = MPP_ENC_ROT_0;\n  auto ret = rk_api_->control(codec_ctx_, MPP_ENC_SET_PREP_CFG, &prep_cfg);\n  if (ret) {\n    MBLOG_ERROR << \"mpi control enc set prep cfg failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc set prep cfg failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Init_RcConfig() {\n  MppEncRcCfg rc_cfg = {0};\n  (void)memset_s(&rc_cfg, sizeof(MppEncRcCfg), 0, sizeof(MppEncRcCfg));\n\n  rc_cfg.change = MPP_ENC_RC_CFG_CHANGE_ALL;\n  rc_cfg.rc_mode = MPP_ENC_RC_MODE_AVBR;\n  rc_cfg.quality = 
MPP_ENC_RC_QUALITY_MEDIUM;\n\n  int wh = width_ * height_;\n  if (wh <= 640 * 480) {\n    bps_ = (int)(wh * 3.5);\n  } else {\n    bps_ = (int)(wh * (codec_type_ == MPP_VIDEO_CodingHEVC ? 2.5 : 3));\n  }\n\n  if (rc_cfg.rc_mode == MPP_ENC_RC_MODE_CBR) {\n    /* constant bitrate has very small bps_ range of 1/16 bps_ */\n    rc_cfg.bps_target = bps_;\n    rc_cfg.bps_max = bps_ * 17 / 16;\n    rc_cfg.bps_min = bps_ * 15 / 16;\n  } else if (rc_cfg.rc_mode == MPP_ENC_RC_MODE_AVBR) {\n    /* variable bitrate has large bps_ range */\n    rc_cfg.bps_target = bps_;\n    rc_cfg.bps_max = bps_ * 17 / 16;\n    rc_cfg.bps_min = bps_ * 1 / 16;\n  }\n\n  /* fix input / output frame rate */\n  rc_cfg.fps_in_flex = 0;\n  rc_cfg.fps_in_num = fps_;\n  rc_cfg.fps_in_denorm = fps_den_;\n  rc_cfg.fps_out_flex = 0;\n  rc_cfg.fps_out_num = fps_;\n  rc_cfg.fps_out_denorm = fps_den_;\n\n  auto fgop = fps_ * 1.0 / fps_den_ + 0.5;\n  rc_cfg.gop = (int)(fgop)*2;\n  rc_cfg.max_reenc_times = 0;\n\n  int qp_init = 26;\n  int qp_max = 0;\n  int qp_min = 0;\n  int qp_step = 0;\n\n  if (rc_cfg.rc_mode == MPP_ENC_RC_MODE_CBR) {\n    /* constant bitrate do not limit qp range */\n    qp_max = 48;\n    qp_min = 4;\n    qp_step = 16;\n    qp_init = 0;\n  } else if (rc_cfg.rc_mode == MPP_ENC_RC_MODE_AVBR) {\n    /* variable bitrate has qp min limit */\n    qp_max = 48;\n    qp_min = 10;\n    qp_step = 4;\n    qp_init = 10;\n  }\n\n  rc_cfg.qp_max = qp_max;\n  rc_cfg.qp_min = qp_min;\n  rc_cfg.qp_max_i = qp_max;\n  rc_cfg.qp_min_i = qp_min;\n  rc_cfg.qp_init = qp_init;\n  rc_cfg.qp_max_step = qp_step;\n  rc_cfg.qp_delta_ip = 4;\n  rc_cfg.qp_delta_vi = 2;\n\n  auto ret = rk_api_->control(codec_ctx_, MPP_ENC_SET_RC_CFG, &rc_cfg);\n  if (ret) {\n    MBLOG_ERROR << \"mpi control enc set rc cfg failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc set rc cfg failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Init_CodecConfig() {\n  
MppEncCodecCfg codec_cfg;\n  (void)memset_s(&codec_cfg, sizeof(MppEncCodecCfg), 0, sizeof(MppEncCodecCfg));\n\n  codec_cfg.coding = codec_type_;\n  switch (codec_cfg.coding) {\n    case MPP_VIDEO_CodingAVC: {\n      codec_cfg.h264.change =\n          MPP_ENC_H264_CFG_CHANGE_PROFILE | MPP_ENC_H264_CFG_CHANGE_ENTROPY |\n          MPP_ENC_H264_CFG_CHANGE_TRANS_8x8 | MPP_ENC_H264_CFG_CHANGE_QP_LIMIT;\n\n      codec_cfg.h264.profile = ENCODER_PROFILE;\n      codec_cfg.h264.level = (width_ > 1280) ? 40 : 31;\n      codec_cfg.h264.entropy_coding_mode = 0;  // baseline=0, others=1\n      codec_cfg.h264.cabac_init_idc = 0;\n      codec_cfg.h264.transform8x8_mode = 0;  // baseline=0, others=1\n    } break;\n    case MPP_VIDEO_CodingMJPEG:\n    case MPP_VIDEO_CodingHEVC:\n    case MPP_VIDEO_CodingVP8:\n    default: {\n      auto msg = std::string(\"unsupport encoder coding type =\") +\n                 std::to_string(codec_cfg.coding);\n      MBLOG_ERROR << msg;\n      return {modelbox::STATUS_FAULT, msg};\n    } break;\n  }\n\n  auto ret = rk_api_->control(codec_ctx_, MPP_ENC_SET_CODEC_CFG, &codec_cfg);\n  if (ret) {\n    MBLOG_ERROR << \"mpi control enc set codec cfg failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc set codec cfg failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Init_Config() {\n  auto ret = mpp_enc_cfg_init(&cfg_);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"mpi control enc get cfg failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc get cfg failed\"};\n  }\n\n  auto ret_config = Init_PrepConfig();\n  if (ret_config != modelbox::STATUS_SUCCESS) {\n    return ret_config;\n  }\n\n  ret_config = Init_RcConfig();\n  if (ret_config != modelbox::STATUS_SUCCESS) {\n    return ret_config;\n  }\n\n  ret_config = Init_CodecConfig();\n  if (ret_config != modelbox::STATUS_SUCCESS) {\n    return ret_config;\n  }\n\n  /* optional */\n  int sei_mode = 
MPP_ENC_SEI_MODE_ONE_FRAME;\n  ret = rk_api_->control(codec_ctx_, MPP_ENC_SET_SEI_CFG, &sei_mode);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"mpi control enc set sei cfg failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc set sei cfg failed\"};\n  }\n\n  int header_mode = MPP_ENC_HEADER_MODE_EACH_IDR;\n  ret = rk_api_->control(codec_ctx_, MPP_ENC_SET_HEADER_MODE, &header_mode);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"mpi control enc set header mode failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control enc set header mode failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Init_MppContex() {\n  auto ret = mpp_create(&codec_ctx_, &rk_api_);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"failed to run mpp_create: \" << ret;\n    return {modelbox::STATUS_FAULT, \"failed to run mpp_create\"};\n  }\n\n  RK_U32 timeout = RK_POLL_TIMEOUT;\n  ret = rk_api_->control(codec_ctx_, MPP_SET_OUTPUT_TIMEOUT, &timeout);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"mpi control set output timeout ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpi control set output timeout\"};\n  }\n\n  ret = mpp_init(codec_ctx_, MPP_CTX_ENC, codec_type_);\n  if (ret != MPP_OK) {\n    MBLOG_ERROR << \"mpp_init failed ret=\" << ret;\n    return {modelbox::STATUS_FAULT, \"mpp_init failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status FfmpegVideoEncoder::RkInit(int w, int h,\n                                            const AVRational &frame_rate,\n                                            const std::string &encodeType) {\n  codec_type_ = MPP_VIDEO_CodingAVC;\n  width_ = w;\n  height_ = h;\n  fps_ = frame_rate.num;\n  fps_den_ = frame_rate.den;\n\n  alignW_ = MPP_ALIGN(w, MPP_ALIGN_WIDTH);\n  alignH_ = MPP_ALIGN(h, MPP_ALIGN_HEIGHT);\n\n  auto ret = Init_MppContex();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed to init mpp contex reason: \" << 
ret.Errormsg();\n    return {modelbox::STATUS_FAULT,\n            \"failed to init mpp contex reason: \" + ret.Errormsg()};\n  }\n\n  ret = Init_Config();\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed to init config reason: \" << ret.Errormsg();\n    return {modelbox::STATUS_FAULT,\n            \"failed to init config reason: \" + ret.Errormsg()};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid FfmpegVideoEncoder::CloseRkEncoder() {\n  if (cfg_) {\n    mpp_enc_cfg_deinit(cfg_);  // todo: here or Init_Config?\n  }\n  if (rk_api_ && codec_ctx_) {\n    rk_api_->reset(codec_ctx_);\n  }\n\n  if (codec_ctx_) {\n    mpp_destroy(codec_ctx_);\n    codec_ctx_ = nullptr;\n  }\n}\n\nmodelbox::Status FfmpegVideoEncoder::Init(\n    const std::shared_ptr<modelbox::Device> &device, int32_t width,\n    int32_t height, const AVRational &frame_rate) {\n#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)\n  av_register_all();\n#endif\n\n  std::string encoder_name = \"h264\";\n  auto *av_codec_ctx = (AVCodecContext *)av_malloc(sizeof(AVCodecContext));\n  (void)memset_s(av_codec_ctx, sizeof(AVCodecContext), 0,\n                 sizeof(AVCodecContext));\n  if (av_codec_ctx == nullptr) {\n    MBLOG_ERROR << \"Alloc codec ctx failed, encoder name:\" << encoder_name;\n    return {modelbox::STATUS_FAULT,\n            \"Alloc codec ctx failed, encoder name:\" + encoder_name};\n  }\n\n  av_codec_ctx_.reset(av_codec_ctx, [this](AVCodecContext *ctx) {\n    if (ctx->extradata) {\n      av_free(ctx->extradata);\n    }\n\n    av_free(ctx);\n    CloseRkEncoder();\n  });\n\n  auto ret = RkInit(width, height, frame_rate, encoder_name);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"rk init fail\";\n    return {modelbox::STATUS_FAULT,\n            \"failed to rk init reason: \" + ret.Errormsg()};\n  }\n\n  SetupCodecParam(width, height, frame_rate, av_codec_ctx_);\n  // add extra data\n  auto buffer = std::make_shared<modelbox::Buffer>(device);\n  
buffer->Build(500);\n  MppPacket packet = nullptr;\n  mpp_packet_init_with_buffer(&packet, (MppBuffer)(buffer->MutableData()));\n  /* NOTE: It is important to clear output packet length!! */\n  mpp_packet_set_length(packet, 0);\n\n  auto mppret = rk_api_->control(codec_ctx_, MPP_ENC_GET_HDR_SYNC, packet);\n  if (mppret == MPP_OK) {\n    void *ptr = mpp_packet_get_pos(packet);\n    av_codec_ctx_->extradata_size = (int)(mpp_packet_get_length(packet));\n    av_codec_ctx_->extradata =\n        (uint8_t *)av_malloc(av_codec_ctx_->extradata_size);\n    if (av_codec_ctx_->extradata) {\n      (void)memcpy_s(av_codec_ctx_->extradata, av_codec_ctx_->extradata_size,\n                     ptr, av_codec_ctx_->extradata_size);\n    }\n  }\n\n  mpp_packet_set_buffer(packet, nullptr);\n  mpp_packet_deinit(&packet);\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid FfmpegVideoEncoder::SetupCodecParam(\n    int32_t width, int32_t height, const AVRational &frame_rate,\n    std::shared_ptr<AVCodecContext> &codec_ctx) {\n  codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;\n  codec_ctx->codec_id = (codec_type_ == MPP_VIDEO_CodingAVC) ? AV_CODEC_ID_H264\n                                                             : AV_CODEC_ID_HEVC;\n  codec_ctx->bit_rate = bps_;\n  codec_ctx->profile = ENCODER_PROFILE;\n  codec_ctx->level = (width > 1280) ? 
40 : 31;\n\n  codec_ctx->pix_fmt = AV_PIX_FMT_NV12;\n  codec_ctx->width = width;\n  codec_ctx->height = height;\n  codec_ctx->color_primaries = AVCOL_PRI_UNSPECIFIED;\n  codec_ctx->color_trc = AVCOL_TRC_UNSPECIFIED;\n  codec_ctx->colorspace = AVCOL_SPC_UNSPECIFIED;\n  codec_ctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;\n  codec_ctx->sample_aspect_ratio = {0, 1};\n  codec_ctx->has_b_frames = 0;\n\n  codec_ctx->framerate = frame_rate;\n  codec_ctx->time_base = av_inv_q(frame_rate);\n  codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;\n}\n\nstd::shared_ptr<AVPacket> FfmpegVideoEncoder::NewPacket(MppPacket &packet) {\n  if (packet == nullptr) {\n    MBLOG_ERROR << \"packet is nullptr\";\n    return nullptr;\n  }\n\n  void *ptr = mpp_packet_get_pos(packet);\n  size_t len = mpp_packet_get_length(packet);\n\n  auto *av_packet_ptr = av_packet_alloc();\n  Defer { mpp_packet_deinit(&packet); };\n  if (av_packet_ptr == nullptr) {\n    MBLOG_ERROR << \"av packet alloc failed\";\n    return nullptr;\n  }\n  if (0 != av_new_packet(av_packet_ptr, (int)len)) {\n    MBLOG_ERROR << \"av packet new failed\";\n    return nullptr;\n  }\n\n  std::shared_ptr<AVPacket> av_packet(\n      av_packet_ptr, [](AVPacket *pkt) { av_packet_free(&pkt); });\n\n  if (len > 0) {\n    auto e_ret = memcpy_s(av_packet_ptr->data, len, ptr, len);\n    if (e_ret != EOK) {\n      MBLOG_ERROR << \"av packet memcpy_s failed ret: \" << e_ret;\n      return nullptr;\n    }\n  } else {\n    MBLOG_WARN << \"get one zero compress frame\";\n  }\n\n  av_packet_ptr->pts = mpp_packet_get_pts(packet);\n  av_packet_ptr->dts = av_packet_ptr->pts;\n\n  return av_packet;\n}\n\nstd::shared_ptr<modelbox::Buffer> FfmpegVideoEncoder::FromAvFrame(\n    const std::shared_ptr<modelbox::Device> &device,\n    const std::shared_ptr<AVFrame> &av_frame) {\n  size_t w = av_frame->width;\n  size_t h = av_frame->height;\n  size_t total_size = av_frame->format == AVPixelFormat::AV_PIX_FMT_NV12\n                          ? 
w * h * 3 / 2\n                          : w * h * 3;\n  // assume buffer is allignmented\n\n  auto buffer = std::make_shared<modelbox::Buffer>(device);\n  buffer->Build(total_size);\n  auto *mpp_buf = (MppBuffer)(buffer->MutableData());\n  auto *cpu_buf = (uint8_t *)mpp_buffer_get_ptr(mpp_buf);\n\n  auto ret = av_image_copy_to_buffer(\n      cpu_buf, (int)total_size, av_frame->data, av_frame->linesize,\n      (AVPixelFormat)(av_frame->format), w, h, 1);\n  if (ret < 0) {\n    MBLOG_ERROR << \"failed to av_image_copy_to_buffer: \" << ret;\n    return nullptr;\n  }\n\n  if (av_frame->format != AVPixelFormat::AV_PIX_FMT_NV12) {\n    MppFrame frame = nullptr;\n    auto ret = mpp_frame_init(&frame);\n    if (ret != MPP_OK) {\n      MBLOG_ERROR << \"FromAvFrame frame failed \";\n      return nullptr;\n    }\n\n    mpp_frame_set_width(frame, w);\n    mpp_frame_set_height(frame, h);\n    mpp_frame_set_hor_stride(frame, w);\n    mpp_frame_set_ver_stride(frame, h);\n    mpp_frame_set_fmt(frame, av_frame->format == AVPixelFormat::AV_PIX_FMT_RGB24\n                                 ? 
MPP_FMT_RGB888\n                                 : MPP_FMT_BGR888);\n    mpp_frame_set_eos(frame, 0);\n    mpp_frame_set_buffer(frame, mpp_buf);\n\n    buffer = ColorChange(frame, RK_FORMAT_YCbCr_420_SP, device);\n  }\n\n  return buffer;\n}\n\nmodelbox::Status FfmpegVideoEncoder::Encode(\n    const std::shared_ptr<modelbox::Device> &device,\n    const std::shared_ptr<AVFrame> &av_frame,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  std::lock_guard<std::mutex> lk(rk_enc_mtx_);\n  auto av_buffer = FromAvFrame(device, av_frame);\n  if (av_buffer == nullptr) {\n    MBLOG_ERROR << \"FromAvFrame fail\";\n    return {modelbox::STATUS_FAULT, \"failed to FromAvFrame\"};\n  }\n\n  auto *mpp_buf = (MppBuffer)(av_buffer->ConstData());\n  MppFrame frame = nullptr;\n  MppPacket packet = nullptr;\n\n  auto ret = mpp_frame_init(&frame);\n  if (ret != MPP_OK) {\n    auto msg = std::string(\"mpp_frame_init failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  mpp_frame_set_width(frame, width_);\n  mpp_frame_set_height(frame, height_);\n  mpp_frame_set_hor_stride(frame, alignW_);\n  mpp_frame_set_ver_stride(frame, alignH_);\n  mpp_frame_set_fmt(frame, MPP_FMT_YUV420SP);\n  mpp_frame_set_pts(frame, (RK_S64)(av_frame->pts));\n  mpp_frame_set_eos(frame, 0);\n  mpp_frame_set_buffer(frame, mpp_buf);\n\n  Defer { mpp_frame_deinit(&frame); };\n\n  ret = rk_api_->encode_put_frame(codec_ctx_, frame);\n  if (ret != MPP_OK) {\n    auto msg =\n        std::string(\"mpp encode put frame failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  ret = rk_api_->encode_get_packet(codec_ctx_, &packet);\n  if (ret != MPP_OK) {\n    auto msg =\n        std::string(\"mpp encode get packet failed: \") + std::to_string(ret);\n    MBLOG_ERROR << msg;\n    return {modelbox::STATUS_FAULT, msg};\n  }\n\n  auto new_pkt = NewPacket(packet);\n  if (new_pkt != nullptr) {\n    
av_packet_list.push_back(new_pkt);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/ffmpeg_video_encoder.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_OUT_H_\n#define MODELBOX_FLOWUNIT_VIDEO_OUT_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/device/rockchip/device_rockchip.h>\n#include <modelbox/device/rockchip/rockchip_api.h>\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n#include <libavcodec/avcodec.h>\n#include <libavformat/avformat.h>\n#include <libavutil/imgutils.h>\n#ifdef __cplusplus\n}\n#endif\n\nclass FfmpegVideoEncoder {\n public:\n  modelbox::Status Init(const std::shared_ptr<modelbox::Device> &device,\n                        int32_t width, int32_t height,\n                        const AVRational &frame_rate);\n\n  modelbox::Status Encode(\n      const std::shared_ptr<modelbox::Device> &device,\n      const std::shared_ptr<AVFrame> &av_frame,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  std::shared_ptr<AVCodecContext> GetCtx() { return av_codec_ctx_; }\n\n private:\n  void SetupCodecParam(int32_t width, int32_t height,\n                       const AVRational &frame_rate,\n                       std::shared_ptr<AVCodecContext> &codec_ctx);\n\n  std::shared_ptr<AVCodecContext> av_codec_ctx_;\n  // rk\n  modelbox::Status RkInit(int w, int h, const AVRational &frame_rate,\n                          const std::string 
&encodeType);\n  modelbox::Status Init_PrepConfig();\n  modelbox::Status Init_RcConfig();\n  modelbox::Status Init_CodecConfig();\n  modelbox::Status Init_Config();\n  modelbox::Status Init_MppContex();\n  void CloseRkEncoder();\n  std::shared_ptr<AVPacket> NewPacket(MppPacket &packet);\n  std::shared_ptr<modelbox::Buffer> FromAvFrame(\n      const std::shared_ptr<modelbox::Device> &device,\n      const std::shared_ptr<AVFrame> &av_frame);\n\n  MppCodingType codec_type_ = MPP_VIDEO_CodingAVC;\n  MppCtx codec_ctx_ = nullptr;\n  MppApi *rk_api_ = nullptr;\n  MppEncCfg cfg_ = nullptr;\n\n  int width_ = 0;\n  int height_ = 0;\n  int alignW_ = 0;\n  int alignH_ = 0;\n  int fps_ = 0;\n  int fps_den_ = 0;\n  int bps_ = 0;\n  std::mutex rk_enc_mtx_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_OUT_H_"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/video_out_flowunit.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_out_flowunit.h\"\n\n#include <securec.h>\n\n#include <nlohmann/json.hpp>\n#include <opencv2/opencv.hpp>\n#include <regex>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_api_helper.h\"\n\nmodelbox::Status VideoOutFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  default_dest_url_ = opts->GetString(\"default_dest_url\", \"\");\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoOutFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nmodelbox::Status VideoOutFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> ctx) {\n  auto image_queue = std::static_pointer_cast<\n      modelbox::BlockingQueue<std::shared_ptr<modelbox::Buffer>>>(\n      ctx->GetPrivate(SHOW_QUEUE_CTX));\n  if (image_queue != nullptr) {\n    auto input_buffer_list = ctx->Input(FRAME_INFO_INPUT);\n    for (size_t i = 0; i < input_buffer_list->Size(); ++i) {\n      image_queue->Push(input_buffer_list->At(i), 50);\n    }\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  // others do video encoding\n  auto muxer =\n      std::static_pointer_cast<FfmpegVideoMuxer>(ctx->GetPrivate(MUXER_CTX));\n  auto encoder = std::static_pointer_cast<FfmpegVideoEncoder>(\n      ctx->GetPrivate(ENCODER_CTX));\n  if (muxer == nullptr || encoder == nullptr) {\n    MBLOG_ERROR << \"Stream 
not inited\";\n    return {modelbox::STATUS_FAULT, \"Stream not inited\"};\n  }\n\n  std::vector<std::shared_ptr<AVFrame>> av_frame_list;\n  auto ret = ReadFrames(ctx, av_frame_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read input frame failed\";\n    return {modelbox::STATUS_FAULT, \"Read input frame failed\"};\n  }\n\n  std::vector<std::shared_ptr<AVPacket>> av_packet_list;\n  ret = EncodeFrame(encoder, av_frame_list, av_packet_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Encode frame failed\";\n    return {modelbox::STATUS_FAULT, \"Encode frame failed\"};\n  }\n\n  ret = MuxPacket(muxer, encoder->GetCtx()->time_base, av_packet_list);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Mux packet failed\";\n    return {modelbox::STATUS_FAULT, \"Mux packet failed\"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoOutFlowUnit::ReadFrames(\n    const std::shared_ptr<modelbox::DataContext> &ctx,\n    std::vector<std::shared_ptr<AVFrame>> &av_frame_list) {\n  auto frame_buffer_list = ctx->Input(FRAME_INFO_INPUT);\n  if (frame_buffer_list == nullptr || frame_buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"Input frame list is empty\";\n    return {modelbox::STATUS_FAULT, \"Input frame list is empty\"};\n  }\n\n  auto frame_index_ptr =\n      std::static_pointer_cast<int64_t>(ctx->GetPrivate(FRAME_INDEX_CTX));\n  for (auto frame_buffer : *frame_buffer_list) {\n    std::shared_ptr<AVFrame> av_frame;\n    auto ret = ReadFrameFromBuffer(frame_buffer, av_frame);\n    av_frame->pts = *frame_index_ptr;\n    ++(*frame_index_ptr);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Read frame from buffer failed\";\n      return ret;\n    }\n\n    av_frame_list.push_back(av_frame);\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoOutFlowUnit::ReadFrameFromBuffer(\n    std::shared_ptr<modelbox::Buffer> &frame_buffer,\n    std::shared_ptr<AVFrame> 
&av_frame) {\n  auto *frame_ptr = av_frame_alloc();\n  if (frame_ptr == nullptr) {\n    MBLOG_ERROR << \"Alloca frame failed\";\n    return {modelbox::STATUS_FAULT, \"Alloca frame failed\"};\n  }\n\n  av_frame.reset(frame_ptr, [](AVFrame *ptr) { av_frame_free(&ptr); });\n  frame_buffer->Get(\"width\", av_frame->width);\n  frame_buffer->Get(\"height\", av_frame->height);\n  std::string pix_fmt;\n  frame_buffer->Get(\"pix_fmt\", pix_fmt);\n  auto iter = videodecode::g_av_pix_fmt_map.find(pix_fmt);\n  if (iter == videodecode::g_av_pix_fmt_map.end()) {\n    MBLOG_ERROR << \"Encoder not support pix fmt \" << pix_fmt;\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"Encoder not support pix fmt \" + pix_fmt};\n  }\n  av_frame->format = iter->second;\n  auto ret =\n      av_image_fill_arrays(av_frame->data, av_frame->linesize,\n                           (const uint8_t *)frame_buffer->ConstData(),\n                           iter->second, av_frame->width, av_frame->height, 1);\n  if (ret < 0) {\n    GET_FFMPEG_ERR(ret, ffmpeg_err);\n    MBLOG_ERROR << \"avpicture_fill failed, err \" << ffmpeg_err;\n    return {modelbox::STATUS_FAULT, \"avpicture_fill failed, err \"};\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoOutFlowUnit::EncodeFrame(\n    const std::shared_ptr<FfmpegVideoEncoder> &encoder,\n    const std::vector<std::shared_ptr<AVFrame>> &av_frame_list,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  for (const auto &frame : av_frame_list) {\n    auto ret = encoder->Encode(GetBindDevice(), frame, av_packet_list);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Encoder encode frame failed reason: \" + ret.Errormsg();\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoOutFlowUnit::MuxPacket(\n    const std::shared_ptr<FfmpegVideoMuxer> &muxer, const AVRational &time_base,\n    std::vector<std::shared_ptr<AVPacket>> &av_packet_list) {\n  for (const auto 
&packet : av_packet_list) {\n    auto ret = muxer->Mux(time_base, packet);\n    if (ret != modelbox::STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Muxer mux packet failed\";\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid VideoOutFlowUnit::ProcessShow(\n    const std::string &dest_url,\n    const std::shared_ptr<\n        modelbox::BlockingQueue<std::shared_ptr<modelbox::Buffer>>>\n        &image_queue) {\n  std::string win_name = \"modelbox_show\";\n  if (dest_url.length() > 2) {\n    win_name = dest_url.substr(2);\n  }\n\n  cv::namedWindow(win_name, cv::WINDOW_AUTOSIZE);\n  std::shared_ptr<modelbox::Buffer> buf;\n  std::shared_ptr<modelbox::Buffer> back_buf;\n  while (image_queue->Pop(&buf)) {\n    if (buf == nullptr) {\n      break;\n    }\n\n    // at least 1, even not set widht, height\n    int32_t width = 1;\n    int32_t height = 1;\n    std::string pix_fmt = \"bgr\";\n    buf->Get(\"width\", width);\n    buf->Get(\"height\", height);\n    buf->Get(\"pix_fmt\", pix_fmt);\n    void *input_data = const_cast<void *>(buf->ConstData());\n    bool isnv12 = (pix_fmt == \"nv12\");\n    cv::Mat img_data(cv::Size(width, isnv12 ? height * 3 / 2 : height),\n                     isnv12 ? 
CV_8UC1 : CV_8UC3, input_data);\n    cv::Mat show_img = img_data;\n    // todo color change\n    if (pix_fmt == \"rgb\") {\n      cv::cvtColor(img_data, show_img, cv::COLOR_RGB2BGR);\n    } else if (pix_fmt == \"nv12\") {\n      cv::cvtColor(img_data, show_img, cv::COLOR_YUV2BGR_NV12);\n    }\n\n    cv::imshow(win_name, show_img);\n    cv::waitKey(10);\n    back_buf = buf;\n  }\n\n  cv::destroyWindow(win_name);\n}\n\nmodelbox::Status VideoOutFlowUnit::PrepareVideoOut(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    const std::string &dest_url, const std::string &format_name) {\n  auto frame_buffer_list = data_ctx->Input(FRAME_INFO_INPUT);\n  if (frame_buffer_list == nullptr || frame_buffer_list->Size() == 0) {\n    MBLOG_ERROR << \"Input [frame_info] is empty\";\n    return {modelbox::STATUS_FAULT, \"Input [frame_info] is empty\"};\n  }\n\n  auto frame_buffer = frame_buffer_list->At(0);\n  int32_t width = 0;\n  int32_t height = 0;\n  int32_t rate_num = 25;\n  int32_t rate_den = 1;\n  frame_buffer->Get(\"width\", width);\n  frame_buffer->Get(\"height\", height);\n  frame_buffer->Get(\"rate_num\", rate_num);\n  frame_buffer->Get(\"rate_den\", rate_den);\n\n  if (width == 0 || height == 0) {\n    MBLOG_ERROR << \"buffer meta is invalid\";\n    return {modelbox::STATUS_INVALID, \"buffer meta is invalid\"};\n  }\n\n  auto encoder = std::make_shared<FfmpegVideoEncoder>();\n  auto ret =\n      encoder->Init(GetBindDevice(), width, height, {rate_num, rate_den});\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init encoder failed\";\n    return {modelbox::STATUS_FAULT, \"Init encoder failed\"};\n  }\n\n  auto writer = std::make_shared<FfmpegWriter>();\n  ret = writer->Open(format_name, dest_url);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Open ffmepg writer failed, format \" << format_name\n                << \", url \" << dest_url;\n    return {modelbox::STATUS_FAULT, \"Open ffmepg writer failed, format \" +\n          
                              format_name + \", url \" + dest_url};\n  }\n\n  auto muxer = std::make_shared<FfmpegVideoMuxer>();\n  ret = muxer->Init(encoder->GetCtx(), writer);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init muxer failed\";\n    return {modelbox::STATUS_FAULT, \"Init muxer failed\"};\n  }\n\n  auto color_cvt = std::make_shared<FfmpegColorConverter>();\n\n  data_ctx->SetPrivate(MUXER_CTX, muxer);\n  data_ctx->SetPrivate(ENCODER_CTX, encoder);\n  data_ctx->SetPrivate(COLOR_CVT_CTX, color_cvt);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VideoOutFlowUnit::DataPre(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  std::string dest_url;\n  auto ret = GetDestUrl(data_ctx, dest_url);\n  if (ret != modelbox::STATUS_SUCCESS || dest_url.empty()) {\n    MBLOG_ERROR << \"dest_url in config is empty, no dest url available\";\n    return {modelbox::STATUS_FAULT,\n            \"dest_url in config is empty, no dest url available\"};\n  }\n\n  MBLOG_INFO << \"videoout url=\" << dest_url;\n\n  auto frame_index_ptr = std::make_shared<int64_t>(0);\n  data_ctx->SetPrivate(FRAME_INDEX_CTX, frame_index_ptr);\n\n  if (dest_url[0] >= '0' && dest_url[0] <= '9') {\n    // 视频输出， 类似0:windows_name配置\n    std::shared_ptr<std::thread> show_thread;\n    auto image_queue = std::make_shared<\n        modelbox::BlockingQueue<std::shared_ptr<modelbox::Buffer>>>(2);\n    show_thread.reset(new std::thread(&VideoOutFlowUnit::ProcessShow, this,\n                                      dest_url, image_queue),\n                      [image_queue](std::thread *p) {\n                        image_queue->Shutdown();\n                        if (p && p->joinable()) {\n                          p->join();\n                        }\n                        delete p;\n                      });\n    data_ctx->SetPrivate(SHOW_CTX, show_thread);\n    data_ctx->SetPrivate(SHOW_QUEUE_CTX, image_queue);\n    return modelbox::STATUS_OK;\n  }\n\n  std::string 
format_name = \"mp4\";\n  if (dest_url.substr(0, 4) == \"rtsp\") {\n    format_name = \"rtsp\";\n  }\n\n  return PrepareVideoOut(data_ctx, dest_url, format_name);\n}\n\nmodelbox::Status VideoOutFlowUnit::GetDestUrl(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::string &dest_url) {\n  dest_url = default_dest_url_;\n\n  Defer {\n    std::regex url_auth_pattern(\"://[^ /]*?:[^ /]*?@\");\n    auto result = std::regex_replace(dest_url, url_auth_pattern, \"://*:*@\");\n    MBLOG_INFO << \"video_out url is \" << result;\n  };\n\n  // 3种方式获取\n  auto stream_meta = data_ctx->GetInputMeta(FRAME_INFO_INPUT);\n  if (stream_meta != nullptr) {\n    auto dest_url_ptr =\n        std::static_pointer_cast<std::string>(stream_meta->GetMeta(DEST_URL));\n    if (dest_url_ptr != nullptr && !(*dest_url_ptr).empty()) {\n      dest_url = *dest_url_ptr;\n      return modelbox::STATUS_SUCCESS;\n    }\n  }\n\n  auto config = data_ctx->GetSessionConfig();\n  auto cfg_str = config->GetString(\"iva_task_output\");\n  if (cfg_str.empty()) {\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  try {\n    nlohmann::json url_json = nlohmann::json::parse(cfg_str);\n    if (url_json.contains(\"data\") && url_json[\"data\"].contains(\"url\")) {\n      dest_url = url_json[\"data\"][\"url\"].get<std::string>();\n    }\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"Parse config str to json failed, detail: \" << e.what();\n    return modelbox::STATUS_INVALID;\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status VideoOutFlowUnit::DataPost(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  data_ctx->SetPrivate(MUXER_CTX, nullptr);\n  data_ctx->SetPrivate(ENCODER_CTX, nullptr);\n  data_ctx->SetPrivate(SHOW_CTX, nullptr);\n  data_ctx->SetPrivate(SHOW_QUEUE_CTX, nullptr);\n  return modelbox::STATUS_OK;\n}\n\nMODELBOX_FLOWUNIT(VideoOutFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.SetFlowUnitGroupType(\"Video\");\n  
desc.AddFlowUnitInput({FRAME_INFO_INPUT, \"cpu\"});\n  desc.SetFlowType(modelbox::STREAM);\n  desc.SetInputContiguous(false);\n  // 禁止异步执行，编码必须一帧帧的编码\n  desc.SetResourceNice(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n  desc.AddFlowUnitOption(modelbox::FlowUnitOption(\n      \"default_dest_url\", \"string\", true, \"\", \"the encoder dest url\"));\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/video_out_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_VIDEO_OUT_CPU_H_\n#define MODELBOX_FLOWUNIT_VIDEO_OUT_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow.h>\n\n#include <vector>\n\n#include \"ffmpeg_video_encoder.h\"\n#include \"ffmpeg_video_muxer.h\"\n#include \"ffmpeg_writer.h\"\n#include \"modelbox/flowunit.h\"\n#include \"video_decode_common.h\"\n\nconstexpr const char *FLOWUNIT_TYPE = \"rockchip\";\nconstexpr const char *FLOWUNIT_NAME = \"video_out\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A video out flowunit. 
\\n\"\n    \"\\t@Port parameter: The input port buffer meta type is image \\n\"\n    \"\\t  The image type buffer contains the following meta fields:\\n\"\n    \"\\t\\tField Name: width,         Type: int32_t\\n\"\n    \"\\t\\tField Name: height,        Type: int32_t\\n\"\n    \"\\t\\tField Name: width_stride,  Type: int32_t\\n\"\n    \"\\t\\tField Name: height_stride, Type: int32_t\\n\"\n    \"\\t\\tField Name: channel,       Type: int32_t\\n\"\n    \"\\t\\tField Name: pix_fmt,       Type: string\\n\"\n    \"\\t\\tField Name: layout,        Type: int32_t\\n\"\n    \"\\t\\tField Name: shape,         Type: vector<size_t>\\n\"\n    \"\\t\\tField Name: type,          Type: ModelBoxDataType::MODELBOX_UINT8\\n\"\n    \"\\t@Constraint: The field value range of this flowunit supports: \"\n    \"'pix_fmt': \"\n    \"[rgb, bgr, nv12], 'layout': [hwc]. \";\nconstexpr const char *DEST_URL = \"dest_url\";\nconstexpr const char *COLOR_CVT_CTX = \"color_cvt_ctx\";\nconstexpr const char *FRAME_INDEX_CTX = \"frame_index_ctx\";\nconstexpr const char *ENCODER_CTX = \"encoder_ctx\";\nconstexpr const char *MUXER_CTX = \"muxer_ctx\";\nconstexpr const char *SHOW_CTX = \"show_ctx\";\nconstexpr const char *SHOW_QUEUE_CTX = \"show_queue_ctx\";\nconstexpr const char *FORMAT_NAME = \"format_name\";\nconstexpr const char *CODEC_NAME = \"codec_name\";\nconstexpr const char *DESTINATION_URL = \"destination_url\";\nconstexpr const char *FRAME_INFO_INPUT = \"in_video_frame\";\n\nclass VideoOutFlowUnit : public modelbox::FlowUnit {\n public:\n  VideoOutFlowUnit() = default;\n  ~VideoOutFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataPost(\n      
std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n  modelbox::Status DataGroupPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n  modelbox::Status DataGroupPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    return modelbox::STATUS_OK;\n  };\n\n private:\n  modelbox::Status GetDestUrl(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::string &dest_url);\n\n  modelbox::Status ReadFrames(\n      const std::shared_ptr<modelbox::DataContext> &ctx,\n      std::vector<std::shared_ptr<AVFrame>> &av_frame_list);\n\n  modelbox::Status ReadFrameFromBuffer(\n      std::shared_ptr<modelbox::Buffer> &frame_buffer,\n      std::shared_ptr<AVFrame> &av_frame);\n\n  modelbox::Status EncodeFrame(\n      const std::shared_ptr<FfmpegVideoEncoder> &encoder,\n      const std::vector<std::shared_ptr<AVFrame>> &av_frame_list,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  modelbox::Status MuxPacket(\n      const std::shared_ptr<FfmpegVideoMuxer> &muxer,\n      const AVRational &time_base,\n      std::vector<std::shared_ptr<AVPacket>> &av_packet_list);\n\n  void ProcessShow(const std::string &dest_url,\n                   const std::shared_ptr<modelbox::BlockingQueue<\n                       std::shared_ptr<modelbox::Buffer>>> &image_queue);\n  modelbox::Status PrepareVideoOut(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      const std::string &dest_url, const std::string &format_name);\n\n  std::string default_dest_url_;\n};\n\n#endif  // MODELBOX_FLOWUNIT_VIDEO_OUT_CPU_H_\n"
  },
  {
    "path": "src/drivers/devices/rockchip/flowunit/video_out/video_out_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"driver_flow_test.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass RockchipVideoEncoderFlowUnitTest : public testing::Test {\n public:\n  RockchipVideoEncoderFlowUnitTest() : flow_(std::make_shared<MockFlow>()){};\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n\n public:\n  std::shared_ptr<MockFlow> flow_;\n\n  void StartFlow(const std::string& toml_content, uint64_t millisecond);\n\n private:\n  Status AddMockFlowUnit();\n};\n\nvoid RockchipVideoEncoderFlowUnitTest::StartFlow(\n    const std::string& toml_content, uint64_t millisecond) {\n  flow_ = std::make_shared<MockFlow>();\n  auto ret = AddMockFlowUnit();\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  ret = flow_->BuildAndRun(\"VideoEncoder\", toml_content, millisecond);\n  EXPECT_EQ(ret, STATUS_STOP);\n}\n\nStatus RockchipVideoEncoderFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"encoder_start_unit\", {}, {\"stream_meta\"});\n    auto open_func =\n        [=](const 
std::shared_ptr<modelbox::Configuration>& flow_option,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          auto ext_data = mock_flowunit->CreateExternalData();\n          EXPECT_NE(ext_data, nullptr);\n          auto buffer_list = ext_data->CreateBufferList();\n          buffer_list->Build({1});\n          auto status = ext_data->Send(buffer_list);\n          EXPECT_EQ(status, STATUS_SUCCESS);\n          status = ext_data->Close();\n          EXPECT_EQ(status, STATUS_SUCCESS);\n          return modelbox::STATUS_OK;\n        };\n    auto data_pre_func =\n        [&](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          MBLOG_INFO << \"stream_meta  \"\n                     << \"DataPre\";\n          auto test_meta = std::make_shared<std::string>(\"test\");\n          auto data_meta = std::make_shared<DataMeta>();\n          data_meta->SetMeta(\"test\", test_meta);\n          data_ctx->SetOutputMeta(\"stream_meta\", data_meta);\n          return modelbox::STATUS_OK;\n        };\n    auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          auto output_buf = data_ctx->Output(\"stream_meta\");\n          std::vector<size_t> shape(1, 1);\n          output_buf->Build(shape);\n\n          return modelbox::STATUS_OK;\n        };\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterOpenFunc(open_func);\n    mock_functions->RegisterDataPreFunc(data_pre_func);\n    mock_functions->RegisterProcessFunc(process_func);\n    flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"encoder_image_produce\",\n                                          {\"stream_meta\"}, {\"frame_info\"});\n    mock_desc->SetOutputType(EXPAND);\n    
auto process_func =\n        [=](const std::shared_ptr<DataContext>& data_ctx,\n            const std::shared_ptr<MockFlowUnit>& mock_flowunit) {\n          std::string img_path;\n          static int64_t frame_index = 0;\n          if ((frame_index / 24) % 2 == 0) {\n            img_path =\n                std::string(TEST_ASSETS) + \"/video/rgb_460800_480x320_a.data\";\n          } else {\n            img_path =\n                std::string(TEST_ASSETS) + \"/video/rgb_460800_480x320_b.data\";\n          }\n\n          std::ifstream img_file(img_path);\n          if (!img_file.is_open()) {\n            MBLOG_ERROR << \"Open failed, path \" << img_path;\n            return STATUS_FAULT;\n          }\n\n          size_t file_size = 460800;\n          auto output_buff_list = data_ctx->Output(\"frame_info\");\n          std::vector<size_t> shape(1, file_size);\n          output_buff_list->Build(shape);\n          auto output_buff = output_buff_list->At(0);\n          auto* ptr = (char*)output_buff->MutableData();\n          img_file.read(ptr, file_size);\n          output_buff->Set(\"width\", 480);\n          output_buff->Set(\"height\", 320);\n          output_buff->Set(\"rate_num\", 24);\n          output_buff->Set(\"rate_den\", 1);\n          output_buff->Set(\"pix_fmt\", std::string(\"rgb\"));\n          output_buff->Set(\"index\", frame_index);\n\n          if (frame_index == 1339) {  // 60S\n            return modelbox::STATUS_STOP;\n          }\n\n          ++frame_index;\n          auto event = std::make_shared<FlowUnitEvent>();\n          data_ctx->SendEvent(event);\n          return modelbox::STATUS_CONTINUE;\n        };\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterProcessFunc(process_func);\n    flow_->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                           TEST_DRIVER_DIR);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nTEST_F(RockchipVideoEncoderFlowUnitTest, InitUnit) {\n 
 const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  auto ret = system(\"nc localhost 554 -z\");\n  if (errno != 0 || ret != 0) {\n    GTEST_SKIP();\n  }\n\n  std::string dest_url = \"rtsp://localhost/test_\" + std::to_string(rand());\n  std::string toml_content = R\"(\n      [driver]\n      skip-default = true\n      dir=[\")\" + test_lib_dir +\n                             \"\\\",\\\"\" + test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n      graphconf = '''digraph demo {\n            encoder_start_unit[type=flowunit, flowunit=encoder_start_unit, device=cpu, deviceid=0, label=\"<stream_meta>\"]\n            encoder_image_produce[type=flowunit, flowunit=encoder_image_produce, device=cpu, deviceid=0, label=\"<stream_meta> | <frame_info>\"]\n            videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size_frame_info=16, default_dest_url=\")\" +\n                             dest_url + R\"(\"]\n            encoder_start_unit:stream_meta -> encoder_image_produce:stream_meta\n            encoder_image_produce:frame_info -> videoencoder:in_video_frame\n          }'''\n      format = \"graphviz\"\n    )\";\n  StartFlow(toml_content, 1000 * 1000);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/graph_conf/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-graphconf)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\npkg_check_modules(GRAPHVIZ libcgraph)\nif(${GRAPHVIZ_FOUND}) \n    add_subdirectory(graphviz)\nelse() \n    message(STATUS \"Disable cgraph plugin\")\nendif()\n"
  },
  {
    "path": "src/drivers/graph_conf/graphviz/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n \nset(UNIT_GRAPHCONF \"graphconf\")\nset(UNIT_NAME \"graphviz\")\n \nproject(modelbox-${UNIT_GRAPHCONF}-${UNIT_NAME})\n \nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_GRAPHCONF_GRAPHVIZ_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n \ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\n \nset(LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED libmodelbox-${UNIT_GRAPHCONF}-${UNIT_NAME}-shared)\nset(LIBMODELBOX_GRAPHCONF_GRAPHVIZ_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n \nadd_library(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} SHARED ${MODELBOX_UNIT_GRAPHCONF_GRAPHVIZ_SOURCE})\n \nset_target_properties(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n \ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} dl)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} 
cgraph)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} ${LIBMODELBOX_SHARED})\n \nset_target_properties(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-${UNIT_GRAPHCONF}-${UNIT_NAME}\")\n \ninstall(TARGETS ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED}\n    COMPONENT graph-graphviz\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n \nset(LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_GRAPHCONF_GRAPHVIZ_INCLUDE ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SOURCES ${MODELBOX_UNIT_GRAPHCONF_GRAPHVIZ_SOURCE} CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n \n "
  },
  {
    "path": "src/drivers/graph_conf/graphviz/graph_conf_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"graphviz_conf.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::GraphvizFactory>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetName(modelbox::GRAPHCONF_NAME);\n  desc->SetClass(modelbox::DRIVER_CLASS_GRAPHCONF);\n  desc->SetType(modelbox::GRAPHCONF_TYPE);\n  desc->SetVersion(modelbox::GRAPHVIZE_VERSION);\n  desc->SetDescription(modelbox::GRAPHCONF_DESC);\n}\n\nmodelbox::Status DriverInit() { return modelbox::STATUS_OK; }\n\nvoid DriverFini() {}\n"
  },
  {
    "path": "src/drivers/graph_conf/graphviz/graphviz_conf.cc",
    "content": "/*\n *  * Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\n */\n\n#include \"graphviz_conf.h\"\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nthread_local std::stringstream g_graphviz_error;\n\nint GraphvizeError(char *errmsg) {\n  // errmsg might be part for one specify error\n  g_graphviz_error << errmsg;\n  return 0;\n}\n\nstd::mutex kCgraphLock;\n\nGraphvizFactory::GraphvizFactory() {\n  std::unique_lock<std::mutex> lock(kCgraphLock);\n  agseterrf(GraphvizeError);\n}\n\nGraphvizFactory::~GraphvizFactory() {\n  std::unique_lock<std::mutex> lock(kCgraphLock);\n  agseterrf(nullptr);\n}\n\nstd::shared_ptr<GraphConfig> GraphvizFactory::CreateGraphConfigFromStr(\n    const std::string &graph_config) {\n  std::shared_ptr<modelbox::GraphConfig> graphviz_conf =\n      std::make_shared<modelbox::GraphvizConfig>(graph_config, false);\n  return graphviz_conf;\n}\n\nstd::shared_ptr<GraphConfig> GraphvizFactory::CreateGraphConfigFromFile(\n    const std::string &file_path) {\n  std::shared_ptr<modelbox::GraphConfig> graphviz_conf =\n      std::make_shared<modelbox::GraphvizConfig>(file_path, true);\n  return graphviz_conf;\n}\n\nstd::string GraphvizFactory::GetGraphConfFactoryType() { return factory_type_; }\n\nGraphvizConfig::GraphvizConfig(const std::string &graph_conf,\n                               const bool is_file) {\n  graphviz_conf_ = graph_conf;\n  is_file_ = is_file;\n}\n\nGraphvizConfig::~GraphvizConfig() = default;\n\nstd::shared_ptr<GCGraph> GraphvizConfig::Resolve() {\n  std::shared_ptr<modelbox::GCGraph> graph = std::make_shared<modelbox::GCGraph>();\n  std::shared_ptr<Agraph_t> g;\n\n  auto init_status = graph->Init(graph);\n  if (init_status != STATUS_OK) {\n    MBLOG_ERROR << \"gcgraph init failed\";\n    return nullptr;\n  }\n\n  if (is_file_) {\n    g = LoadGraphFromFile();\n  } else {\n    g = LoadGraphFromStr();\n  }\n\n  if (g == nullptr) {\n    MBLOG_ERROR << \"load graph faild.\";\n    return 
nullptr;\n  }\n\n  auto *graph_name = agnameof(g.get());\n  if (graph_name != nullptr) {\n    graph->SetGraphName(graph_name);\n  }\n\n  auto ret = TraversalsGraph(g, graph);\n  if (!ret) {\n    MBLOG_ERROR << \"traversals graph faild.\";\n    return nullptr;\n  }\n\n  ret = TraversalsNode(g, graph);\n  if (!ret) {\n    MBLOG_ERROR << \"traversals node faild.\";\n    return nullptr;\n  }\n\n  ret = TraversalsEdge(g, graph);\n  if (!ret) {\n    MBLOG_ERROR << \"traversals edge faild.\";\n    return nullptr;\n  }\n\n  return graph;\n}\n\nstd::shared_ptr<Agraph_t> GraphvizConfig::LoadGraphFromStr() {\n  Agraph_t *g = nullptr;\n\n  std::unique_lock<std::mutex> lock(kCgraphLock);\n  g_graphviz_error.clear();\n  g = agmemread(graphviz_conf_.c_str());\n\n  if (g == nullptr) {\n    MBLOG_ERROR << \"load graph from str failed, graphviz config is : \"\n                << std::endl\n                << graphviz_conf_;\n    MBLOG_ERROR << \"graphviz: \" << g_graphviz_error.str();\n    g_graphviz_error.clear();\n    StatusError = {STATUS_BADCONF};\n    return nullptr;\n  }\n\n  std::shared_ptr<Agraph_t> ret(g, [](Agraph_t *ptr) {\n    std::unique_lock<std::mutex> lock(kCgraphLock);\n    agclose(ptr);\n  });\n  return ret;\n}\n\nstd::shared_ptr<Agraph_t> GraphvizConfig::LoadGraphFromFile() {\n  Agraph_t *g = nullptr;\n  std::string file = PathCanonicalize(graphviz_conf_);\n  if (file.length() == 0) {\n    MBLOG_ERROR << \"graph path is invalid, \" << file;\n    StatusError = {STATUS_BADCONF, \"path is invalid\"};\n    return nullptr;\n  }\n\n  FILE *fp = fopen(file.c_str(), \"r\");\n  if (fp == nullptr) {\n    MBLOG_ERROR << \"open file failed, file: \" << file << \", \"\n                << StrError(errno);\n    StatusError = {STATUS_BADCONF, StrError(errno)};\n    return nullptr;\n  }\n\n  std::unique_lock<std::mutex> lock(kCgraphLock);\n  g = agread(fp, nullptr);\n  fclose(fp);\n  fp = nullptr;\n\n  if (g == nullptr) {\n    std::string errmsg = \"read graph failed. 
\";\n    if (aglasterr()) {\n      errmsg += aglasterr();\n    }\n    StatusError = {STATUS_BADCONF, errmsg};\n    return nullptr;\n  }\n\n  std::shared_ptr<Agraph_t> ret(g, [](Agraph_t *ptr) {\n    std::unique_lock<std::mutex> lock(kCgraphLock);\n    agclose(ptr);\n  });\n\n  return ret;\n}\n\nStatus GraphvizConfig::TraversalsGraph(const std::shared_ptr<Agraph_t> &g,\n                                       const std::shared_ptr<GCGraph> &graph) {\n  std::vector<std::string> node_keys;\n  Agsym_t *sym = nullptr;\n\n  if (g == nullptr || graph == nullptr) {\n    MBLOG_ERROR << \"graph is null.\";\n    return STATUS_INVALID;\n  }\n\n  while (true) {\n    sym = agnxtattr(g.get(), AGRAPH, sym);\n    if (sym == nullptr) {\n      break;\n    }\n\n    node_keys.emplace_back(sym->name);\n  }\n\n  for (const std::string &elem : node_keys) {\n    auto *agget_str = agget(g.get(), const_cast<char *>(elem.c_str()));\n    if (agget_str == nullptr) {\n      MBLOG_ERROR << \"failed to get graph attr name: \" << elem;\n      continue;\n    }\n\n    graph->SetConfiguration(elem, agget_str);\n  }\n\n  return STATUS_OK;\n}\n\nStatus GraphvizConfig::TraversalsNode(const std::shared_ptr<Agraph_t> &g,\n                                      const std::shared_ptr<GCGraph> &graph) {\n  std::vector<std::string> node_keys;\n  Agnode_t *agnode = nullptr;\n\n  Agsym_t *sym = nullptr;\n  while (true) {\n    sym = agnxtattr(g.get(), AGNODE, sym);\n    if (sym == nullptr) {\n      break;\n    }\n    node_keys.emplace_back(sym->name);\n  }\n\n  for (agnode = agfstnode(g.get()); agnode;\n       agnode = agnxtnode(g.get(), agnode)) {\n    auto gcnode = std::make_shared<GCNode>();\n    auto *agname = agnameof(agnode);\n    if (agname == nullptr) {\n      return {STATUS_BADCONF, \"agname is invalid\"};\n    }\n\n    auto status = gcnode->Init(agname, graph);\n    if (!status) {\n      return status;\n    }\n\n    MBLOG_DEBUG << \"add node: \" << gcnode->GetNodeName();\n    for (const std::string &elem 
: node_keys) {\n      const char *ag_value = agget(agnode, const_cast<char *>(elem.c_str()));\n      if (ag_value == nullptr) {\n        continue;\n      }\n\n      std::string value = ag_value;\n      if (value == \"\") {\n        continue;\n      }\n\n      gcnode->SetConfiguration(elem, value);\n      MBLOG_DEBUG << \"  key: \" << elem << \", value: \" << value;\n    }\n\n    graph->AddNode(gcnode);\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<modelbox::GCEdge> GraphvizConfig::NewGcEdgeFromAgedge(\n    const std::shared_ptr<GCGraph> &graph, Agedge_t *agedge) {\n  auto gcedge = std::make_shared<modelbox::GCEdge>();\n  auto ret = gcedge->Init(graph);\n  if (!ret) {\n    StatusError = ret;\n    return nullptr;\n  }\n\n  /* IMPORTANT NOTES:\n   * The HEAD and TAIL of an edge is totally inverse between Graphviz and\n   * GCGraph. \n   * For Graphviz, an edge arrow is pointing to the HEAD node from the\n   * TAIL node:\n   *\n   *                           Graphviz_Edge\n   *           Graphviz_TAIL   ------------>   Graphviz_HEAD\n   *\n   * For GCGraph, inversely, an edge arrow is pointing to the TAIL node\n   * (destination node) from the HEAD node (source node):\n   *\n   *                           GCGraph_Edge\n   *           GCGraph_HEAD    ------------>   GCGraph_TAIL\n   */\n\n  std::string head_node_name;\n  auto *node_name = agnameof(agtail(agedge));\n  if (node_name == nullptr) {\n    head_node_name = \"\";\n  } else {\n    head_node_name = node_name;\n  }\n\n  auto head_node = graph->GetNode(head_node_name);\n  if (head_node == nullptr) {\n    MBLOG_ERROR << \"head node [\" << head_node_name << \"]\"\n                << \" not exist.\";\n    StatusError = {STATUS_FAULT, \"get head node failed.\"};\n    return nullptr;\n  }\n\n  gcedge->SetHeadNode(head_node);\n  std::string tail_node_name;\n  node_name = agnameof(aghead(agedge));\n  if (node_name == nullptr) {\n    tail_node_name = \"\";\n  } else {\n    tail_node_name = node_name;\n  }\n\n  auto 
tail_node = graph->GetNode(tail_node_name);\n  if (tail_node == nullptr) {\n    MBLOG_ERROR << \"tail node [\" << tail_node_name << \"]\"\n                << \" not exist.\";\n    StatusError = {STATUS_FAULT, \"get tail node failed.\"};\n    return nullptr;\n  }\n\n  gcedge->SetTailNode(tail_node);\n\n  return gcedge;\n}\n\nStatus GraphvizConfig::TraversalsEdge(const std::shared_ptr<Agraph_t> &g,\n                                      const std::shared_ptr<GCGraph> &graph) {\n  Agnode_t *agnode = nullptr;\n  Agedge_t *agedge = nullptr;\n  std::vector<std::string> edge_keys;\n\n  Agsym_t *sym = nullptr;\n  while (true) {\n    sym = agnxtattr(g.get(), AGEDGE, sym);\n    if (sym == nullptr) {\n      break;\n    }\n    edge_keys.emplace_back(sym->name);\n  }\n\n  for (agnode = agfstnode(g.get()); agnode;\n       agnode = agnxtnode(g.get(), agnode)) {\n    for (agedge = agfstout(g.get(), agnode); agedge;\n         agedge = agnxtout(g.get(), agedge)) {\n      auto gcedge = NewGcEdgeFromAgedge(graph, agedge);\n      if (gcedge == nullptr) {\n        return {StatusError};\n      }\n\n      for (const std::string &elem : edge_keys) {\n        char *value = nullptr;\n        value = agget(agedge, const_cast<char *>(elem.c_str()));\n        if (value == nullptr || *value == '\\0') {\n          continue;\n        }\n        gcedge->SetConfiguration(elem, value);\n        if (elem == GRAPHVIZ_HEAD_PORT) {\n          gcedge->SetTailPort(value);\n          gcedge->GetTailNode()->SetInputPort(value);\n        } else if (elem == GRAPHVIZ_TAIL_PORT) {\n          gcedge->SetHeadPort(value);\n          gcedge->GetHeadNode()->SetOutputPort(value);\n        }\n      }\n\n      auto ret = graph->AddEdge(gcedge);\n      if (!ret) {\n        return ret;\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/drivers/graph_conf/graphviz/graphviz_conf.h",
    "content": "/*\n *  * Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\n */\n\n#include <modelbox/base/graph_manager.h>\n#include <graphviz/cgraph.h>\n#include <stdio.h>\n\n#include <iostream>\n\n#ifndef MODELBOX_GRAPHVIZ_CONF_H\n#define MODELBOX_GRAPHVIZ_CONF_H\n\nnamespace modelbox {\n\nconstexpr const char *GRAPHCONF_TYPE = \"graph\";\nconstexpr const char *GRAPHCONF_NAME = \"graphconf-graphvize\";\nconstexpr const char *GRAPHCONF_DESC = \"graph config parse graphviz\";\nconstexpr const char *GRAPHVIZE_VERSION = \"1.0.0\";\n\nconstexpr const char *GRAPHVIZ_HEAD_PORT = \"headport\";\nconstexpr const char *GRAPHVIZ_TAIL_PORT = \"tailport\";\nconstexpr const char *FACTORY_TYPE_GRAPHVIZE = \"graphviz\";\n\nclass GraphvizFactory : public GraphConfigFactory {\n private:\n  std::string factory_type_ = FACTORY_TYPE_GRAPHVIZE;\n\n public:\n  GraphvizFactory();\n\n  ~GraphvizFactory() override;\n\n  std::shared_ptr<GraphConfig> CreateGraphConfigFromStr(\n      const std::string &graph_config) override;\n\n  std::shared_ptr<GraphConfig> CreateGraphConfigFromFile(\n      const std::string &file_path) override;\n\n  std::string GetGraphConfFactoryType() override;\n};\n\nclass GraphvizConfig : public GraphConfig {\n public:\n  GraphvizConfig(const std::string &graph_conf, bool is_file);\n\n  ~GraphvizConfig() override;\n\n  std::shared_ptr<GCGraph> Resolve() override;\n\n private:\n  std::shared_ptr<Agraph_t> LoadGraphFromStr();\n\n  std::shared_ptr<Agraph_t> LoadGraphFromFile();\n\n  Status TraversalsGraph(const std::shared_ptr<Agraph_t> &g,\n                         const std::shared_ptr<GCGraph> &graph);\n\n  Status TraversalsNode(const std::shared_ptr<Agraph_t> &g,\n                        const std::shared_ptr<GCGraph> &graph);\n\n  std::shared_ptr<modelbox::GCEdge> NewGcEdgeFromAgedge(\n      const std::shared_ptr<GCGraph> &graph, Agedge_t *agedge);\n\n  Status TraversalsEdge(const std::shared_ptr<Agraph_t> &g,\n                        const 
std::shared_ptr<GCGraph> &graph);\n\n  Status TraversalsSubGraph(std::shared_ptr<Agraph_t> g,\n                            std::shared_ptr<GCGraph> graph);\n\n  std::string graphviz_conf_;\n  bool is_file_;\n  std::mutex lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_GRAPHVIZ_CONF_H\n"
  },
  {
    "path": "src/drivers/inference_engine/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-inference)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-inference-dlengine)\n\nif (NOT DLENGINE_FOUND) \n    message(STATUS \"Not found dlengine, disable dlengine flowunit\")\n    return()\nendif()\n\nadd_definitions(-DDLENGINE_BACKEND_ZOO=\"${DLENGINE_BACKEND_ZOO_DIR}\")\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\n\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${DLENGINE_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-engine-dlengine)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${DLENGINE_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"common-modelbox-engine-dlengine\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL\n        )\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_DLENGINE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-engine-dlengine.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/dlengine_inference.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_inference.h\"\n\n#include <unordered_map>\n#include <utility>\n\nstatic const std::unordered_map<std::string, dlengine::DeviceType>\n    g_device_type_map{{\"cpu\", dlengine::kCPU},\n                      {\"cuda\", dlengine::kCUDA},\n                      {\"ascend\", dlengine::kASCEND}};\n\nstatic const std::unordered_map<dlengine::DataType, modelbox::ModelBoxDataType>\n    g_dlengine_to_mb_type_map{\n        {dlengine::DataType::FLOAT, modelbox::MODELBOX_FLOAT},\n        {dlengine::DataType::INT8, modelbox::MODELBOX_INT8},\n        {dlengine::DataType::INT32, modelbox::MODELBOX_INT32}};\n\nstatic const std::unordered_map<modelbox::ModelBoxDataType, dlengine::DataType>\n    g_mb_to_dlengine_type_map{\n        {modelbox::MODELBOX_FLOAT, dlengine::DataType::FLOAT},\n        {modelbox::MODELBOX_INT8, dlengine::DataType::INT8},\n        {modelbox::MODELBOX_INT32, dlengine::DataType::INT32}};\n\nstatic const std::unordered_map<dlengine::DataType, size_t>\n    g_dlengine_type_size_map{{dlengine::DataType::FLOAT, sizeof(float)},\n                             {dlengine::DataType::INT8, sizeof(int8_t)},\n                             {dlengine::DataType::INT32, sizeof(int32_t)}};\n\nmodelbox::Status TensorShapeParam::Init(const std::string &shape) {\n  fix_shape_ = true;\n  return Parse(shape, 
shape_);\n}\n\nmodelbox::Status TensorShapeParam::Init(const std::string &min_shape,\n                                        const std::string &opt_shape,\n                                        const std::string &max_shape) {\n  fix_shape_ = false;\n  auto ret = Parse(min_shape, min_shape_);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"parse min shape failed\";\n    return ret;\n  }\n\n  ret = Parse(opt_shape, opt_shape_);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"parse opt shape failed\";\n    return ret;\n  }\n\n  ret = Parse(max_shape, max_shape_);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"parse max shape failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status TensorShapeParam::Parse(const std::string &shape_str,\n                                         std::vector<size_t> &shape_value) {\n  auto format_shape_str = shape_str;\n  std::transform(format_shape_str.begin(), format_shape_str.end(),\n                 format_shape_str.begin(),\n                 [](int c) { return std::tolower(c); });\n  auto dims = modelbox::StringSplit(format_shape_str, 'x');\n  if (dims.empty()) {\n    MBLOG_ERROR << \"shape [\" << shape_str\n                << \"] format error, it should be like nxcxhxw\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  for (const auto &dim : dims) {\n    try {\n      auto trim_dim = dim;\n      trim_dim.erase(0, trim_dim.find_first_not_of(' '));\n      trim_dim.erase(trim_dim.find_last_not_of(' ') + 1);\n      auto v = std::stoul(trim_dim);\n      shape_value.push_back(v);\n    } catch (const std::exception &e) {\n      MBLOG_ERROR << \"shape [\" << shape_str\n                  << \"] format error, it should be like nxcxhxw, detail: \"\n                  << e.what();\n      return modelbox::STATUS_BADCONF;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid TensorShapeParam::GenTensorConfig(nlohmann::json &tensor_config) {\n  if (fix_shape_) {\n    
tensor_config[\"shape\"] = shape_;\n    return;\n  }\n\n  tensor_config[\"min_shape\"] = min_shape_;\n  tensor_config[\"opt_shape\"] = opt_shape_;\n  tensor_config[\"max_shape\"] = max_shape_;\n}\n\nmodelbox::Status DLEngineInference::Init(\n    const std::shared_ptr<modelbox::Configuration> &unit_cfg,\n    const std::shared_ptr<modelbox::FlowUnitDesc> &desc,\n    const std::string &device_type, int32_t device_id) {\n  device_type_ = device_type;\n  auto device_type_item = g_device_type_map.find(device_type);\n  if (device_type_item == g_device_type_map.end()) {\n    MBLOG_ERROR << \"not support device type \" << device_type;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  infer_device_ = device_type_item->second;\n  device_id_ = device_id;\n\n  auto infer_desc =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(desc);\n  if (infer_desc == nullptr) {\n    MBLOG_ERROR << \"cast virtual inference desc failed, flowunit desc is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto infer_cfg = infer_desc->GetConfiguration();\n  if (infer_cfg == nullptr) {\n    MBLOG_ERROR << \"infer description get config failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  merge_config->Add(*infer_cfg);\n  merge_config->Add(*unit_cfg);\n\n  auto ret = InitInferInfo(infer_desc);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"init infer info failed, ret \" << ret;\n    return ret;\n  }\n\n  ret = LoadModel(merge_config);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"load model failed, ret \" << ret;\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::Infer(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto infer_context = inferer_->GetInferContext(infer_device_);\n  Defer { delete infer_context; };\n\n  auto batch_size = data_ctx->Input(input_name_list_[0])->Size();\n\n  auto ret = PrepareInput(infer_context, 
data_ctx, batch_size);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"prepare input for dlengine failed, ret \" << ret;\n    return ret;\n  }\n\n  ret = PrepareOutput(infer_context, data_ctx, batch_size);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"prepare output for dlengine failed, ret \" << ret;\n    return ret;\n  }\n\n  auto dl_ret = inferer_->Run(infer_context);\n  if (!dl_ret) {\n    MBLOG_ERROR << \"dlengine infer failed, see log for detail\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::InitInferInfo(\n    const std::shared_ptr<VirtualInferenceFlowUnitDesc> &desc) {\n  model_entry_ = desc->GetModelEntry();\n  if (model_entry_.empty()) {\n    MBLOG_ERROR << \"model entry is empty\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto input_desc_list = desc->GetFlowUnitInput();\n  auto output_desc_list = desc->GetFlowUnitOutput();\n  for (auto &input : input_desc_list) {\n    auto ret = InitInputInfo(input);\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"init input \" << input.GetPortName() << \" failed\";\n      return ret;\n    }\n  }\n\n  if (input_name_list_.empty()) {\n    MBLOG_ERROR << \"input name list is empty for model \" << model_entry_;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  for (auto &output : output_desc_list) {\n    output_name_list_.push_back(output.GetPortName());\n  }\n\n  if (output_name_list_.empty()) {\n    MBLOG_ERROR << \"output name list is empty for model \" << model_entry_;\n    return modelbox::STATUS_OK;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::InitInputInfo(\n    modelbox::FlowUnitInput &input) {\n  input_name_list_.push_back(input.GetPortName());\n  auto shape_str = input.GetProperity(\"shape\");\n  if (!shape_str.empty()) {\n    auto tensor_shape_param = std::make_shared<TensorShapeParam>();\n    auto ret = tensor_shape_param->Init(shape_str);\n    if (ret != 
modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"input port \" << input.GetPortName()\n                  << \" config wrong shape\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    input_tensor_shape_param_list_.push_back(tensor_shape_param);\n    return modelbox::STATUS_OK;\n  }\n\n  auto min_shape_str = input.GetProperity(\"min_shape\");\n  auto opt_shape_str = input.GetProperity(\"opt_shape\");\n  auto max_shape_str = input.GetProperity(\"max_shape\");\n  if (min_shape_str.empty() && opt_shape_str.empty() && max_shape_str.empty()) {\n    // no shape config\n    return modelbox::STATUS_OK;\n  }\n\n  auto tensor_shape_param = std::make_shared<TensorShapeParam>();\n  auto ret =\n      tensor_shape_param->Init(min_shape_str, opt_shape_str, max_shape_str);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"input port \" << input.GetPortName()\n                << \" config wrong shape\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  input_tensor_shape_param_list_.push_back(tensor_shape_param);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::LoadModel(\n    const std::shared_ptr<modelbox::Configuration> &cfg) {\n  nlohmann::json model_config;\n  auto ret = GenModelConfig(cfg, model_config);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"generate model config failed, ret \" << ret;\n    return ret;\n  }\n\n  auto model_config_str = model_config.dump();\n  inferer_ = dlengine::API::Compile(\n      model_entry_.c_str(), model_config_str.c_str(), DLENGINE_BACKEND_ZOO);\n  if (inferer_ != nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  inferer_ = dlengine::API::GetInferer(model_entry_.c_str());\n  if (inferer_ != nullptr) {\n    return modelbox::STATUS_OK;\n  }\n\n  return {modelbox::STATUS_FAULT, \"compile model \" + model_entry_ + \" failed\"};\n}\n\nmodelbox::Status DLEngineInference::GenModelConfig(\n    const std::shared_ptr<modelbox::Configuration> &cfg,\n    nlohmann::json &model_config) {\n  
model_config[\"model_type\"] = cfg->GetString(\"config.model_type\");\n  model_config[\"backend_type\"] = cfg->GetString(\"config.backend_type\");\n  if (cfg->Contain(\"config.precision\")) {\n    model_config[\"precision\"] = cfg->GetString(\"config.precision\");\n  }\n\n  if (input_tensor_shape_param_list_.empty()) {\n    return modelbox::STATUS_OK;\n  }\n\n  auto inputs_config = nlohmann::json::array();\n  for (size_t i = 0; i < input_name_list_.size(); ++i) {\n    nlohmann::json input_config;\n    input_config[\"name\"] = input_name_list_[i];\n    auto shape_param = input_tensor_shape_param_list_[i];\n    shape_param->GenTensorConfig(input_config);\n    inputs_config.push_back(input_config);\n  }\n\n  model_config[\"inputs\"] = inputs_config;\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::PrepareInput(\n    dlengine::IInferContext *infer_context,\n    const std::shared_ptr<modelbox::DataContext> &data_ctx, size_t batch_size) {\n  for (size_t i = 0; i < input_name_list_.size(); ++i) {\n    auto &input_name = input_name_list_[i];\n    auto in_buffer_list = data_ctx->Input(input_name);\n    if (in_buffer_list == nullptr) {\n      MBLOG_ERROR << \"input name \" << input_name << \" not found in data ctx\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    const auto &origin_shape = inferer_->GetOriginalInputShape(i);\n    auto data_type = inferer_->GetInputDataType(i);\n\n    if (!CheckDataType(data_type)) {\n      MBLOG_ERROR << \"not support model data type \" << (int32_t)data_type;\n      return modelbox::STATUS_FAULT;\n    }\n\n    auto single_tensor_size = SingleTensorSize(origin_shape, data_type);\n    if (single_tensor_size * batch_size != in_buffer_list->GetBytes()) {\n      MBLOG_ERROR << \"process batch \" << batch_size << \", input bytes \"\n                  << in_buffer_list->GetBytes() << \" != model input size \"\n                  << single_tensor_size * batch_size << \", input name \"\n                  << input_name;\n   
   return modelbox::STATUS_FAULT;\n    }\n\n    dlengine::DimSize cur_input_shape;\n    SetUpTensorShape(cur_input_shape, origin_shape, batch_size);\n\n    auto tensor = infer_context->GetInputTensor(i);\n    if (tensor == nullptr) {\n      MBLOG_ERROR << \"tensor index \" << i << \" is out of range for model \"\n                  << model_entry_;\n      return modelbox::STATUS_FAULT;\n    }\n\n    tensor->Resize(cur_input_shape);\n    tensor->SetPtr((std::intptr_t)in_buffer_list->ConstData());\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status DLEngineInference::PrepareOutput(\n    dlengine::IInferContext *infer_context,\n    const std::shared_ptr<modelbox::DataContext> &data_ctx, size_t batch_size) {\n  for (size_t i = 0; i < output_name_list_.size(); ++i) {\n    auto &output_name = output_name_list_[i];\n    auto out_buffer_list = data_ctx->Output(output_name);\n    if (out_buffer_list == nullptr) {\n      MBLOG_ERROR << \"output name \" << out_buffer_list\n                  << \" not found in data ctx\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    const auto &origin_shape = inferer_->GetOriginalOutputShape(i);\n    auto data_type = inferer_->GetOutputDataType(i);\n\n    if (!CheckDataType(data_type)) {\n      MBLOG_ERROR << \"not support model data type \" << (int32_t)data_type;\n      return modelbox::STATUS_FAULT;\n    }\n\n    auto single_tensor_size = SingleTensorSize(origin_shape, data_type);\n    auto ret = out_buffer_list->Build(\n        std::vector<size_t>(batch_size, single_tensor_size));\n    if (ret != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"build output buffer \" << output_name << \" failed, count \"\n                  << batch_size << \", size \" << single_tensor_size << \", err \"\n                  << ret;\n      return ret;\n    }\n\n    dlengine::DimSize cur_output_shape;\n    SetUpTensorShape(cur_output_shape, origin_shape, out_buffer_list->Size());\n\n    auto tensor = infer_context->GetOutputTensor(i);\n    if 
(tensor == nullptr) {\n      MBLOG_ERROR << \"tensor index \" << i << \" is out of range for model \"\n                  << model_entry_;\n      return modelbox::STATUS_FAULT;\n    }\n\n    tensor->Resize(cur_output_shape);\n    tensor->SetPtr((std::intptr_t)out_buffer_list->ConstData());\n\n    SetBufferInfo(out_buffer_list, data_type, origin_shape);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool DLEngineInference::CheckDataType(dlengine::DataType data_type) {\n  auto item = g_dlengine_to_mb_type_map.find(data_type);\n  if (item == g_dlengine_to_mb_type_map.end()) {\n    return false;\n  }\n\n  return true;\n}\n\nvoid DLEngineInference::SetBufferInfo(\n    const std::shared_ptr<modelbox::BufferList> &buffer_list,\n    dlengine::DataType data_type, const dlengine::DimSize &shape) {\n  // data_type has been checked\n  auto mb_data_type = g_dlengine_to_mb_type_map.at(data_type);\n  std::vector<size_t> out_shape;\n  out_shape.reserve(shape.num_dims);\n  const size_t n = 1;\n  out_shape.push_back(n);  // in modelbox, for each buffer n must be 1\n  for (size_t i = 1; i < shape.num_dims; ++i) {\n    out_shape.push_back(shape.dims[i]);\n  }\n\n  for (const auto &buffer : *buffer_list) {\n    buffer->Set(\"type\", mb_data_type);\n    buffer->Set(\"shape\", out_shape);\n  }\n}\n\nmodelbox::Status DLEngineInference::SetUpTensorShape(\n    dlengine::DimSize &cur_shape, const dlengine::DimSize &origin_shape,\n    size_t batch_size) {\n  cur_shape.num_dims = origin_shape.num_dims;\n  for (size_t i = 0; i < origin_shape.num_dims; ++i) {\n    cur_shape.dims[i] = origin_shape.dims[i];\n  }\n\n  if (batch_size > INT_MAX) {\n    MBLOG_ERROR << \"batch size \" << batch_size << \" is too big\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  cur_shape.dims[0] = (int32_t)batch_size;\n  return modelbox::STATUS_OK;\n}\n\nsize_t DLEngineInference::SingleTensorSize(const dlengine::DimSize &shape,\n                                           dlengine::DataType data_type) {\n  size_t 
tensor_size = 1;\n  for (size_t i = 1; i < shape.num_dims; ++i) {\n    tensor_size *= shape.dims[i];\n  }\n\n  // data_type has been checked\n  return tensor_size * g_dlengine_type_size_map.at(data_type);\n}\n"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/dlengine_inference.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DLENGINE_INFERENCE_H_\n#define MODELBOX_DLENGINE_INFERENCE_H_\n\n#include \"dlengine.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/flowunit.h\"\n#include \"nlohmann/json.hpp\"\n#include \"virtualdriver_inference.h\"\n\nclass TensorShapeParam {\n public:\n  modelbox::Status Init(const std::string &shape);\n\n  modelbox::Status Init(const std::string &min_shape,\n                        const std::string &opt_shape,\n                        const std::string &max_shape);\n\n  void GenTensorConfig(nlohmann::json &tensor_config);\n\n private:\n  modelbox::Status Parse(const std::string &shape_str,\n                         std::vector<size_t> &shape_value);\n\n  bool fix_shape_{false};\n  std::vector<size_t> shape_;\n  std::vector<size_t> min_shape_;\n  std::vector<size_t> opt_shape_;\n  std::vector<size_t> max_shape_;\n};\n\nclass DLEngineInference {\n public:\n  modelbox::Status Init(\n      const std::shared_ptr<modelbox::Configuration> &unit_cfg,\n      const std::shared_ptr<modelbox::FlowUnitDesc> &desc,\n      const std::string &device_type, int32_t device_id);\n\n  modelbox::Status Infer(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n private:\n  modelbox::Status InitInferInfo(\n      const 
std::shared_ptr<VirtualInferenceFlowUnitDesc> &desc);\n\n  modelbox::Status InitInputInfo(modelbox::FlowUnitInput &input);\n\n  modelbox::Status LoadModel(\n      const std::shared_ptr<modelbox::Configuration> &cfg);\n\n  modelbox::Status GenModelConfig(\n      const std::shared_ptr<modelbox::Configuration> &cfg,\n      nlohmann::json &model_config);\n\n  modelbox::Status PrepareInput(\n      dlengine::IInferContext *infer_context,\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      size_t batch_size);\n\n  modelbox::Status PrepareOutput(\n      dlengine::IInferContext *infer_context,\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      size_t batch_size);\n\n  bool CheckDataType(dlengine::DataType data_type);\n\n  void SetBufferInfo(const std::shared_ptr<modelbox::BufferList> &buffer_list,\n                     dlengine::DataType data_type,\n                     const dlengine::DimSize &shape);\n\n  modelbox::Status SetUpTensorShape(dlengine::DimSize &cur_shape,\n                                    const dlengine::DimSize &origin_shape,\n                                    size_t batch_size);\n\n  size_t SingleTensorSize(const dlengine::DimSize &shape,\n                          dlengine::DataType data_type);\n\n  std::string device_type_;\n  int32_t device_id_{0};\n\n  std::string model_entry_;\n  std::vector<std::string> input_name_list_;\n  std::vector<std::shared_ptr<TensorShapeParam>> input_tensor_shape_param_list_;\n  std::vector<std::string> output_name_list_;\n\n  std::string backend_zoo_;\n  dlengine::IInferer *inferer_{nullptr};\n  dlengine::DeviceType infer_device_{dlengine::DeviceType::UNKNOWN};\n};\n\n#endif  // MODELBOX_DLENGINE_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/dlengine_inference_flowunit.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_H_\n#define MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_H_\n\n#include \"dlengine_inference.h\"\n\nconstexpr const char *FLOWUNIT_NAME = \"dlengine_inference\";\nconstexpr const char *INFERENCE_TYPE = \"dlengine\";\n\n#endif  // MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_H_\n"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/dlengine_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"dlengine_inference_flowunit_test.h\"\n\nusing namespace modelbox;\n\nDLEngineInferenceFlowUnitTest::DLEngineInferenceFlowUnitTest(\n    const std::string &device_type)\n    : device_type_(device_type) {}\n\nStatus DLEngineInferenceFlowUnitTest::SetUp(\n    const std::string &infer_flowunit_name) {\n  infer_flowunit_name_ = infer_flowunit_name;\n  auto ret = flow_->Init();\n  if (!ret) {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nvoid DLEngineInferenceFlowUnitTest::Run(const std::string &name) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_driver_dir_ +\n                             \"\\\",\\\"\" + test_data_dir_ + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input1[type=input]\n          input2[type=input]\n          dlengine_inference[type=flowunit, flowunit=\")\" +\n                             infer_flowunit_name_ + R\"(\", device=)\" +\n                             device_type_ + R\"(, deviceid=0, batch_size=2]\n          output[type=output, device=cpu]\n\n          input1 -> dlengine_inference:in1\n          input2 -> dlengine_inference:in2\n          dlengine_inference:out -> output\n        }'''\n    format = \"graphviz\"\n    )\";\n\n  auto ret = flow_->BuildAndRun(name, 
toml_content, -1);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  auto extern_data = flow_->GetFlow()->CreateExternalDataMap();\n  // prepare input\n  auto in_buffer_list = extern_data->CreateBufferList();\n  const size_t tensor_size = 3 * 16 * 16;\n  in_buffer_list->Build({tensor_size * sizeof(float)});\n  auto in_buffer = in_buffer_list->At(0);\n  auto in_ptr = (float *)(in_buffer->MutableData());\n  for (size_t i = 0; i < tensor_size; ++i) {\n    in_ptr[i] = 1;\n  }\n  // send input\n  extern_data->Send(\"input1\", in_buffer_list);\n  extern_data->Send(\"input2\", in_buffer_list);\n  // recv output\n  OutputBufferList output_buffer_list_map;\n  ret = extern_data->Recv(output_buffer_list_map);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  auto output_buffer_list = output_buffer_list_map[\"output\"];\n  ASSERT_NE(output_buffer_list, nullptr);\n  ASSERT_EQ(output_buffer_list->Size(), 1);\n\n  auto output_buffer = output_buffer_list->At(0);\n  // check output\n  auto out_ptr = (const float *)(output_buffer->ConstData());\n  ASSERT_NE(out_ptr, nullptr);\n\n  for (size_t i = 0; i < tensor_size; ++i) {\n    ASSERT_EQ(out_ptr[i], 2);\n  }\n\n  ModelBoxDataType data_type;\n  std::vector<size_t> shape;\n  auto b_ret = output_buffer->Get(\"type\", data_type);\n  ASSERT_TRUE(b_ret);\n  b_ret = output_buffer->Get(\"shape\", shape);\n  ASSERT_TRUE(b_ret);\n  ASSERT_EQ(data_type, ModelBoxDataType::MODELBOX_FLOAT);\n  ASSERT_EQ(shape, std::vector<size_t>({1, 3, 16, 16}));\n  // wait end\n  extern_data->Close();\n  flow_->GetFlow()->Wait(5000);\n}\n"
  },
  {
    "path": "src/drivers/inference_engine/dlengine/dlengine_inference_flowunit_test.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_TEST_H_\n#define MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_TEST_H_\n\n#include \"modelbox/base/status.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nclass DLEngineInferenceFlowUnitTest {\n public:\n  DLEngineInferenceFlowUnitTest(const std::string &device_type);\n\n  modelbox::Status SetUp(const std::string &infer_flowunit_name);\n\n  void Run(const std::string &name);\n\n  void TearDown();\n\n private:\n  std::string device_type_;\n  std::shared_ptr<modelbox::MockFlow> flow_ =\n      std::make_shared<modelbox::MockFlow>();\n\n  std::string test_driver_dir_ = TEST_DRIVER_DIR;\n  std::string test_data_dir_ = TEST_DATA_DIR;\n  std::string infer_flowunit_name_;\n};\n\n#endif  // MODELBOX_DLENGINE_INFERENCE_FLOWUNIT_TEST_H_\n"
  },
  {
    "path": "src/drivers/inference_engine/mindspore/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10.2)\n\nproject(modelbox-inference-mindspore-lite)\nif (NOT MINDSPORE_LITE_FOUND) \n    message(STATUS \"Not found mindspore-lite, disable mindsopre-lite flowunit\")\n    return()\nendif()\nset(CMAKE_CXX_STANDARD 17)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nset(MINDSPORE_ENGINE_SRC_DIR \"${CMAKE_CURRENT_SOURCE_DIR}\")\n\nfile(GLOB_RECURSE UNIT_SOURCE ${MINDSPORE_ENGINE_SRC_DIR}/*.cpp \n    ${MINDSPORE_ENGINE_SRC_DIR}/*.cc \n    ${MINDSPORE_ENGINE_SRC_DIR}/*.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${MINDSPORE_ENGINE_SRC_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_ASCEND_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${MINDSPORE_LITE_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libcommon-engine-mindspore-lite-shared)\nlist(APPEND MODELBOX_UNIT_SOURCE_INCLUDE ${MINDSPORE_ENGINE_SRC_DIR})\nlist(APPEND MODELBOX_UNIT_SOURCE_INCLUDE 
${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset_target_properties(${MODELBOX_UNIT_SHARED}  PROPERTIES LINK_FLAGS \"-Wl,-rpath,${MINDSOPRE_LITE_LIB_DIR}\")\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MINDSPORE_LITE_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"common-engine-mindspore-lite\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT cpu-device-flowunit-devel)\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_MINDSPORE_LITE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libcommon-engine-mindspore-lite.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE 
${CMAKE_CURRENT_LIST_DIR})\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/inference_engine/mindspore/mindspore_inference.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mindspore_inference.h\"\n\n#include <cstdint>\n#include <map>\n#include <utility>\n\n#include \"include/api/context.h\"\n#include \"include/api/model.h\"\n#include \"include/api/serialization.h\"\n#include \"model_decrypt.h\"\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_inference.h\"\n\nstatic std::map<std::string, mindspore::ModelType> model_type_map{\n    {\"mindir\", mindspore::ModelType::kMindIR},\n    {\"air\", mindspore::ModelType::kAIR},\n    {\"om\", mindspore::ModelType::kOM},\n    {\"ms\", mindspore::ModelType::kMindIR},\n    {\"onnx\", mindspore::ModelType::kONNX}};\n\nstatic std::map<mindspore::DataType, std::string> data_type_map{\n    {mindspore::DataType::kNumberTypeFloat32, \"float\"},\n    {mindspore::DataType::kNumberTypeFloat16, \"float16\"},\n    {mindspore::DataType::kNumberTypeFloat64, \"float64\"},\n    {mindspore::DataType::kNumberTypeInt8, \"int8\"},\n    {mindspore::DataType::kNumberTypeInt32, \"int\"},\n    {mindspore::DataType::kNumberTypeInt16, \"int16\"},\n    {mindspore::DataType::kNumberTypeInt64, \"int64\"},\n    {mindspore::DataType::kNumberTypeUInt8, \"uint8\"},\n    {mindspore::DataType::kNumberTypeUInt16, \"uint16\"},\n    {mindspore::DataType::kNumberTypeUInt32, \"uint32\"},\n    {mindspore::DataType::kNumberTypeUInt64, \"uint64\"},\n    
{mindspore::DataType::kNumberTypeBool, \"bool\"},\n    {mindspore::DataType::kObjectTypeString, \"str\"}};\n\nstatic std::map<mindspore::DataType, modelbox::ModelBoxDataType>\n    data_type_flow_map{\n        {mindspore::DataType::kNumberTypeFloat32, modelbox::MODELBOX_FLOAT},\n        {mindspore::DataType::kNumberTypeFloat16, modelbox::MODELBOX_HALF},\n        {mindspore::DataType::kNumberTypeFloat64, modelbox::MODELBOX_DOUBLE},\n        {mindspore::DataType::kNumberTypeInt8, modelbox::MODELBOX_INT8},\n        {mindspore::DataType::kNumberTypeInt32, modelbox::MODELBOX_INT32},\n        {mindspore::DataType::kNumberTypeInt16, modelbox::MODELBOX_INT16},\n        {mindspore::DataType::kNumberTypeInt64, modelbox::MODELBOX_INT64},\n        {mindspore::DataType::kNumberTypeUInt8, modelbox::MODELBOX_UINT8},\n        {mindspore::DataType::kNumberTypeUInt16, modelbox::MODELBOX_UINT16},\n        {mindspore::DataType::kNumberTypeUInt32, modelbox::MODELBOX_UINT32},\n        {mindspore::DataType::kNumberTypeUInt64, modelbox::MODELBOX_UINT64},\n        {mindspore::DataType::kObjectTypeString, modelbox::MODELBOX_STRING},\n        {mindspore::DataType::kNumberTypeBool, modelbox::MODELBOX_BOOL}};\n\nMindSporeInference::MindSporeInference(\n    const std::shared_ptr<modelbox::Device> &flowunit_device,\n    const std::shared_ptr<mindspore::Context> &context)\n    : flowunit_device_(flowunit_device), context_(context) {}\n\nMindSporeInference::~MindSporeInference() {\n  model_ = nullptr;\n  context_ = nullptr;\n  flowunit_device_ = nullptr;\n}\n\nmodelbox::Status MindSporeInference::GetFlowUnitIO(\n    std::shared_ptr<modelbox::FlowUnitDesc> flowunit_desc) {\n  auto unit_desc =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(flowunit_desc);\n  auto input_desc = unit_desc->GetFlowUnitInput();\n  auto output_desc = unit_desc->GetFlowUnitOutput();\n  for (auto &input : input_desc) {\n    io_list_.input_name_list.push_back(input.GetPortName());\n    
io_list_.input_type_list.push_back(input.GetPortType());\n    io_list_.input_device_list.push_back(input.GetDeviceType());\n  }\n\n  for (auto &output : output_desc) {\n    io_list_.output_name_list.push_back(output.GetPortName());\n    io_list_.output_type_list.push_back(output.GetPortType());\n  }\n\n  if (io_list_.input_name_list.empty() || io_list_.output_name_list.empty()) {\n    MBLOG_ERROR << \"Wrong input name [\" << io_list_.input_name_list.size()\n                << \"] or output name [\" << io_list_.output_name_list.size()\n                << \"] number\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::GetModelType(\n    const std::string &model_entry, mindspore::ModelType &model_type) {\n  auto type_vec = modelbox::StringSplit(model_entry, '.');\n  if (type_vec.size() == 0) {\n    return {modelbox::STATUS_BADCONF, \"model entry format is not suitable.\"};\n  }\n\n  auto iter = model_type_map.find(type_vec.back());\n  if (iter == model_type_map.end()) {\n    model_type = mindspore::ModelType::kUnknownType;\n    return {modelbox::STATUS_BADCONF, \"\"};\n  }\n\n  model_type = model_type_map[type_vec.back()];\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::CheckMindSporeInfo(\n    const std::vector<mindspore::MSTensor> &tensor_list,\n    const std::vector<std::string> &name_list) {\n  if (tensor_list.size() != name_list.size()) {\n    auto err_msg = \"model port size \" + std::to_string(tensor_list.size()) +\n                   \" does not match for config file port name size \" +\n                   std::to_string(name_list.size());\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::CheckMindSporeIO() {\n  auto input_tensors = model_->GetInputs();\n  auto ret = CheckMindSporeInfo(input_tensors, io_list_.input_name_list);\n  if (ret != 
modelbox::STATUS_OK) {\n    auto err_msg = \"check ms input failed \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return ret;\n  }\n\n  auto output_tensors = model_->GetOutputs();\n  ret = CheckMindSporeInfo(output_tensors, io_list_.output_name_list);\n  if (ret != modelbox::STATUS_OK) {\n    auto err_msg = \"check ms output failed \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::Init(\n    const std::string &model_entry,\n    std::shared_ptr<modelbox::Configuration> &config,\n    const std::shared_ptr<modelbox::Drivers> &drivers_ptr) {\n  auto device_info_list = context_->MutableDeviceInfo();\n  for (const auto &device_info : device_info_list) {\n    device_type_.insert(device_info->GetDeviceType());\n  }\n  context_->SetInterOpParallelNum(std::thread::hardware_concurrency());\n  MBLOG_INFO << \"set interopparalle num: \" << context_->GetInterOpParallelNum();\n\n  mindspore::ModelType mindspore_type = mindspore::ModelType::kMindIR;\n  auto ret = GetModelType(model_entry, mindspore_type);\n  if (ret != modelbox::STATUS_OK) {\n    auto err_msg = \"get model type failed \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return ret;\n  }\n\n  mindspore::Status ms_status{mindspore::kSuccess};\n  ModelDecryption model_decrypt;\n  ret = model_decrypt.Init(model_entry, drivers_ptr, config);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    return {ret, \"init model fail\"};\n  }\n\n  model_ = std::make_shared<mindspore::Model>();\n  if (!config_file_.empty()) {\n    ms_status = model_->LoadConfig(config_file_);\n    if (ms_status != mindspore::kSuccess) {\n      std::string err_msg = \"load model config:\" + config_file_ +\n                            \" failed, ret: \" + ms_status.ToString();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n\n  if (model_decrypt.GetModelState() == 
ModelDecryption::MODEL_STATE_ENCRYPT) {\n    int64_t model_len = 0;\n    std::shared_ptr<uint8_t> modelBuf =\n        model_decrypt.GetModelSharedBuffer(model_len);\n    if (!modelBuf) {\n      return {modelbox::StatusError, \"Decrypt model fail\"};\n    }\n\n    ms_status = model_->Build((const void *)modelBuf.get(), (size_t)model_len,\n                              mindspore_type, context_);\n  } else if (model_decrypt.GetModelState() ==\n             ModelDecryption::MODEL_STATE_PLAIN) {\n    ms_status = model_->Build(model_entry, mindspore_type, context_);\n  }\n  if (ms_status != mindspore::kSuccess) {\n    std::string err_msg =\n        \"model init failed, code: \" + std::to_string(ms_status.StatusCode()) +\n        \", msg: \" + ms_status.ToString();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  ret = CheckMindSporeIO();\n  if (ret != modelbox::STATUS_OK) {\n    auto err_msg = \"input or output info got error, \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  for (auto &input_tensor : model_->GetInputs()) {\n    // check model info & whether padding\n    if (!model_need_padding_ && input_tensor.Shape()[0] > 1) {\n      model_need_padding_ = !multi_batch_in_buffer_;\n      auto max_batch_size = input_tensor.Shape()[0];\n      if (config_batch_size_ > max_batch_size) {\n        auto error_msg =\n            \"config_batch_size: \" + std::to_string(config_batch_size_) +\n            \" is larger than model_batch_size: \" +\n            std::to_string(max_batch_size);\n        MBLOG_ERROR << error_msg;\n        return {modelbox::STATUS_BADCONF, error_msg};\n      }\n      if (config_batch_size_ < max_batch_size) {\n        MBLOG_WARN << \"config_batch_size: \" << config_batch_size_\n                   << \" is smaller than model_batch_size: \" << max_batch_size\n                   << \", the padding data will increase the time required.\";\n      }\n\n      
MBLOG_INFO << \"model: \" << model_entry << \" enable padding.\";\n    }\n\n    std::stringstream ss;\n    ss << \"input name:\" << input_tensor.Name() << \", shape: [\";\n    for (size_t i = 0; i < input_tensor.Shape().size(); ++i) {\n      ss << input_tensor.Shape()[i];\n      if (i != input_tensor.Shape().size() - 1) {\n        ss << \", \";\n      }\n    }\n    ss << \"]\";\n    MBLOG_INFO << ss.str();\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts,\n    std::shared_ptr<modelbox::FlowUnitDesc> flowunit_desc) {\n  auto unit_desc =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(flowunit_desc);\n  auto config = unit_desc->GetConfiguration();\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n\n  config_batch_size_ = opts->GetProperty<uint32_t>(\n      \"batch_size\", modelbox::NORMAL_DEFAULT_BATCH_SIZE);\n\n  auto ret = GetFlowUnitIO(flowunit_desc);\n  if (ret != modelbox::STATUS_OK) {\n    return ret;\n  }\n\n  std::string config_file_ = merge_config->GetString(\"config.config_file\");\n  if (!modelbox::IsAbsolutePath(config_file_)) {\n    auto relpath =\n        modelbox::GetDirName(unit_desc->GetDriverDesc()->GetFilePath());\n    config_file_ = relpath + \"/\" + config_file_;\n  }\n\n  multi_batch_in_buffer_ =\n      merge_config->GetBool(\"config.multi_batch_in_buffer\", false);\n\n  ret = Init(unit_desc->GetModelEntry(), merge_config,\n             flowunit_device_->GetDeviceManager()->GetDrivers());\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Init inference failed, \" << ret;\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::Infer(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  auto ms_inputs = model_->GetInputs();\n  std::vector<std::vector<int64_t>> new_shapes;\n  
PrepareInputTensor(ms_inputs, new_shapes, data_ctx);\n\n  auto ms_ret = model_->Resize(ms_inputs, new_shapes);\n  if (ms_ret != mindspore::kSuccess) {\n    auto err_msg = \"mindspore resize failed, ret \" +\n                   std::to_string(ms_ret.StatusCode()) +\n                   \" err_msg: \" + ms_ret.ToString();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  for (const auto &input : ms_inputs) {\n    MBLOG_DEBUG << \"input portname: \" << input.Name()\n                << \", batch size: \" << input.Shape()[0]\n                << \", data size: \" << input.DataSize()\n                << \", element num: \" << input.ElementNum();\n  }\n\n  auto ms_outputs = model_->GetOutputs();\n  auto model_output_lists = std::vector<std::shared_ptr<modelbox::BufferList>>(\n      ms_outputs.size(),\n      std::make_shared<modelbox::BufferList>(flowunit_device_));\n  auto ret = PrepareOutputTensor(data_ctx, ms_outputs, model_output_lists);\n  if (ret != modelbox::STATUS_OK) {\n    auto err_msg = \"prepare output tensor failed, err_msg: \" + ret.Errormsg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  ms_ret = model_->Predict(ms_inputs, &ms_outputs);\n  if (ms_ret != mindspore::kSuccess) {\n    auto err_msg = \"mindspore inference failed, ret \" +\n                   std::to_string(ms_ret.StatusCode()) +\n                   \" err_msg: \" + ms_ret.ToString();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  ret = PrepareOutputBufferList(data_ctx, ms_outputs);\n  if (ret != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"prepare output bufferlist failed, err_msg: \" + ret.Errormsg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  padding_batch_size_ = 0;\n\n  return modelbox::STATUS_OK;\n}\n\nvoid MindSporeInference::PrepareInputTensor(\n    std::vector<mindspore::MSTensor> &ms_inputs,\n    
std::vector<std::vector<int64_t>> &new_shapes,\n    const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n  for (size_t i = 0; i < ms_inputs.size(); ++i) {\n    auto name = ms_inputs[i].Name();\n    auto input_shape = ms_inputs[i].Shape();\n    auto portname = io_list_.input_name_list[i];\n    auto input_buffer_list = data_ctx->Input(portname);\n    MBLOG_DEBUG << \"input_buffer_list: \" << portname << \", model port: \" << name\n                << \", size: \" << input_buffer_list->Size()\n                << \", bytes:\" << input_buffer_list->GetBytes();\n    std::vector<size_t> b_shape;\n    if (!input_buffer_list->At(0)->Get(\"shape\", b_shape) ||\n        input_shape.size() != b_shape.size()) {\n      MBLOG_ERROR << \"get input shape failed, tensor shape size:\"\n                  << input_shape.size()\n                  << \", buffer shape size: \" << b_shape.size();\n      return;\n    }\n\n    for (size_t index = 0; index < b_shape.size(); ++index) {\n      input_shape[index] = b_shape[index];\n    }\n    \n    // input batch padding\n    if (model_need_padding_) {\n      padding_batch_size_ = input_shape[0] - input_buffer_list->Size();\n      if (padding_batch_size_ > 0) {\n        auto padding_buffer =\n            std::make_shared<modelbox::Buffer>(input_buffer_list->GetDevice());\n        auto padding_bytes =\n            padding_batch_size_ * ms_inputs[i].DataSize() / input_shape[0];\n        padding_buffer->Build({padding_bytes});\n        input_buffer_list->PushBack(padding_buffer);\n        MBLOG_DEBUG << \"input_port:\" << portname\n                    << \", padding batch:\" << padding_batch_size_\n                    << \", padding bytes:\" << padding_bytes;\n      }\n      input_buffer_list->MakeContiguous();\n    }\n    // cpu is host data\n    if (io_list_.input_device_list[i] == \"cpu\") {\n      ms_inputs[i].SetData(const_cast<void *>(input_buffer_list->ConstData()),\n                           false);\n    } else {\n      
ms_inputs[i].SetDeviceData(\n          const_cast<void *>(input_buffer_list->ConstData()));\n    }\n    // set current batch size\n    if (!multi_batch_in_buffer_ && !model_need_padding_) {\n      input_shape[0] = input_buffer_list->Size();\n    }\n    MBLOG_DEBUG << \"input name: \" << name << \" shape: \";\n    for (auto &item : input_shape) {\n      MBLOG_DEBUG << item;\n    }\n    new_shapes.push_back(input_shape);\n  }\n}\n\nmodelbox::Status MindSporeInference::PrepareOutputTensor(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<mindspore::MSTensor> &ms_outputs,\n    std::vector<std::shared_ptr<modelbox::BufferList>> &model_output_lists) {\n  if (device_type_.find(mindspore::DeviceType::kGPU) == device_type_.end()) {\n    // only gpu support set output device data\n    return modelbox::STATUS_OK;\n  }\n\n  // set output mem\n  for (size_t i = 0; i < ms_outputs.size(); ++i) {\n    auto name = ms_outputs[i].Name();\n    auto portname = io_list_.output_name_list[i];\n    if (ms_outputs[i].Shape()[0] == 0) {\n      auto err_msg = \"output_tensor \" + portname + \" first dim is zero\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    // set all output tensor mem\n    model_output_lists[i]->Build(std::vector<size_t>(\n        ms_outputs[i].Shape()[0],\n        ms_outputs[i].DataSize() / ms_outputs[i].Shape()[0]));\n    ms_outputs[i].SetDeviceData(model_output_lists[i]->MutableData());\n    MBLOG_DEBUG << \"output port name: \" << portname\n                << \", batch size: \" << ms_outputs[i].Shape()[0]\n                << \", data size: \" << ms_outputs[i].DataSize()\n                << \", element num: \" << ms_outputs[i].ElementNum()\n                << \", datatype: \" << (int)ms_outputs[i].DataType();\n\n    // skip padding tensor\n    auto output_buffer_list = data_ctx->Output(portname);\n    for (size_t b = 0; b < ms_outputs[i].Shape()[0] - padding_batch_size_;\n         ++b) {\n      
output_buffer_list->PushBack(model_output_lists[i]->At(b));\n    }\n\n    auto tensor_shape = ms_outputs[i].Shape();\n    std::vector<size_t> output_shape;\n    tensor_shape[0] = 1;\n    MBLOG_DEBUG << \"output name:\" << name << \", shape: \";\n    for (auto &item : tensor_shape) {\n      output_shape.push_back(item);\n      MBLOG_DEBUG << item;\n    }\n    output_buffer_list->Set(\"shape\", output_shape);\n    output_buffer_list->Set(\"type\",\n                            data_type_flow_map[ms_outputs[i].DataType()]);\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status MindSporeInference::PrepareOutputBufferList(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<mindspore::MSTensor> &ms_outputs) {\n  if (device_type_.find(mindspore::DeviceType::kGPU) != device_type_.end()) {\n    // gpu infer has been set output buffer list\n    return modelbox::STATUS_OK;\n  }\n\n  for (size_t i = 0; i < ms_outputs.size(); ++i) {\n    auto portname = io_list_.output_name_list[i];\n    auto output_buffer_list = data_ctx->Output(portname);\n    MBLOG_DEBUG << \"output port name: \" << portname\n                << \", batch size: \" << ms_outputs[i].Shape()[0]\n                << \", data size: \" << ms_outputs[i].DataSize()\n                << \", element num: \" << ms_outputs[i].ElementNum()\n                << \", datatype: \" << (int)ms_outputs[i].DataType();\n    if (ms_outputs[i].Shape()[0] == 0) {\n      auto err_msg = \"ms_outputs \" + portname + \" first dim is zero\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    // skip padding batch\n    size_t tensor_output_batch = ms_outputs[i].Shape()[0];\n    size_t buffer_output_batch = tensor_output_batch - padding_batch_size_;\n    size_t output_batch_bytes =\n        ms_outputs[i].DataSize() / ms_outputs[i].Shape()[0];\n    if (multi_batch_in_buffer_) {\n      buffer_output_batch = 1;\n      output_batch_bytes = ms_outputs[i].DataSize();\n    }\n  
  MBLOG_DEBUG << \"tensor_output_batch:\" << tensor_output_batch\n                << \", padding_batch_size:\" << padding_batch_size_\n                << \", output_batch_size:\" << buffer_output_batch;\n    std::vector<size_t> shape_size(buffer_output_batch, output_batch_bytes);\n\n    auto status = output_buffer_list->BuildFromHost(\n        shape_size, ms_outputs[i].MutableData(),\n        buffer_output_batch * output_batch_bytes);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg =\n          \"output buffer list build from host failed \" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto tensor_shape = ms_outputs[i].Shape();\n    std::vector<size_t> output_shape;\n    if (!multi_batch_in_buffer_) {\n      tensor_shape[0] = 1;\n    }\n    MBLOG_DEBUG << \"output shape: \";\n    for (const auto &item : tensor_shape) {\n      output_shape.push_back(item);\n      MBLOG_DEBUG << item;\n    }\n    output_buffer_list->Set(\"shape\", output_shape);\n    output_buffer_list->Set(\"type\",\n                            data_type_flow_map[ms_outputs[i].DataType()]);\n  }\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/drivers/inference_engine/mindspore/mindspore_inference.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MINDSPRORE_INFERENCE_H_\n#define MODELBOX_MINDSPRORE_INFERENCE_H_\n\n#include \"include/api/context.h\"\n#include \"include/api/model.h\"\n#include \"modelbox/base/configuration.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/flowunit.h\"\n\nstruct MindSporeIOList {\n  std::vector<std::string> input_name_list;\n  std::vector<std::string> output_name_list;\n  std::vector<std::string> input_type_list;\n  std::vector<std::string> output_type_list;\n  std::vector<std::string> input_device_list;\n};\n\nconstexpr const char *INFERENCE_TYPE = \"mindspore\";\n\nclass MindSporeInference {\n public:\n  MindSporeInference(const std::shared_ptr<modelbox::Device> &flowunit_device,\n                     const std::shared_ptr<mindspore::Context> &context);\n  virtual ~MindSporeInference();\n\n  modelbox::Status Open(const std::shared_ptr<modelbox::Configuration> &opts,\n                        std::shared_ptr<modelbox::FlowUnitDesc> flowunit_desc);\n  modelbox::Status Infer(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx);\n\n private:\n  modelbox::Status Init(const std::string &model_entry,\n                        std::shared_ptr<modelbox::Configuration> &config,\n                        const std::shared_ptr<modelbox::Drivers> &drivers_ptr);\n  modelbox::Status 
GetFlowUnitIO(\n      std::shared_ptr<modelbox::FlowUnitDesc> flowunit_desc);\n  modelbox::Status GetModelType(const std::string &model_entry,\n                                mindspore::ModelType &model_type);\n  modelbox::Status CheckMindSporeInfo(\n      const std::vector<mindspore::MSTensor> &tensor_list,\n      const std::vector<std::string> &name_list);\n  modelbox::Status CheckMindSporeIO();\n  void PrepareInputTensor(\n      std::vector<mindspore::MSTensor> &ms_inputs,\n      std::vector<std::vector<int64_t>> &new_shapes,\n      const std::shared_ptr<modelbox::DataContext> &data_ctx);\n  modelbox::Status PrepareOutputTensor(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<mindspore::MSTensor> &ms_outputs,\n      std::vector<std::shared_ptr<modelbox::BufferList>> &model_output_lists);\n  modelbox::Status PrepareOutputBufferList(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<mindspore::MSTensor> &ms_outputs);\n\n private:\n  std::shared_ptr<modelbox::Device> flowunit_device_;\n  std::shared_ptr<mindspore::Context> context_;\n  std::shared_ptr<mindspore::Model> model_{nullptr};\n  int64_t batch_size_{0};\n  struct MindSporeIOList io_list_;\n  std::string config_file_;\n  std::set<mindspore::DeviceType> device_type_;\n  bool multi_batch_in_buffer_{false};\n  bool model_need_padding_{false};\n  size_t padding_batch_size_{0};\n  uint32_t config_batch_size_{1};\n};\n\n#endif\n"
  },
  {
    "path": "src/drivers/inference_engine/mindspore/mindspore_inference_flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mindspore_inference_flowunit_test.h\"\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n\nStatus InferenceMindSporeFlowUnitTest::Init() {\n  auto ret = AddMockFlowUnit();\n  return ret;\n}\n\nStatus InferenceMindSporeFlowUnitTest::Run(const std::string &name,\n                                           const std::string &graph) {\n  auto driver_flow = GetDriverFlow();\n  auto ret = driver_flow->BuildAndRun(name, graph);\n  return ret;\n}\n\nStatus InferenceMindSporeFlowUnitTest::AddMockFlowUnit() {\n  {\n    auto mock_desc =\n        GenerateFlowunitDesc(\"prepare_ms_infer_data\", {}, {\"out1\", \"out2\"});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(2);\n    auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                         const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n      auto ext_data = mock_flowunit->CreateExternalData();\n      if (!ext_data) {\n        MBLOG_ERROR << \"can not get external data.\";\n      }\n\n      
auto buffer_list = ext_data->CreateBufferList();\n      buffer_list->Build({10});\n\n      auto status = ext_data->Send(buffer_list);\n      if (!status) {\n        MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n      }\n\n      status = ext_data->Close();\n      if (!status) {\n        MBLOG_ERROR << \"external data close failed:\" << status;\n      }\n\n      return STATUS_OK;\n    };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext> &op_ctx,\n            const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n          MBLOG_INFO << \"prepare_ms_infer_data \"\n                     << \"Process\";\n          auto output_buf_1 = op_ctx->Output(\"out1\");\n          auto output_buf_2 = op_ctx->Output(\"out2\");\n          const size_t len = 2;\n          std::vector<size_t> shape_vector(2, len * sizeof(float));\n          ModelBoxDataType type = MODELBOX_FLOAT;\n\n          output_buf_1->Build(shape_vector);\n          output_buf_1->Set(\"type\", type);\n          std::vector<size_t> shape{len, 2};\n          output_buf_1->Set(\"shape\", shape);\n          auto *dev_data1 = (float *)(output_buf_1->MutableData());\n          MBLOG_INFO << \"output_buf_1.size: \" << output_buf_1->Size();\n          float val = 1.0;\n          for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n            for (size_t j = 0; j < len; ++j) {\n              dev_data1[i * len + j] = val;\n              val += 1.0;\n            }\n          }\n\n          output_buf_2->Build(shape_vector);\n          output_buf_2->Set(\"type\", type);\n          output_buf_2->Set(\"shape\", shape);\n          auto *dev_data2 = (float *)(output_buf_2->MutableData());\n          val = 2.0;\n          for (size_t i = 0; i < output_buf_2->Size(); ++i) {\n            for (size_t j = 0; j < len; ++j) {\n              dev_data2[i * len + j] = val;\n              val += 1.0;\n            }\n          }\n\n          return STATUS_OK;\n        };\n\n    auto 
mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterOpenFunc(open_func);\n    mock_functions->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_functions->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  {\n    auto mock_desc = GenerateFlowunitDesc(\"check_ms_infer_result\", {\"in\"}, {});\n    mock_desc->SetFlowType(STREAM);\n    mock_desc->SetMaxBatchSize(2);\n    auto data_post_func =\n        [=](const std::shared_ptr<DataContext> &op_ctx,\n            const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n          MBLOG_INFO << \"check_ms_infer_result \"\n                     << \"DataPost\";\n          return STATUS_STOP;\n        };\n\n    auto process_func =\n        [=](const std::shared_ptr<DataContext> &op_ctx,\n            const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n          std::shared_ptr<BufferList> input_bufs = op_ctx->Input(\"in\");\n          EXPECT_EQ(input_bufs->Size(), 2);\n          std::vector<int64_t> input_shape;\n          auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n          EXPECT_TRUE(result);\n          EXPECT_EQ(input_shape.size(), 2);\n          EXPECT_EQ(input_shape[0], 2);\n          EXPECT_EQ(input_shape[1], 2);\n\n          const auto *ptr = (const float *)input_bufs->ConstData();\n          float val = 3.0;\n          for (size_t i = 0; i < 4; ++i) {\n            EXPECT_TRUE((std::abs(ptr[i]) - val) < 1e-7);\n            val += 2.0;\n          }\n\n          return STATUS_OK;\n        };\n\n    auto mock_functions = std::make_shared<MockFunctionCollection>();\n    mock_functions->RegisterDataPostFunc(data_post_func);\n    mock_functions->RegisterProcessFunc(process_func);\n    driver_flow_->AddFlowUnitDesc(\n        mock_desc, mock_functions->GenerateCreateFunc(), TEST_DRIVER_DIR);\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<MockFlow> InferenceMindSporeFlowUnitTest::GetDriverFlow() {\n  return 
driver_flow_;\n}\n}  // namespace modelbox\n"
  },
  {
    "path": "src/drivers/inference_engine/mindspore/mindspore_inference_flowunit_test.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MINDSPRORE_INFERENCE_TEST_H_\n#define MODELBOX_FLOWUNIT_MINDSPRORE_INFERENCE_TEST_H_\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass InferenceMindSporeFlowUnitTest {\n public:\n  Status Init();\n\n  Status Run(const std::string &name, const std::string &graph);\n\n  std::shared_ptr<MockFlow> GetDriverFlow();\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<MockFlow> driver_flow_ = std::make_shared<MockFlow>();\n};\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-inference-tensorflow)\n\nif (NOT TENSORFLOW_FOUND) \n    message(STATUS \"Not found tensorflow, disable common flowunit\")\n    return()\nendif()\n\nfile(GLOB_RECURSE MODELBOX_UNIT_SOURCE *.cpp *.cc *.c)\nfile(GLOB PLUGIN_FILES ./test_plugin/*.cc ./test_plugin/*.cpp ./test_plugin/*.c)\nlist(REMOVE_ITEM MODELBOX_UNIT_SOURCE ${PLUGIN_FILES})\nset(HEADER tensorflow_inference_plugin.h)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE})\ninclude_directories(${TENSORFLOW_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_INFERENCE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-engine-tensorflow)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \nSOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} 
rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${TENSORFLOW_LIBRARIES})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${MODELBOX_COMMON_INFERENCE_LIBRARY})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-engine-tensorflow\")\n\ninstall(FILES ${HEADER} \n        COMPONENT cpu-device-flowunit-devel\n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/modelbox)\n\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED} \n        COMPONENT cpu-device-flowunit\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL\n        )\n\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_INFERENCE_TENSORFLOW_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-engine-tensorflow.so CACHE INTERNAL \"\")\n\nadd_subdirectory(test_plugin)\n"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/tensorflow_inference_common.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tensorflow_inference_common.h\"\n\n#include <model_decrypt.h>\n#include <modelbox/base/crypto.h>\n\n#include <utility>\n\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_inference.h\"\n\nstatic std::map<std::string, TF_DataType> type_map = {\n    {\"FLOAT\", TF_FLOAT}, {\"DOUBLE\", TF_DOUBLE}, {\"INT\", TF_INT32},\n    {\"UINT8\", TF_UINT8}, {\"LONG\", TF_INT64},    {\"STRING\", TF_STRING}};\n\nstatic std::map<TF_DataType, modelbox::ModelBoxDataType> tftype_mbtype_map = {\n    {TF_FLOAT, modelbox::MODELBOX_FLOAT},\n    {TF_DOUBLE, modelbox::MODELBOX_DOUBLE},\n    {TF_INT32, modelbox::MODELBOX_INT32},\n    {TF_UINT8, modelbox::MODELBOX_UINT8},\n    {TF_INT64, modelbox::MODELBOX_INT64},\n    {TF_STRING, modelbox::MODELBOX_STRING}};\n\nstatic std::map<modelbox::ModelBoxDataType, TF_DataType> mbtype_tftype_map = {\n    {modelbox::MODELBOX_FLOAT, TF_FLOAT},\n    {modelbox::MODELBOX_DOUBLE, TF_DOUBLE},\n    {modelbox::MODELBOX_INT32, TF_INT32},\n    {modelbox::MODELBOX_UINT8, TF_UINT8},\n    {modelbox::MODELBOX_INT64, TF_INT64},\n    {modelbox::MODELBOX_STRING, TF_STRING}};\n\nmodelbox::Status ConvertTFTypeToModelBoxType(\n    TF_DataType tf_type, modelbox::ModelBoxDataType &modelbox_type) {\n  auto iter = tftype_mbtype_map.find(tf_type);\n  if (iter == tftype_mbtype_map.end()) {\n    return 
{modelbox::STATUS_NOTSUPPORT,\n            \"covert Tensorflow Type to ModelBox Type failed, unsupport type \"};\n  }\n  modelbox_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ConvertModelBoxTypeToTFType(\n    modelbox::ModelBoxDataType modelbox_type, TF_DataType &tf_type) {\n  auto iter = mbtype_tftype_map.find(modelbox_type);\n  if (iter == mbtype_tftype_map.end()) {\n    return {modelbox::STATUS_NOTSUPPORT,\n            \"covert ModelBox Type to Tensorflow Type failed, unsupport type \" +\n                std::to_string(modelbox_type)};\n  }\n  tf_type = iter->second;\n  return modelbox::STATUS_SUCCESS;\n}\n\nvoid DeleteTensor(TF_Tensor *tensor) {\n  if (tensor == nullptr) {\n    return;\n  }\n  TF_DeleteTensor(tensor);\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::ClearTensor(\n    std::vector<TF_Tensor *> &input_tensor_list,\n    std::vector<TF_Tensor *> &output_tensor_list) {\n  for (auto &t : input_tensor_list) {\n    DeleteTensor(t);\n  }\n\n  for (auto &t : output_tensor_list) {\n    DeleteTensor(t);\n  }\n\n  input_tensor_list.clear();\n  output_tensor_list.clear();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowParams::Clear() {\n  input_name_list_.clear();\n  output_name_list_.clear();\n  input_type_list_.clear();\n  output_type_list_.clear();\n  input_op_list.clear();\n  output_op_list.clear();\n\n  if (nullptr != options) {\n    TF_DeleteSessionOptions(options);\n    options = nullptr;\n  }\n\n  if (nullptr != session && nullptr != status) {\n    TF_CloseSession(session, status);\n    if (TF_GetCode(status) != TF_OK) {\n      auto err_msg = \"close session failed: \" + std::string(TF_Message(status));\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    TF_DeleteSession(session, status);\n    if (TF_GetCode(status) != TF_OK) {\n      auto err_msg =\n          \"delete session failed: \" + std::string(TF_Message(status));\n      MBLOG_ERROR << 
err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    session = nullptr;\n  }\n\n  if (nullptr != status) {\n    TF_DeleteStatus(status);\n    status = nullptr;\n  }\n\n  if (graph != nullptr) {\n    TF_DeleteGraph(graph);\n    graph = nullptr;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nInferenceTensorflowFlowUnit::InferenceTensorflowFlowUnit() = default;\nInferenceTensorflowFlowUnit::~InferenceTensorflowFlowUnit() {\n  pre_process_ = nullptr;\n  post_process_ = nullptr;\n  inference_plugin_ = nullptr;\n\n  if (driver_handler_ != nullptr) {\n    dlclose(driver_handler_);\n    driver_handler_ = nullptr;\n  }\n};\n\nmodelbox::Status InferenceTensorflowFlowUnit::ReadBufferFromFile(\n    const std::string &file, TF_Buffer *buf) {\n  int64_t model_len = 0;\n  auto config = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n                    this->GetFlowUnitDesc())\n                    ->GetConfiguration();\n  ModelDecryption model_decrypt;\n\n  if (modelbox::STATUS_SUCCESS !=\n      model_decrypt.Init(\n          file, GetBindDevice()->GetDeviceManager()->GetDrivers(), config)) {\n    return {modelbox::STATUS_INVALID, \"int model failed.\"};\n  }\n  uint8_t *modelBuf = model_decrypt.GetModelBuffer(model_len);\n  if (!modelBuf) {\n    return {modelbox::STATUS_INVALID, \"decrypt model data failed.\"};\n  }\n  buf->data = modelBuf;\n  buf->length = model_len;\n  buf->data_deallocator = [](void *data, size_t length) { free(data); };\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::LoadGraph(\n    const std::string &model_path) {\n  modelbox::Status status;\n  MBLOG_INFO << \"model path: \" << model_path;\n  if (model_path.empty()) {\n    return {modelbox::STATUS_INVALID, \"model path is empty.\"};\n  }\n\n  TF_Buffer *buffer = TF_NewBuffer();\n  if (buffer == nullptr) {\n    return {modelbox::STATUS_NOMEM, \"create tf buffer failed.\"};\n  }\n  Defer { TF_DeleteBuffer(buffer); };\n\n  status = 
ReadBufferFromFile(model_path, buffer);\n  if (status != modelbox::STATUS_OK) {\n    return {status, \"load model failed.\"};\n  }\n\n  params_.graph = TF_NewGraph();\n  if (nullptr == params_.graph) {\n    return {modelbox::STATUS_FAULT, \"TF_NewGraph() failed.\"};\n  }\n\n  auto *opts = TF_NewImportGraphDefOptions();\n  if (nullptr == opts) {\n    TF_DeleteGraph(params_.graph);\n    auto err_msg = \"TF_NewImportGraphDefOptions() failed: \" +\n                   std::string(TF_Message(params_.status));\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  TF_GraphImportGraphDef(params_.graph, buffer, opts, params_.status);\n  if (TF_GetCode(params_.status) != TF_OK) {\n    TF_DeleteGraph(params_.graph);\n    auto err_msg = \"TF_GraphImportGraphDef failed: \" +\n                   std::string(TF_Message(params_.status));\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  TF_DeleteImportGraphDefOptions(opts);\n\n  if (TF_GetCode(params_.status) != TF_OK) {\n    TF_DeleteGraph(params_.graph);\n    auto err_msg =\n        \"loadGraph failed: \" + std::string(TF_Message(params_.status));\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::GetTFOperation(\n    const std::string &name, TF_Output &op) {\n  auto port_info = modelbox::StringSplit(name, ':');\n  int index = 0;\n  try {\n    if (port_info.size() == 2) {\n      index = std::stoi(port_info[1]);\n    }\n  } catch (const std::exception &e) {\n    MBLOG_WARN << \"Convert id \" << port_info[1] << \" failed, err \" << e.what()\n               << \"; use index 0 as default.\";\n  }\n\n  op = TF_Output{TF_GraphOperationByName(params_.graph, port_info[0].c_str()),\n                 index};\n  if (nullptr == op.oper) {\n    auto err_msg = \"model port \" + name + \":\" + std::to_string(index) +\n                   \" not exists, please check 
if port name is correct.\";\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::FillInput(\n    const std::vector<modelbox::FlowUnitInput> &flowunit_input_list) {\n  for (auto const &input_item : flowunit_input_list) {\n    auto input_name = input_item.GetPortName();\n    auto input_type = input_item.GetPortType();\n    params_.input_name_list_.push_back(input_name);\n    params_.input_type_list_.push_back(input_type);\n    TF_Output input_op;\n    auto status = GetTFOperation(input_name, input_op);\n    if (status != modelbox::STATUS_OK) {\n      return status;\n    }\n\n    params_.input_op_list.push_back(input_op);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::FillOutput(\n    const std::vector<modelbox::FlowUnitOutput> &flowunit_output_list) {\n  for (auto const &output_item : flowunit_output_list) {\n    auto output_name = output_item.GetPortName();\n    auto output_type = output_item.GetPortType();\n    params_.output_name_list_.push_back(output_name);\n    params_.output_type_list_.push_back(output_type);\n    TF_Output output_op;\n    auto status = GetTFOperation(output_name, output_op);\n    if (status != modelbox::STATUS_OK) {\n      return status;\n    }\n    params_.output_op_list.push_back(output_op);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::NewSession(\n    bool is_save_model, const std::string &model_entry) {\n  params_.status = TF_NewStatus();\n  if (nullptr == params_.status) {\n    return {modelbox::STATUS_FAULT, \"TF_NewStatus failed.\"};\n  }\n\n  params_.options = TF_NewSessionOptions();\n  if (nullptr == params_.options) {\n    return {modelbox::STATUS_FAULT, \"TF_NewSessionOptions failed.\"};\n  }\n\n  TF_SetConfig(params_.options, (void *)params_.config_proto_binary_.data(),\n               params_.config_proto_binary_.size(), params_.status);\n  if 
(TF_GetCode(params_.status) != TF_OK) {\n    auto err_msg =\n        \"TF_SetConfig failed: \" + std::string(TF_Message(params_.status));\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (is_save_model) {\n    TF_Buffer *metagraph = TF_NewBuffer();\n    if (metagraph == nullptr) {\n      const auto *err_msg = \"TF_NewBuffer metagraph failed.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    params_.graph = TF_NewGraph();\n    if (params_.graph == nullptr) {\n      const auto *err_msg = \"TF_NewGraph graph failed.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    params_.session = TF_LoadSessionFromSavedModel(\n        params_.options, nullptr, model_entry.c_str(), &TAGS, 1, params_.graph,\n        metagraph, params_.status);\n    Defer { TF_DeleteBuffer(metagraph); };\n\n    if (TF_GetCode(params_.status) != TF_OK) {\n      TF_DeleteGraph(params_.graph);\n      auto err_msg = \"TF_LoadSessionFromSavedModel failed: \" +\n                     std::string(TF_Message(params_.status));\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    return modelbox::STATUS_OK;\n  }\n\n  params_.session =\n      TF_NewSession(params_.graph, params_.options, params_.status);\n\n  if (TF_GetCode(params_.status) != TF_OK) {\n    auto err_msg =\n        \"TF_NewSession failed: \" + std::string(TF_Message(params_.status));\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nbool InferenceTensorflowFlowUnit::IsSaveModelType(\n    const std::string &model_path) {\n  size_t found = model_path.find(\".pb\");\n  if (found == std::string::npos) {\n    return true;\n  }\n\n  return false;\n}\n\nstatic void StringHex2Hex(const std::vector<std::string> &string_vector,\n                          std::vector<uint8_t> &uint8_vector) {\n  if (string_vector.empty()) {\n    uint8_vector.clear();\n  }\n\n  for (const auto &str : string_vector) {\n    auto num = std::stoul(str, nullptr, 16);\n    
uint8_vector.push_back((uint8_t)num);\n  }\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::InitConfig(\n    const std::shared_ptr<modelbox::Configuration> &fu_config) {\n  auto inference_desc_ =\n      std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n          this->GetFlowUnitDesc());\n  auto flowunit_input_list = inference_desc_->GetFlowUnitInput();\n  auto flowunit_output_list = inference_desc_->GetFlowUnitOutput();\n\n  std::string model_path = inference_desc_->GetModelEntry();\n  params_.status = TF_NewStatus();\n  if (params_.status == nullptr) {\n    return {modelbox::STATUS_FAULT, \"TF_NewStatus failed.\"};\n  }\n\n  if (fu_config->Contain(\"config.config_proto\")) {\n    auto config_strings = fu_config->GetStrings(\"config.config_proto\");\n    StringHex2Hex(config_strings, params_.config_proto_binary_);\n  }\n\n  bool is_save_model = IsSaveModelType(model_path);\n  MBLOG_INFO << \"is_save_model:\\t\" << is_save_model;\n  modelbox::Status status = modelbox::STATUS_OK;\n  if (!is_save_model) {\n    status = LoadGraph(model_path);\n    if (modelbox::STATUS_OK != status) {\n      auto err_msg =\n          \"could not load inference graph, err: \" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {status, err_msg};\n    }\n  }\n\n  status = NewSession(is_save_model, model_path);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg = \"new session failed, err: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  status = FillInput(flowunit_input_list);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg = \"fill input failed, err: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  status = FillOutput(flowunit_output_list);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg = \"fill output failed, err: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  return 
modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  if (setenv(\"TF_CPP_MIN_LOG_LEVEL\", \"0\", 1) == -1) {\n    MBLOG_WARN << \"set tensorflow cpp log level failed.\";\n  };\n\n  auto inference_desc = std::dynamic_pointer_cast<VirtualInferenceFlowUnitDesc>(\n      this->GetFlowUnitDesc());\n  inference_desc->SetResourceNice(false);\n  auto config = inference_desc->GetConfiguration();\n  if (config == nullptr) {\n    return {modelbox::STATUS_BADCONF, \"inference config is invalid.\"};\n  }\n\n  auto merge_config = std::make_shared<modelbox::Configuration>();\n  // opts override python_desc_ config\n  merge_config->Add(*config);\n  merge_config->Add(*opts);\n  modelbox::Status status = InitConfig(merge_config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"init config failed: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  plugin_ = merge_config->GetString(\"config.plugin\");\n  status = SetUpInferencePlugin(merge_config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"setup preprocess and postprocess failed: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::SetUpInferencePlugin(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  if (plugin_.empty()) {\n    pre_process_ = std::bind(&InferenceTensorflowFlowUnit::PreProcess, this,\n                             std::placeholders::_1, std::placeholders::_2);\n    post_process_ = std::bind(&InferenceTensorflowFlowUnit::PostProcess, this,\n                              std::placeholders::_1, std::placeholders::_2);\n    return modelbox::STATUS_OK;\n  }\n\n  if (!modelbox::IsAbsolutePath(plugin_)) {\n    auto relpath = modelbox::GetDirName(plugin_);\n    plugin_ = relpath + \"/\" + plugin_;\n  }\n\n  
return SetUpDynamicLibrary(config);\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::PreProcess(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<TF_Tensor *> &input_tf_tensor_list) {\n  int index = 0;\n  modelbox::Status status;\n  for (const auto &input_name : params_.input_name_list_) {\n    const auto input_buf = data_ctx->Input(input_name);\n\n    std::string type = params_.input_type_list_[index++];\n\n    TF_DataType tf_type;\n    if (type.empty()) {\n      // Get type form buffer meta when model input type is not set\n      modelbox::ModelBoxDataType buffer_type = modelbox::MODELBOX_TYPE_INVALID;\n      status = input_buf->At(0)->Get(\"type\", buffer_type);\n      if (!status) {\n        auto err_msg =\n            \"input type is not set ,please set it in inference toml file or \"\n            \"buffer meta . error: \" +\n            status.WrapErrormsgs();\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n      status = ConvertModelBoxTypeToTFType(buffer_type, tf_type);\n      if (!status) {\n        auto err_msg =\n            \"input type convert failed, error: \" + status.WrapErrormsgs();\n        return {modelbox::STATUS_FAULT, err_msg};\n      }\n    } else {\n      std::transform(type.begin(), type.end(), type.begin(), ::toupper);\n      status = ConvertType(type, tf_type);\n      if (status != modelbox::STATUS_OK) {\n        return {status, \"input type convert failed.\"};\n      }\n    }\n\n    std::vector<size_t> buffer_shape;\n    auto result = input_buf->At(0)->Get(\"shape\", buffer_shape);\n    if (!result) {\n      MBLOG_ERROR << \"the input buffer don't have meta shape.\";\n      return {modelbox::STATUS_FAULT,\n              \"the input buffer don't have meta shape.\"};\n    }\n\n    if (std::any_of(input_buf->begin(), input_buf->end(),\n                    [&](const std::shared_ptr<modelbox::Buffer> &buffer) {\n                      std::vector<size_t> shape;\n                      
buffer->Get(\"shape\", shape);\n                      return shape != buffer_shape;\n                    })) {\n      MBLOG_ERROR << \"the input shapes are not the same.\";\n      return {modelbox::STATUS_FAULT, \"the input shapes are not the same.\"};\n    }\n\n    std::vector<int64_t> tf_dims{static_cast<int64_t>(input_buf->Size())};\n    copy(buffer_shape.begin(), buffer_shape.end(), back_inserter(tf_dims));\n\n    auto *buf_list_ptr = new std::shared_ptr<modelbox::BufferList>(input_buf);\n    TF_Tensor *input_tensor = TF_NewTensor(\n        tf_type, tf_dims.data(), tf_dims.size(),\n        const_cast<void *>(input_buf->ConstData()), input_buf->GetBytes(),\n        [](void *data, size_t length, void *arg) {\n          delete (std::shared_ptr<modelbox::BufferList> *)(arg);\n        },\n        buf_list_ptr);\n    if (nullptr == input_tensor) {\n      auto err_msg = \"TF_NewTensor \" + std::string(input_name) + \" failed. \";\n      err_msg += \"please check the input type and shape.\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n    input_tf_tensor_list.push_back(input_tensor);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::PostProcess(\n    const std::shared_ptr<modelbox::DataContext> &data_ctx,\n    std::vector<TF_Tensor *> &output_tf_tensor_list) {\n  int index = 0;\n  for (const auto &output_name : params_.output_name_list_) {\n    auto tensor_byte = TF_TensorByteSize(output_tf_tensor_list[index]);\n    auto *tensor_data = TF_TensorData(output_tf_tensor_list[index]);\n    auto tensor_type = TF_TensorType(output_tf_tensor_list[index]);\n    std::vector<size_t> output_shape;\n\n    int64_t num_dims = TF_NumDims(output_tf_tensor_list[index]);\n    if (0 == num_dims) {\n      auto err_msg = \"the size of the \" + std::string(output_name) + \"is null.\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    for (int i = 1; i < num_dims; 
++i) {\n      output_shape.push_back(TF_Dim(output_tf_tensor_list[index], i));\n    }\n\n    auto num = TF_Dim(output_tf_tensor_list[index], 0);\n    if (num == 0) {\n      return {modelbox::STATUS_FAULT, \"dim is zero\"};\n    }\n\n    auto output_buf = data_ctx->Output(output_name);\n    auto single_bytes = tensor_byte / num;\n    std::vector<size_t> shape_vector(num, single_bytes);\n    auto status = CreateOutputBufferList(output_buf, shape_vector, tensor_data,\n                                         tensor_byte, tensor_type, index);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"postProcess failed.\" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    output_buf->Set(\"shape\", output_shape);\n\n    index++;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::SetUpDynamicLibrary(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  typedef std::shared_ptr<InferencePlugin> (*PluginObject)();\n  auto status = modelbox::STATUS_OK;\n  void *driver_handler = dlopen(plugin_.c_str(), RTLD_NOW | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    auto *dl_errmsg = dlerror();\n    auto err_msg = \"dlopen \" + plugin_ + \" failed\";\n    if (dl_errmsg) {\n      err_msg += \", error: \" + std::string(dl_errmsg);\n    }\n\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  DeferCond { return !status; };\n  DeferCondAdd {\n    if (driver_handler != nullptr) {\n      dlclose(driver_handler);\n      driver_handler = nullptr;\n    }\n  };\n\n  auto create_plugin =\n      reinterpret_cast<PluginObject>(dlsym(driver_handler, \"CreatePlugin\"));\n  if (create_plugin == nullptr) {\n    auto *dlerr_msg = dlerror();\n    std::string err_msg = \"dlsym CreatePlugin failed\";\n    if (dlerr_msg) {\n      err_msg += \" error: \";\n      err_msg += dlerr_msg;\n    }\n\n    MBLOG_ERROR << err_msg;\n    status = 
{modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  std::shared_ptr<InferencePlugin> inference_plugin = create_plugin();\n  if (inference_plugin == nullptr) {\n    const auto *err_msg = \"CreatePlugin failed\";\n    MBLOG_ERROR << err_msg;\n    status = {modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  status = inference_plugin->PluginInit(config);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"plugin init failed, error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    status = {modelbox::STATUS_FAULT, err_msg};\n    return status;\n  }\n\n  driver_handler_ = driver_handler;\n  inference_plugin_ = inference_plugin;\n\n  pre_process_ = std::bind(&InferencePlugin::PreProcess, inference_plugin_,\n                           std::placeholders::_1, std::placeholders::_2);\n  post_process_ = std::bind(&InferencePlugin::PostProcess, inference_plugin_,\n                            std::placeholders::_1, std::placeholders::_2);\n\n  return status;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  // TODO consider without N model and nhwc check\n\n  std::vector<TF_Tensor *> input_tf_tensor_list;\n  std::vector<TF_Tensor *> output_tf_tensor_list(\n      params_.output_name_list_.size(), nullptr);\n\n  Defer { ClearTensor(input_tf_tensor_list, output_tf_tensor_list); };\n\n  auto status = pre_process_(data_ctx, input_tf_tensor_list);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"tensorflow flowunit preprocess failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  status = Inference(input_tf_tensor_list, output_tf_tensor_list);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg =\n        \"tensorflow flowunit inference failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  status = post_process_(data_ctx, 
output_tf_tensor_list);\n  if (modelbox::STATUS_OK != status) {\n    auto err_msg =\n        \"tensorflow flowunit postprocess failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {status, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::CreateOutputBufferList(\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n    const std::vector<size_t> &shape_vector, void *tensor_data,\n    size_t tensor_byte, TF_DataType tensor_type, int index) {\n  auto status =\n      output_buffer_list->BuildFromHost(shape_vector, tensor_data, tensor_byte);\n  if (!status) {\n    auto err_msg = \"output buffer list builds error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  modelbox::ModelBoxDataType modelbox_type = modelbox::MODELBOX_TYPE_INVALID;\n  status = ConvertTFTypeToModelBoxType(tensor_type, modelbox_type);\n  if (!status) {\n    auto err_msg =\n        \"output type convert failed ,error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  output_buffer_list->Set(\"type\", modelbox_type);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::ConvertType(\n    const std::string &type, TF_DataType &TFType) {\n  if (type_map.find(type) == type_map.end()) {\n    return {modelbox::STATUS_FAULT, \"unsupported type \" + type};\n  }\n\n  TFType = type_map[type];\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::Inference(\n    const std::vector<TF_Tensor *> &input_tensor_list,\n    std::vector<TF_Tensor *> &output_tensor_list) {\n  TF_SessionRun(params_.session, nullptr, params_.input_op_list.data(),\n                input_tensor_list.data(), params_.input_name_list_.size(),\n                params_.output_op_list.data(), output_tensor_list.data(),\n                params_.output_name_list_.size(), nullptr, 0, nullptr,\n                params_.status);\n  if 
(TF_GetCode(params_.status) != TF_OK) {\n    auto err_msg =\n        \"doInference failed: \" + std::string(TF_Message(params_.status));\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceTensorflowFlowUnit::Close() {\n  return params_.Clear();\n}\n\nvoid InferenceTensorflowFlowUnitDesc::SetModelEntry(std::string model_entry) {\n  model_entry_ = std::move(model_entry);\n}\n\nstd::string InferenceTensorflowFlowUnitDesc::GetModelEntry() {\n  return model_entry_;\n}\n"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/tensorflow_inference_common.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_TENSORFLOW_INFERENCE_COMMON_H_\n#define MODELBOX_TENSORFLOW_INFERENCE_COMMON_H_\n\n#include <dlfcn.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/refcache.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/tensor.h>\n#include <modelbox/tensor_list.h>\n\n#include <typeinfo>\n\n#include \"tensorflow/c/c_api.h\"\n#include \"tensorflow_inference_plugin.h\"\n\nconstexpr const char *INFERENCE_TYPE = \"tensorflow\";\nconstexpr const char *TAGS = \"serve\";\n\nclass InferenceTensorflowParams {\n public:\n  InferenceTensorflowParams() = default;\n  virtual ~InferenceTensorflowParams() = default;\n\n  modelbox::Status Clear();\n\n  std::vector<std::string> input_name_list_, output_name_list_;\n  std::vector<std::string> input_type_list_, output_type_list_;\n  std::vector<TF_Output> input_op_list, output_op_list;\n\n  int device{0};\n\n  // Tensorflow Options\n  TF_Graph *graph{nullptr};\n  TF_Session *session{nullptr};\n  TF_SessionOptions *options{nullptr};\n  TF_Status *status{nullptr};\n  std::vector<uint8_t> config_proto_binary_ = {\n      0x32, 0xe,  0x9,  0xcd, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,\n      0xec, 0x3f, 0x20, 0x1,  0x2a, 0x1,  0x30, 0x38, 
0x1};\n};\n\nusing TensorflowProcess = std::function<modelbox::Status(\n    std::shared_ptr<modelbox::DataContext>, std::vector<TF_Tensor *> &)>;\n\nclass InferenceTensorflowFlowUnitDesc : public modelbox::FlowUnitDesc {\n  friend class InferenceTensorflowFlowUnit;\n\n public:\n  InferenceTensorflowFlowUnitDesc() = default;\n  ~InferenceTensorflowFlowUnitDesc() override = default;\n\n  void SetModelEntry(std::string model_entry);\n  std::string GetModelEntry();\n\n  std::string model_entry_;\n};\n\nclass InferenceTensorflowFlowUnit : public modelbox::FlowUnit {\n public:\n  InferenceTensorflowFlowUnit();\n  ~InferenceTensorflowFlowUnit() override;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  /* run when processing data */\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n\n private:\n  modelbox::Status PreProcess(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<TF_Tensor *> &input_tf_tensor_list);\n\n  modelbox::Status PostProcess(\n      const std::shared_ptr<modelbox::DataContext> &data_ctx,\n      std::vector<TF_Tensor *> &output_tf_tensor_list);\n  modelbox::Status SetUpInferencePlugin(\n      const std::shared_ptr<modelbox::Configuration> &config);\n  modelbox::Status SetUpDynamicLibrary(\n      const std::shared_ptr<modelbox::Configuration> &config);\n\n  modelbox::Status ReadBufferFromFile(const std::string &file, TF_Buffer *buf);\n  modelbox::Status InitConfig(\n      const std::shared_ptr<modelbox::Configuration> &fu_config);\n  modelbox::Status LoadGraph(const std::string &model_path);\n  modelbox::Status Inference(const std::vector<TF_Tensor *> &input_tensor_list,\n                             std::vector<TF_Tensor *> &output_tensor_list);\n  modelbox::Status ConvertType(const std::string &type, TF_DataType &TFType);\n  modelbox::Status ClearTensor(std::vector<TF_Tensor *> 
&input_tensor_list,\n                               std::vector<TF_Tensor *> &output_tensor_list);\n  modelbox::Status CreateOutputBufferList(\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n      const std::vector<size_t> &shape_vector, void *tensor_data,\n      size_t tensor_byte, TF_DataType tensor_type, int index);\n  modelbox::Status GetTFOperation(const std::string &name, TF_Output &op);\n  modelbox::Status FillInput(\n      const std::vector<modelbox::FlowUnitInput> &flowunit_input_list);\n  modelbox::Status FillOutput(\n      const std::vector<modelbox::FlowUnitOutput> &flowunit_output_list);\n  modelbox::Status NewSession(bool is_save_model,\n                              const std::string &model_entry);\n  bool IsSaveModelType(const std::string &model_path);\n  InferenceTensorflowParams params_;\n  std::string plugin_;\n  void *driver_handler_{nullptr};\n\n  std::shared_ptr<InferencePlugin> inference_plugin_{nullptr};\n  TensorflowProcess pre_process_{nullptr};\n  TensorflowProcess post_process_{nullptr};\n};\n\n#endif  // MODELBOX_TENSORFLOW_INFERENCE_COMMON_H_\n"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/tensorflow_inference_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_INFER_PLUGIN_H_\n#define MODELBOX_INFER_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/data_context.h>\n\n#include <vector>\n\n#include \"tensorflow/c/c_api.h\"\n\nclass InferencePlugin {\n public:\n  InferencePlugin() = default;\n  virtual ~InferencePlugin() = default;\n\n  virtual modelbox::Status PluginInit(\n      std::shared_ptr<modelbox::Configuration> config) = 0;\n\n  virtual modelbox::Status PreProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      std::vector<TF_Tensor *> &input_tf_tensor_list) = 0;\n\n  virtual modelbox::Status PostProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      std::vector<TF_Tensor *> &output_tf_tensor_list) = 0;\n};\n\nextern \"C\" {\n\n#if defined(__clang__)\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wreturn-type-c-linkage\"\n#endif\n\nMODELBOX_DLL_PUBLIC std::shared_ptr<InferencePlugin> CreatePlugin();\n\n#if defined(__clang__)\n#pragma clang diagnostic pop\n#endif\n}\n\n#endif"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/test_plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"tensorflow-inference-plugin\")\nproject(modelbox-flowunit-${UNIT_NAME})\n\nfile(GLOB PLUGIN_SOURCE *.cpp *.cc *.c)\n\nif (NOT TENSORFLOW_FOUND) \n    message(STATUS \"Not found tensorflow, disable ${UNIT_NAME} flowunit\")\n    return()\nendif()\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${TENSORFLOW_INCLUDE_DIR})\n\n\nset(PLUGIN_SHARED inferece-plugin)\nadd_library(${PLUGIN_SHARED} SHARED ${PLUGIN_SOURCE})\n\ntarget_link_libraries(${PLUGIN_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${PLUGIN_SHARED} ${TENSORFLOW_LIBRARIES})\n\nset_target_properties(${PLUGIN_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${PLUGIN_SHARED}\")\n\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${PLUGIN_SHARED})\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/test_plugin/generate_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"generate_plugin.h\"\n\nstatic std::map<std::string, TF_DataType> map = {\n    {\"FLOAT\", TF_FLOAT}, {\"DOUBLE\", TF_DOUBLE}, {\"INT\", TF_INT32},\n    {\"UINT8\", TF_UINT8}, {\"LONG\", TF_INT64},    {\"STRING\", TF_STRING}};\n\nstd::shared_ptr<InferencePlugin> CreatePlugin() {\n  return std::make_shared<OriginInferencePlugin>();\n}\n\nmodelbox::Status OriginInferencePlugin::ConvertType(const std::string &type,\n                                                    TF_DataType &TFType) {\n  auto iter = map.find(type);\n  if (iter == map.end()) {\n    MBLOG_ERROR << \"unsupported type \" << type;\n    return {modelbox::STATUS_BADCONF, \"unsuppored type\"};\n  }\n\n  TFType = map[type];\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::CreateOutputBufferList(\n    std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n    const std::vector<size_t> &shape_vector, void *tensor_data,\n    size_t tensor_byte, int index) {\n  auto type_output_temp = output_type_list_[index];\n  auto status =\n      output_buffer_list->BuildFromHost(shape_vector, tensor_data, tensor_byte);\n  if (type_output_temp == \"float\") {\n    output_buffer_list->Set(\"type\", modelbox::MODELBOX_FLOAT);\n  } else if (type_output_temp == \"double\") {\n    output_buffer_list->Set(\"type\", 
modelbox::MODELBOX_DOUBLE);\n  } else if (type_output_temp == \"int\") {\n    output_buffer_list->Set(\"type\", modelbox::MODELBOX_INT32);\n  } else if (type_output_temp == \"uint8\") {\n    output_buffer_list->Set(\"type\", modelbox::MODELBOX_UINT8);\n  } else if (type_output_temp == \"long\") {\n    output_buffer_list->Set(\"type\", modelbox::MODELBOX_INT16);\n  } else {\n    return {modelbox::STATUS_NOTSUPPORT, \"unsupport output type.\"};\n  }\n\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"output buffer list builds error: \" + status.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::PluginInit(\n    std::shared_ptr<modelbox::Configuration> config) {\n  modelbox::Status status = modelbox::STATUS_OK;\n  std::vector<std::string> names;\n  std::vector<std::string> types;\n  status = SetUpInputOutput(config, \"input\", names, types);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"set up input failed, error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  input_name_list_.swap(names);\n  input_type_list_.swap(types);\n\n  status = SetUpInputOutput(config, \"output\", names, types);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"set up output failed, error: \" + status.WrapErrormsgs();\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  output_name_list_.swap(names);\n  output_type_list_.swap(types);\n\n  return status;\n}\n\nmodelbox::Status OriginInferencePlugin::SetUpInputOutput(\n    const std::shared_ptr<modelbox::Configuration> &config,\n    const std::string &type, std::vector<std::string> &names,\n    std::vector<std::string> &types) {\n  auto keys = config->GetSubKeys(type);\n  for (unsigned int i = 1; i <= keys.size(); ++i) {\n    std::string inner_name;\n    std::string inner_type;\n    auto key = type + \".\";\n    key += type;\n    key += 
std::to_string(i);\n    auto item_table = config->GetSubKeys(key);\n    if (item_table.empty()) {\n      auto err_msg = \"the key \" + key + \" is not found in config file.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto name_index = key + \".name\";\n    inner_name = config->GetString(name_index);\n    if (inner_name.empty()) {\n      auto err_msg = \"the key \" + key + \" should have key name.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    auto type_index = key + \".type\";\n    inner_type = config->GetString(type_index);\n    if (inner_type.empty()) {\n      auto err_msg = \"the key \" + key + \" should have key type.\";\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    names.emplace_back(inner_name);\n    types.emplace_back(inner_type);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::PreProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx,\n    std::vector<TF_Tensor *> &input_tf_tensor_list) {\n  int index = 0;\n  modelbox::Status status;\n  for (const auto &input_name : input_name_list_) {\n    const auto input_buf = data_ctx->Input(input_name);\n\n    std::string type = input_type_list_[index++];\n    std::transform(type.begin(), type.end(), type.begin(), ::toupper);\n    TF_DataType tf_type;\n    status = ConvertType(type, tf_type);\n    if (status != modelbox::STATUS_OK) {\n      MBLOG_ERROR << \"input type convert failed. 
\" << status.WrapErrormsgs();\n      return {status, \"input type convert failed.\"};\n    }\n\n    std::vector<size_t> buffer_shape;\n    auto result = input_buf->At(0)->Get(\"shape\", buffer_shape);\n    if (!result) {\n      MBLOG_ERROR << \"the input buffer don't have meta shape.\";\n      return {modelbox::STATUS_FAULT,\n              \"the input buffer don't have meta shape.\"};\n    }\n\n    if (std::any_of(input_buf->begin(), input_buf->end(),\n                    [&](const std::shared_ptr<modelbox::Buffer> &buffer) {\n                      std::vector<size_t> shape;\n                      buffer->Get(\"shape\", shape);\n                      return shape != buffer_shape;\n                    })) {\n      MBLOG_ERROR << \"the input shapes are not the same.\";\n      return {modelbox::STATUS_FAULT, \"the input shapes are not the same.\"};\n    }\n\n    std::vector<int64_t> tf_dims{static_cast<int64_t>(input_buf->Size())};\n    copy(buffer_shape.begin(), buffer_shape.end(), back_inserter(tf_dims));\n\n    TF_Tensor *input_tensor = TF_NewTensor(\n        tf_type, tf_dims.data(), tf_dims.size(),\n        const_cast<void *>(input_buf->ConstData()), input_buf->GetBytes(),\n        [](void *data, size_t length, void *arg) {}, nullptr);\n    if (nullptr == input_tensor) {\n      auto err_msg = \"TF_NewTensor \" + std::string(input_name) + \" failed.\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n    input_tf_tensor_list.push_back(input_tensor);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status OriginInferencePlugin::PostProcess(\n    std::shared_ptr<modelbox::DataContext> data_ctx,\n    std::vector<TF_Tensor *> &output_tf_tensor_list) {\n  int index = 0;\n  for (const auto &output_name : output_name_list_) {\n    auto tensor_byte = TF_TensorByteSize(output_tf_tensor_list[index]);\n    auto *tensor_data = TF_TensorData(output_tf_tensor_list[index]);\n    std::vector<size_t> output_shape;\n\n    int64_t num_dims 
= TF_NumDims(output_tf_tensor_list[index]);\n    if (0 == num_dims) {\n      auto err_msg = \"the size of the \" + std::string(output_name) + \"is null.\";\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    for (int i = 1; i < num_dims; ++i) {\n      output_shape.push_back(TF_Dim(output_tf_tensor_list[index], i));\n    }\n\n    auto num = TF_Dim(output_tf_tensor_list[index], 0);\n    if (num == 0) {\n      return {modelbox::STATUS_INVALID, \"output tensor dim is zero\"};\n    }\n\n    auto output_buf = data_ctx->Output(output_name);\n    auto single_bytes = tensor_byte / num;\n    std::vector<size_t> shape_vector(num, single_bytes);\n    auto status = CreateOutputBufferList(output_buf, shape_vector, tensor_data,\n                                         tensor_byte, index);\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"postProcess failed.\" + status.WrapErrormsgs();\n      MBLOG_ERROR << err_msg;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    output_buf->Set(\"shape\", output_shape);\n\n    index++;\n  }\n\n  return modelbox::STATUS_OK;\n}"
  },
  {
    "path": "src/drivers/inference_engine/tensorflow/test_plugin/generate_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SAMPLE_INFER_PLUGIN_H_\n#define MODELBOX_SAMPLE_INFER_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/data_context.h>\n\n#include \"tensorflow/c/c_api.h\"\n#include \"tensorflow_inference_plugin.h\"\n\nclass OriginInferencePlugin : public InferencePlugin {\n public:\n  OriginInferencePlugin() = default;\n  ~OriginInferencePlugin() override = default;\n\n  modelbox::Status PreProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      std::vector<TF_Tensor *> &input_tf_tensor_list) override;\n\n  modelbox::Status PostProcess(\n      std::shared_ptr<modelbox::DataContext> data_ctx,\n      std::vector<TF_Tensor *> &output_tf_tensor_list) override;\n\n  modelbox::Status PluginInit(\n      std::shared_ptr<modelbox::Configuration> config) override;\n\n private:\n  modelbox::Status ConvertType(const std::string &type, TF_DataType &TFType);\n\n  modelbox::Status CreateOutputBufferList(\n      std::shared_ptr<modelbox::BufferList> &output_buffer_list,\n      const std::vector<size_t> &shape_vector, void *tensor_data,\n      size_t tensor_byte, int index);\n\n  modelbox::Status SetUpInputOutput(\n      const std::shared_ptr<modelbox::Configuration> &config,\n      const std::string &type, 
std::vector<std::string> &names,\n      std::vector<std::string> &types);\n\n  std::vector<std::string> input_name_list_, output_name_list_;\n  std::vector<std::string> input_type_list_, output_type_list_;\n};\n\n#endif"
  },
  {
    "path": "src/drivers/virtual/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-virtual-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nadd_subdirectory(python)\nadd_subdirectory(java)\nadd_subdirectory(inference)\nadd_subdirectory(yolobox)"
  },
  {
    "path": "src/drivers/virtual/inference/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"inference\")\n\nproject(modelbox-virtualdriver-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_VIRTUALDRIVER_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\n\nset(MODELBOX_VIRTUALDRIVER_SHARED libmodelbox-virtualdriver-${UNIT_NAME}-shared)\nset(MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_VIRTUALDRIVER_SHARED} SHARED ${MODELBOX_VIRTUALDRIVER_SOURCE})\nset(LIBMODELBOX_VIRTUAL_INFERENCE_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} rt)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} dl)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} ${LIBMODELBOX_FLOWUNIT_INFERENCE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} 
${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-virtualdriver-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_VIRTUALDRIVER_SHARED}\n        COMPONENT libmodelbox\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT libmodelbox-devel\n        )\n\nset(LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_INFERENCE_INCLUDE ${MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SOURCES ${MODELBOX_VIRTUALDRIVER_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-virtualdriver-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_VIRTUALDRIVER_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/virtual/inference/virtualdriver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_inference.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<InferenceVirtualDriverManager>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_VIRTUAL);\n  desc->SetName(VIRTUAL_INFERENCE_FLOWUNIT);\n  desc->SetType(modelbox::DRIVER_TYPE_VIRTUAL);\n  desc->SetVersion(BIND_INFERENCE_FLOWUNIT_VERSION);\n  desc->SetNodelete(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/virtual/inference/virtualdriver_inference.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"virtualdriver_inference.h\"\n\n#include <utility>\n\n#include \"modelbox/base/driver.h\"\n\nconstexpr const char *VIRTUAL_FLOWUNIT_TYPE = \"inference\";\n\nvoid VirtualInferenceFlowUnitDesc::SetModelEntry(std::string model_entry) {\n  model_entry_ = std::move(model_entry);\n}\n\nstd::shared_ptr<modelbox::DriverFactory>\nInferenceVirtualDriver::CreateFactory() {\n  auto factory = std::make_shared<VirtualInferenceFlowUnitFactory>();\n  auto real_driver_list = GetBindDriver();\n  factory->SetDriver(shared_from_this());\n  auto real_factory_list =\n      std::vector<std::shared_ptr<modelbox::DriverFactory>>();\n  for (auto &real_driver : real_driver_list) {\n    auto real_factory = real_driver->CreateFactory();\n    if (real_factory == nullptr) {\n      auto driver_desc = real_driver->GetDriverDesc();\n      MBLOG_ERROR << \"real driver binded by virtual inference driver create \"\n                     \"factory failed, real drivers is \"\n                  << driver_desc->GetName() << \", \" << driver_desc->GetType()\n                  << \", \" << driver_desc->GetFilePath();\n      continue;\n    }\n    real_factory_list.push_back(real_factory);\n  }\n  factory->SetFlowUnitFactory(real_factory_list);\n  return 
factory;\n}\n\nstd::vector<std::shared_ptr<modelbox::Driver>>\nInferenceVirtualDriver::GetBindDriver() {\n  return inference_flowunit_driver_list_;\n}\n\nvoid InferenceVirtualDriver::SetBindDriver(\n    const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list) {\n  inference_flowunit_driver_list_ = driver_list;\n}\n\nmodelbox::Status InferenceVirtualDriverManager::Init(\n    modelbox::Drivers &driver) {\n  auto ret = BindBaseDriver(driver);\n  return ret;\n}\n\nmodelbox::Status InferenceVirtualDriverManager::Scan(const std::string &path) {\n  std::vector<std::string> drivers_list;\n  std::string filter = \"*.toml\";\n  auto status = modelbox::ListSubDirectoryFiles(path, filter, &drivers_list);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"list directory:  \" + path + \"/\" + filter + \" failed.\";\n    return {status, err_msg};\n  }\n\n  for (auto &driver_file : drivers_list) {\n    auto result = Add(driver_file);\n    if (result) {\n      MBLOG_INFO << \"Add virtual driver \" << driver_file << \" success\";\n    }\n\n    if (result == modelbox::STATUS_NOTSUPPORT) {\n      MBLOG_DEBUG << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    } else if (!result) {\n      MBLOG_ERROR << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceVirtualDriverManager::Add(const std::string &file) {\n  std::string name;\n  std::string type;\n  std::string version;\n  std::string description;\n  std::string entry;\n  std::string flowunit_type;\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(file);\n  if (config == nullptr) {\n    const auto &err_msg = modelbox::StatusError.Errormsg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, 
err_msg};\n  }\n\n  flowunit_type = config->GetString(\"base.type\");\n  if (flowunit_type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'type'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'type'.\"};\n  }\n\n  if (flowunit_type != VIRTUAL_FLOWUNIT_TYPE) {\n    auto err_msg = \"the config type is \" + flowunit_type +\n                   \", but the so type is \" + std::string(VIRTUAL_FLOWUNIT_TYPE);\n    return {modelbox::STATUS_NOTSUPPORT, err_msg};\n  }\n\n  name = config->GetString(\"base.name\");\n  if (name.empty()) {\n    MBLOG_ERROR << \"the config does not have 'name'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'name'.\"};\n  }\n\n  type = config->GetString(\"base.device\");\n  if (type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'device'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'device'.\"};\n  }\n\n  version = config->GetString(\"base.version\");\n  if (version.empty()) {\n    MBLOG_ERROR << \"the config does not have 'version'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'version'.\"};\n  }\n\n  description = config->GetString(\"base.description\");\n  if (description.empty()) {\n    MBLOG_ERROR << \"the config does not have 'description'.\";\n    return {modelbox::STATUS_BADCONF,\n            \"the config does not have 'description'.\"};\n  }\n\n  std::shared_ptr<InferenceVirtualDriver> driver =\n      std::make_shared<InferenceVirtualDriver>();\n  std::shared_ptr<modelbox::DriverDesc> driver_desc =\n      std::make_shared<modelbox::DriverDesc>();\n  driver_desc->SetClass(\"DRIVER-FLOWUNIT\");\n  driver_desc->SetFilePath(file);\n  driver_desc->SetName(name);\n  driver_desc->SetType(type);\n  auto status = driver_desc->SetVersion(version);\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto err_msg = \"SetVersion failed, version: \" + version;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  
driver_desc->SetDescription(description);\n  driver->SetDriverDesc(driver_desc);\n  driver->SetVirtual(true);\n  driver->SetBindDriver(inference_flowunit_driver_list_);\n  // TODO: 判断是否重复存在\n  drivers_list_.push_back(driver);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status InferenceVirtualDriverManager::BindBaseDriver(\n    modelbox::Drivers &driver) {\n  auto inference_drivers =\n      driver.GetDriverListByClass(modelbox::DRIVER_CLASS_INFERENCE);\n  for (const auto &infer_driver : inference_drivers) {\n    inference_flowunit_driver_list_.push_back(infer_driver);\n  }\n\n  if (inference_flowunit_driver_list_.empty()) {\n    return {modelbox::STATUS_NOTFOUND, \"can not find inference flowunit\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string\nVirtualInferenceFlowUnitFactory::GetInferenceFlowUintInputDeviceType(\n    const std::string &unit_type, const std::string &virtual_type) {\n  for (auto &flowunit_factory : bind_flowunit_factory_list_) {\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetFlowUnitFactoryType() != unit_type) {\n      continue;\n    }\n\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetVirtualType() != virtual_type) {\n      continue;\n    }\n\n    return std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n        ->GetFlowUnitInputDeviceType();\n  }\n\n  return \"\";\n}\n\nmodelbox::Status VirtualInferenceFlowUnitFactory::FillItem(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc,\n    const std::string &device, const std::string &type) {\n  auto item = config->GetSubKeys(type);\n  if (item.empty()) {\n    MBLOG_ERROR << \"the key \" << type << \" is not found in config file.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  for (unsigned int i = 1; i <= item.size(); ++i) {\n    std::string item_device = device;\n    std::string item_name;\n    std::string item_type;\n    
auto key = type;\n    key += \".\" + type;\n    key += std::to_string(i);\n    auto item_table = config->GetSubKeys(key);\n    if (item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    std::map<std::string, std::string> ext_map;\n    for (const auto &inner_item : item_table) {\n      auto item_index = key;\n      item_index += \".\" + inner_item;\n      if (inner_item == \"name\") {\n        item_name = config->GetString(item_index);\n        if (item_name.empty()) {\n          MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n          return modelbox::STATUS_BADCONF;\n        }\n        continue;\n      }\n\n      if (inner_item == \"type\") {\n        auto config_type = config->GetString(item_index);\n        if (!config_type.empty()) {\n          item_type = config_type;\n        }\n        continue;\n      }\n\n      if (inner_item == \"device\") {\n        auto config_device = config->GetString(item_index);\n        if (!config_device.empty()) {\n          item_device = config_device;\n        }\n        continue;\n      }\n\n      ext_map[inner_item] = config->GetString(item_index);\n    }\n\n    if (type == \"input\") {\n      auto device_type = GetInferenceFlowUintInputDeviceType(\n          GetDriver()->GetDriverDesc()->GetType(),\n          flowunit_desc->GetVirtualType());\n      item_device = device_type.empty() ? 
item_device : device_type;\n      flowunit_desc->AddFlowUnitInput(\n          modelbox::FlowUnitInput(item_name, item_device, item_type, ext_map));\n    } else {\n      flowunit_desc->AddFlowUnitOutput(\n          modelbox::FlowUnitOutput(item_name, item_device, item_type, ext_map));\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualInferenceFlowUnitFactory::FillBaseInfo(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc,\n    const std::string &toml_file, std::string *device) {\n  auto model_entry = config->GetString(\"base.entry\");\n  if (model_entry.empty()) {\n    MBLOG_ERROR << \"the key 'entry' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  if (!modelbox::IsAbsolutePath(model_entry)) {\n    auto relpath = modelbox::GetDirName(toml_file);\n    model_entry = relpath + \"/\" + model_entry;\n  }\n  MBLOG_DEBUG << \"module entry path: \" << model_entry;\n  flowunit_desc->SetModelEntry(model_entry);\n\n  auto virtual_type = config->GetString(\"base.virtual_type\");\n  if (virtual_type.empty()) {\n    MBLOG_ERROR << \"the key 'virtual_type' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  flowunit_desc->SetVirtualType(virtual_type);\n  *device = config->GetString(\"base.device\");\n  if (device->empty()) {\n    MBLOG_ERROR << \"the key 'device' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto group_type = config->GetString(\"base.group_type\");\n  if (!group_type.empty()) {\n    flowunit_desc->SetFlowUnitGroupType(group_type);\n  }\n\n  bool is_input_contiguous = true;\n  auto contiguous_str = config->GetString(\"base.is_input_contiguous\");\n  if (contiguous_str.empty()) {\n    // if set it as bool\n    is_input_contiguous = config->GetBool(\"base.is_input_contiguous\", true);\n  } else {\n    // key word is \"false\", so I need check it is false\n    is_input_contiguous = 
!(contiguous_str == \"false\");\n  }\n  if (!is_input_contiguous) {\n    // generally, it true, but some hw, for ex. sdc, must be false\n    flowunit_desc->SetInputContiguous(false);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid VirtualInferenceFlowUnitFactory::FillFlowUnitType(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc) {\n  flowunit_desc->SetFlowType(modelbox::NORMAL);\n  flowunit_desc->SetOutputType(modelbox::ORIGIN);\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nVirtualInferenceFlowUnitFactory::FlowUnitProbe() {\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> return_map;\n  auto driver_desc = GetDriver()->GetDriverDesc();\n  auto toml_file = driver_desc->GetFilePath();\n\n  std::shared_ptr<VirtualInferenceFlowUnitDesc> flowunit_desc =\n      std::make_shared<VirtualInferenceFlowUnitDesc>();\n  modelbox::Status status;\n\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(toml_file);\n\n  std::string device;\n  auto ret = FillBaseInfo(config, flowunit_desc, toml_file, &device);\n  if (ret != modelbox::STATUS_OK) {\n    return return_map;\n  }\n\n  ret = FillItem(config, flowunit_desc, device, \"input\");\n  if (ret != modelbox::STATUS_OK) {\n    return return_map;\n  }\n\n  ret = FillItem(config, flowunit_desc, device, \"output\");\n  if (ret != modelbox::STATUS_OK) {\n    return return_map;\n  }\n\n  FillFlowUnitType(config, flowunit_desc);\n  flowunit_desc->SetFlowUnitName(driver_desc->GetName());\n  flowunit_desc->SetConfiguration(config);\n  flowunit_desc->SetDescription(driver_desc->GetDescription());\n  return_map.insert(std::make_pair(driver_desc->GetName(), flowunit_desc));\n  return return_map;\n}\n\nvoid VirtualInferenceFlowUnitFactory::SetFlowUnitFactory(\n    const 
std::vector<std::shared_ptr<modelbox::DriverFactory>>\n        &bind_flowunit_factory_list) {\n  for (const auto &bind_flowunit_factory : bind_flowunit_factory_list) {\n    bind_flowunit_factory_list_.push_back(\n        std::dynamic_pointer_cast<FlowUnitFactory>(bind_flowunit_factory));\n  }\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nVirtualInferenceFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  for (auto &flowunit_factory : bind_flowunit_factory_list_) {\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetFlowUnitFactoryType() != unit_type) {\n      continue;\n    }\n\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetVirtualType() != virtual_type) {\n      continue;\n    }\n\n    return std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n        ->CreateFlowUnit(unit_name, unit_type);\n  }\n  modelbox::StatusError = {\n      modelbox::STATUS_NOTFOUND,\n      \"current environment does not support the inference type: '\" +\n          virtual_type + \":\" + unit_type + \"'\"};\n\n  return nullptr;\n};\n\nstd::string VirtualInferenceFlowUnitDesc::GetModelEntry() {\n  return model_entry_;\n}\n\nvoid VirtualInferenceFlowUnitDesc::SetConfiguration(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  config_ = config;\n}\n\nstd::shared_ptr<modelbox::Configuration>\nVirtualInferenceFlowUnitDesc::GetConfiguration() {\n  return config_;\n}\n\nstd::string VirtualInferenceFlowUnitFactory::GetVirtualType() {\n  return virtual_type_;\n};\n\nvoid VirtualInferenceFlowUnitFactory::SetVirtualType(\n    const std::string &virtual_type) {\n  virtual_type_ = virtual_type;\n};\n"
  },
  {
    "path": "src/drivers/virtual/inference/virtualdriver_inference.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_VIRTUAL_DRIVER_INFERENCE_H_\n#define MODELBOX_VIRTUAL_DRIVER_INFERENCE_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *BIND_INFERENCE_FLOWUNIT_VERSION = \"1.0.0\";\nconstexpr const char *VIRTUAL_INFERENCE_FLOWUNIT = \"inference\";\n\n// Virtual\nclass InferenceVirtualDriverDesc : public modelbox::VirtualDriverDesc {\n public:\n  InferenceVirtualDriverDesc() = default;\n  ~InferenceVirtualDriverDesc() override = default;\n};\n\nclass VirtualInferenceFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  VirtualInferenceFlowUnitDesc() = default;\n  ~VirtualInferenceFlowUnitDesc() override = default;\n\n  void SetModelEntry(std::string model_entry);\n  std::string GetModelEntry();\n\n  void SetConfiguration(const std::shared_ptr<modelbox::Configuration> &config);\n  std::shared_ptr<modelbox::Configuration> GetConfiguration();\n\n protected:\n  std::string model_entry_;\n  std::shared_ptr<modelbox::Configuration> config_;\n};\n\nclass InferenceVirtualDriver : public modelbox::VirtualDriver {\n public:\n  InferenceVirtualDriver() = default;\n  ~InferenceVirtualDriver() override = default;\n\n  std::shared_ptr<modelbox::DriverFactory> 
CreateFactory() override;\n  std::vector<std::shared_ptr<modelbox::Driver>> GetBindDriver();\n  void SetBindDriver(\n      const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list);\n\n private:\n  std::vector<std::shared_ptr<modelbox::Driver>>\n      inference_flowunit_driver_list_;\n};\n\nclass VirtualInferenceFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  VirtualInferenceFlowUnitFactory() = default;\n  ~VirtualInferenceFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n\n  void SetFlowUnitFactory(\n      const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n          &bind_flowunit_factory_list) override;\n\n  std::string GetVirtualType() override;\n  void SetVirtualType(const std::string &virtual_type) override;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override { return driver_; };\n\n  void SetDriver(const std::shared_ptr<modelbox::Driver> &driver) override {\n    driver_ = driver;\n  }\n\n private:\n  modelbox::Status FillItem(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc,\n      const std::string &device, const std::string &type);\n  modelbox::Status FillBaseInfo(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc,\n      const std::string &toml_file, std::string *device);\n  void FillFlowUnitType(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualInferenceFlowUnitDesc> &flowunit_desc);\n  std::string GetInferenceFlowUintInputDeviceType(\n      const std::string &unit_type, const std::string &virtual_type);\n  std::shared_ptr<modelbox::Driver> driver_;\n  
std::vector<std::shared_ptr<modelbox::DriverFactory>>\n      bind_flowunit_factory_list_;\n  std::string virtual_type_;\n};\n\nclass InferenceVirtualDriverManager : public modelbox::VirtualDriverManager {\n public:\n  InferenceVirtualDriverManager() = default;\n  ~InferenceVirtualDriverManager() override = default;\n\n  modelbox::Status Scan(const std::string &path) override;\n  modelbox::Status Add(const std::string &file) override;\n  modelbox::Status Init(modelbox::Drivers &driver) override;\n\n private:\n  modelbox::Status BindBaseDriver(modelbox::Drivers &driver);\n  std::vector<std::shared_ptr<modelbox::Driver>>\n      inference_flowunit_driver_list_;\n};\n\n#endif  // MODELBOX_VIRTUAL_DRIVER_INFERENCE_H_\n"
  },
  {
    "path": "src/drivers/virtual/java/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif (NOT WITH_JAVA) \n    message(STATUS \"java support is disabled\")\n    return()\nendif()\n\nset(UNIT_NAME \"java\")\n\nproject(modelbox-virtualdriver-${UNIT_NAME})\n\nfile(GLOB_RECURSE MODELBOX_VIRTUALDRIVER_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(MODELBOX_VIRTUALDRIVER_SHARED libmodelbox-virtualdriver-${UNIT_NAME}-shared)\nset(MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_VIRTUALDRIVER_SHARED} SHARED ${MODELBOX_VIRTUALDRIVER_SOURCE})\nset(LIBMODELBOX_VIRTUAL_JAVA_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(LIBMODELBOX_VIRTUALDRIVER_JAVA_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} rt)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} 
dl)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-virtualdriver-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_VIRTUALDRIVER_SHARED}\n    COMPONENT libmodelbox\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY \n    ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT libmodelbox-devel\n    )\n\nset(LIBMODELBOX_VIRTUALDRIVER_JAVA_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_JAVA_INCLUDE ${MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_JAVA_SOURCES ${MODELBOX_VIRTUALDRIVER_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_JAVA_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-virtualdriver-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_VIRTUALDRIVER_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/virtual/java/virtualdriver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_java.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<JavaVirtualDriverManager>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_VIRTUAL);\n  desc->SetName(BIND_JAVA_FLOWUNIT_NAME);\n  desc->SetType(modelbox::DRIVER_TYPE_VIRTUAL);\n  desc->SetVersion(BIND_JAVA_FLOWUNIT_VERSION);\n  desc->SetNodelete(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/virtual/java/virtualdriver_java.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"virtualdriver_java.h\"\n\n#include <libgen.h>\n\n#include <utility>\n\nconstexpr const char *VIRTUAL_FLOWUNIT_TYPE = \"java\";\n\nvoid VirtualJavaFlowUnitDesc::SetJarEntry(std::string java_entry) {\n  java_entry_ = std::move(java_entry);\n}\n\nstd::shared_ptr<modelbox::DriverFactory> JavaVirtualDriver::CreateFactory() {\n  auto factory = std::make_shared<VirtualJavaFlowUnitFactory>();\n  auto real_driver_list = GetBindDriver();\n  factory->SetDriver(shared_from_this());\n  auto real_factory_list =\n      std::vector<std::shared_ptr<modelbox::DriverFactory>>();\n  for (auto &real_driver : real_driver_list) {\n    auto real_factory = real_driver->CreateFactory();\n    if (real_factory == nullptr) {\n      auto driver_desc = real_driver->GetDriverDesc();\n      MBLOG_ERROR << \"real driver binded by virtual java driver create \"\n                     \"factory failed, real drivers is \"\n                  << driver_desc->GetName() << \", \" << driver_desc->GetType()\n                  << \", \" << driver_desc->GetFilePath();\n      continue;\n    }\n    real_factory_list.push_back(real_factory);\n  }\n  factory->SetFlowUnitFactory(real_factory_list);\n  return factory;\n}\n\nstd::vector<std::shared_ptr<modelbox::Driver>>\nJavaVirtualDriver::GetBindDriver() {\n  return java_flowunit_driver_;\n}\n\nvoid 
JavaVirtualDriver::SetBindDriver(\n    const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list) {\n  java_flowunit_driver_ = driver_list;\n}\n\nJavaVirtualDriverManager::JavaVirtualDriverManager() = default;\n\nJavaVirtualDriverManager::~JavaVirtualDriverManager() = default;\n\nmodelbox::Status JavaVirtualDriverManager::Init(modelbox::Drivers &driver) {\n  auto ret = BindBaseDriver(driver);\n  return ret;\n}\n\nmodelbox::Status JavaVirtualDriverManager::Scan(const std::string &path) {\n  std::vector<std::string> drivers_list;\n  std::string filter = \"*.toml\";\n  auto status = modelbox::ListSubDirectoryFiles(path, filter, &drivers_list);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"list directory:  \" + path + \"/\" + filter + \" failed.\";\n    return status;\n  }\n\n  for (auto &driver_file : drivers_list) {\n    auto result = Add(driver_file);\n    if (result) {\n      MBLOG_INFO << \"Add virtual driver \" << driver_file << \" success\";\n    }\n\n    if (result == modelbox::STATUS_NOTSUPPORT) {\n      MBLOG_DEBUG << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    } else if (!result) {\n      MBLOG_ERROR << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaVirtualDriverManager::Add(const std::string &file) {\n  std::string name;\n  std::string type;\n  std::string version;\n  std::string description;\n  std::string entry;\n  std::string flowunit_type;\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(file);\n  if (config == nullptr) {\n    const auto &err_msg = modelbox::StatusError.Errormsg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  flowunit_type = config->GetString(\"base.type\");\n  if 
(flowunit_type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'type'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'type'.\"};\n  }\n\n  if (flowunit_type != VIRTUAL_FLOWUNIT_TYPE) {\n    auto err_msg = \"the config type is \" + flowunit_type +\n                   \", but the so type is \" + std::string(VIRTUAL_FLOWUNIT_TYPE);\n    return {modelbox::STATUS_NOTSUPPORT, err_msg};\n  }\n\n  name = config->GetString(\"base.name\");\n  if (name.empty()) {\n    MBLOG_ERROR << \"the config does not have 'name'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'name'.\"};\n  }\n\n  type = config->GetString(\"base.device\");\n  if (type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'device'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'device'.\"};\n  }\n\n  version = config->GetString(\"base.version\");\n  if (version.empty()) {\n    MBLOG_ERROR << \"the config does not have 'version'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'version'.\"};\n  }\n\n  description = config->GetString(\"base.description\");\n  if (description.empty()) {\n    MBLOG_ERROR << \"the config does not have 'description'.\";\n    return {modelbox::STATUS_BADCONF,\n            \"the config does not have 'description'.\"};\n  }\n\n  std::shared_ptr<JavaVirtualDriver> driver =\n      std::make_shared<JavaVirtualDriver>();\n  std::shared_ptr<modelbox::DriverDesc> driver_desc =\n      std::make_shared<modelbox::DriverDesc>();\n  driver_desc->SetClass(\"DRIVER-FLOWUNIT\");\n  driver_desc->SetFilePath(file);\n  driver_desc->SetName(name);\n  driver_desc->SetType(type);\n  auto status = driver_desc->SetVersion(version);\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto err_msg = \"SetVersion failed, version: \" + version;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  driver_desc->SetDescription(description);\n  driver->SetDriverDesc(driver_desc);\n  
driver->SetVirtual(true);\n  driver->SetBindDriver(java_flowunit_driver_list_);\n  drivers_list_.push_back(driver);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JavaVirtualDriverManager::BindBaseDriver(\n    modelbox::Drivers &driver) {\n  for (const auto &bind_type : BIND_JAVA_FLOWUNIT_TYPE) {\n    auto tmp_driver =\n        driver.GetDriver(modelbox::DRIVER_CLASS_FLOWUNIT, bind_type,\n                         BIND_JAVA_FLOWUNIT_NAME, BIND_JAVA_FLOWUNIT_VERSION);\n    if (tmp_driver == nullptr) {\n      continue;\n    }\n\n    java_flowunit_driver_list_.push_back(tmp_driver);\n  }\n\n  if (java_flowunit_driver_list_.empty()) {\n    return {modelbox::STATUS_NOTFOUND, \"can not find java flowunit\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualJavaFlowUnitFactory::FillInput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n    const std::string &device) {\n  auto input = config->GetSubKeys(\"input\");\n  if (input.empty()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  for (unsigned int i = 1; i <= input.size(); ++i) {\n    std::string input_device;\n    std::string input_name;\n    std::string input_type;\n    auto key = \"input.input\" + std::to_string(i);\n    auto input_item_table = config->GetSubKeys(key);\n    if (input_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    input_name = config->GetString(name_index);\n    if (input_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto device_index = key + \".device\";\n    input_device = config->GetString(device_index);\n    if (input_device.empty()) {\n      input_device = device;\n    }\n\n    flowunit_desc->AddFlowUnitInput(\n        modelbox::FlowUnitInput(input_name, 
input_device, input_type));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualJavaFlowUnitFactory::FillOutput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n    const std::string &device) {\n  auto output = config->GetSubKeys(\"output\");\n  if (output.empty()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  for (unsigned int i = 1; i <= output.size(); ++i) {\n    std::string output_device;\n    std::string output_name;\n    std::string output_type;\n    auto key = \"output.output\" + std::to_string(i);\n    auto output_item_table = config->GetSubKeys(key);\n    if (output_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    output_name = config->GetString(name_index);\n    if (output_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto device_index = key + \".device\";\n    output_device = config->GetString(device_index);\n    if (output_device.empty()) {\n      output_device = device;\n    }\n\n    flowunit_desc->AddFlowUnitOutput(\n        modelbox::FlowUnitOutput(output_name, output_device, output_type));\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualJavaFlowUnitFactory::FillBaseInfo(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n    const std::string &toml_file, std::string *device) {\n  auto java_entry = config->GetString(\"base.entry\");\n  if (java_entry.empty()) {\n    MBLOG_ERROR << \"the key 'entry' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  flowunit_desc->SetJarEntry(java_entry);\n\n  *device = config->GetString(\"base.device\");\n  if (device->empty()) {\n    MBLOG_ERROR << \"the key 'device' is not found 
under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto group_type = config->GetString(\"base.group_type\");\n  if (group_type.empty()) {\n    MBLOG_WARN << \"the key group type is empty, so classify it into Undefined.\";\n  }\n  flowunit_desc->SetFlowUnitGroupType(group_type);\n\n  return modelbox::STATUS_OK;\n}\n\nvoid VirtualJavaFlowUnitFactory::FillFlowUnitType(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc) {\n  auto config_op = config->GetSubKeys(\"config\");\n  if (!config_op.empty()) {\n    flowunit_desc->SetConfiguration(config->GetSubConfig(\"config\"));\n  }\n\n  auto is_stream = config->GetBool(\"base.stream\", true);\n  if (is_stream) {\n    flowunit_desc->SetFlowType(modelbox::STREAM);\n  } else {\n    flowunit_desc->SetFlowType(modelbox::NORMAL);\n  }\n\n  auto is_condition = config->GetBool(\"base.condition\", false);\n  if (is_condition) {\n    flowunit_desc->SetConditionType(modelbox::IF_ELSE);\n  } else {\n    flowunit_desc->SetConditionType(modelbox::NONE);\n  }\n\n  flowunit_desc->SetOutputType(modelbox::ORIGIN);\n\n  auto is_collapse = config->GetBool(\"base.collapse\", false);\n  if (is_collapse) {\n    flowunit_desc->SetOutputType(modelbox::COLLAPSE);\n    auto is_collapse_all = config->GetBool(\"base.collapse_all\", true);\n    flowunit_desc->SetCollapseAll(is_collapse_all);\n  }\n\n  auto is_expand = config->GetBool(\"base.expand\", false);\n  if (is_expand) {\n    flowunit_desc->SetOutputType(modelbox::EXPAND);\n  }\n\n  auto is_same_count = config->GetBool(\"base.stream_same_count\", false);\n  if (is_same_count) {\n    flowunit_desc->SetStreamSameCount(true);\n  } else {\n    flowunit_desc->SetStreamSameCount(false);\n  }\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nVirtualJavaFlowUnitFactory::FlowUnitProbe() {\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> return_map;\n  auto driver_desc = 
GetDriver()->GetDriverDesc();\n  auto toml_file = driver_desc->GetFilePath();\n  std::shared_ptr<VirtualJavaFlowUnitDesc> flowunit_desc =\n      std::make_shared<VirtualJavaFlowUnitDesc>();\n  modelbox::Status status;\n\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(toml_file);\n\n  std::string device;\n  auto ret = FillBaseInfo(config, flowunit_desc, toml_file, &device);\n  if (ret != modelbox::STATUS_OK) {\n    return return_map;\n  }\n\n  auto input_ret = FillInput(config, flowunit_desc, device);\n  if (input_ret == modelbox::STATUS_BADCONF) {\n    return return_map;\n  }\n\n  auto output_ret = FillOutput(config, flowunit_desc, device);\n  if (output_ret == modelbox::STATUS_BADCONF) {\n    return return_map;\n  }\n\n  if (output_ret == modelbox::STATUS_NOTFOUND &&\n      input_ret == modelbox::STATUS_NOTFOUND) {\n    MBLOG_ERROR\n        << \"neither the key 'input' nor 'output' is not found in config file.\";\n    return return_map;\n  }\n\n  FillFlowUnitType(config, flowunit_desc);\n\n  const auto &tom_file_path = driver_desc->GetFilePath();\n  auto dir_name = modelbox::GetDirName(tom_file_path);\n  flowunit_desc->SetJarFilePath(std::string(dir_name));\n\n  flowunit_desc->SetFlowUnitName(driver_desc->GetName());\n  return_map.insert(std::make_pair(driver_desc->GetName(), flowunit_desc));\n  return return_map;\n}\n\nvoid VirtualJavaFlowUnitFactory::SetFlowUnitFactory(\n    const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n        &bind_flowunit_factory_list) {\n  for (const auto &bind_flowunit_factory : bind_flowunit_factory_list) {\n    bind_flowunit_factory_list_.push_back(\n        std::dynamic_pointer_cast<FlowUnitFactory>(bind_flowunit_factory));\n  }\n}\n\nstd::shared_ptr<modelbox::FlowUnit> VirtualJavaFlowUnitFactory::CreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type) {\n  
for (auto &flowunit_factory : bind_flowunit_factory_list_) {\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetFlowUnitFactoryType() != unit_type) {\n      continue;\n    }\n    return std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n        ->CreateFlowUnit(unit_name, unit_type);\n  }\n  return nullptr;\n}\n\nVirtualJavaFlowUnitFactory::VirtualJavaFlowUnitFactory() = default;\n\nVirtualJavaFlowUnitFactory::~VirtualJavaFlowUnitFactory() = default;\n\nstd::shared_ptr<modelbox::Driver> VirtualJavaFlowUnitFactory::GetDriver() {\n  return driver_;\n}\n\nvoid VirtualJavaFlowUnitFactory::SetDriver(\n    const std::shared_ptr<modelbox::Driver> &driver) {\n  driver_ = driver;\n}\n\nstd::string VirtualJavaFlowUnitDesc::GetJarEntry() { return java_entry_; }\n\nvoid VirtualJavaFlowUnitDesc::SetConfiguration(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  config_ = config;\n}\n\nstd::shared_ptr<modelbox::Configuration>\nVirtualJavaFlowUnitDesc::GetConfiguration() {\n  return config_;\n}\n\nVirtualJavaFlowUnitDesc::VirtualJavaFlowUnitDesc() = default;\n\nVirtualJavaFlowUnitDesc::~VirtualJavaFlowUnitDesc() = default;\n\nvoid VirtualJavaFlowUnitDesc::SetJarFilePath(const std::string &path) {\n  jar_file_path_ = path;\n}\n\nconst std::string &VirtualJavaFlowUnitDesc::GetJarFilePath() const {\n  return jar_file_path_;\n}\n\nJavaVirtualDriverDesc::JavaVirtualDriverDesc() = default;\n\nJavaVirtualDriverDesc::~JavaVirtualDriverDesc() = default;\n\nJavaVirtualDriver::JavaVirtualDriver() = default;\n\nJavaVirtualDriver::~JavaVirtualDriver() = default;"
  },
  {
    "path": "src/drivers/virtual/java/virtualdriver_java.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_VIRTUAL_DRIVER_JAVA_H_\n#define MODELBOX_VIRTUAL_DRIVER_JAVA_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *BIND_JAVA_FLOWUNIT_NAME = \"java\";\nconstexpr const char *BIND_JAVA_FLOWUNIT_VERSION = \"1.0.0\";\nconst std::vector<std::string> BIND_JAVA_FLOWUNIT_TYPE{\"cpu\"};\n\n// Virtual\nclass JavaVirtualDriverDesc : public modelbox::VirtualDriverDesc {\n public:\n  JavaVirtualDriverDesc();\n  ~JavaVirtualDriverDesc() override;\n};\n\nclass VirtualJavaFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  VirtualJavaFlowUnitDesc();\n  ~VirtualJavaFlowUnitDesc() override;\n\n  void SetJarEntry(std::string java_entry);\n  std::string GetJarEntry();\n\n  void SetConfiguration(const std::shared_ptr<modelbox::Configuration> &config);\n  std::shared_ptr<modelbox::Configuration> GetConfiguration();\n\n  void SetJarFilePath(const std::string &path);\n  const std::string &GetJarFilePath() const;\n\n protected:\n  std::string java_entry_;\n  std::shared_ptr<modelbox::Configuration> config_;\n  std::string jar_file_path_;\n};\n\nclass JavaVirtualDriver : public modelbox::VirtualDriver {\n public:\n  JavaVirtualDriver();\n  ~JavaVirtualDriver() 
override;\n\n  std::shared_ptr<modelbox::DriverFactory> CreateFactory() override;\n  std::vector<std::shared_ptr<modelbox::Driver>> GetBindDriver();\n  void SetBindDriver(\n      const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list);\n\n private:\n  std::vector<std::shared_ptr<modelbox::Driver>> java_flowunit_driver_;\n};\n\nclass VirtualJavaFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  VirtualJavaFlowUnitFactory();\n  ~VirtualJavaFlowUnitFactory() override;\n\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n\n  void SetFlowUnitFactory(\n      const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n          &bind_flowunit_factory_list) override;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override;\n\n  void SetDriver(const std::shared_ptr<modelbox::Driver> &driver) override;\n\n private:\n  modelbox::Status FillInput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n      const std::string &device);\n  modelbox::Status FillOutput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n      const std::string &device);\n  modelbox::Status FillBaseInfo(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc,\n      const std::string &toml_file, std::string *device);\n  void FillFlowUnitType(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualJavaFlowUnitDesc> &flowunit_desc);\n  std::shared_ptr<modelbox::Driver> driver_;\n  std::vector<std::shared_ptr<modelbox::DriverFactory>>\n      bind_flowunit_factory_list_;\n};\n\nclass JavaVirtualDriverManager : public modelbox::VirtualDriverManager {\n public:\n  
JavaVirtualDriverManager();\n  ~JavaVirtualDriverManager() override;\n\n  modelbox::Status Scan(const std::string &path) override;\n  modelbox::Status Add(const std::string &file) override;\n  modelbox::Status Init(modelbox::Drivers &driver) override;\n\n private:\n  modelbox::Status BindBaseDriver(modelbox::Drivers &driver);\n  std::vector<std::shared_ptr<modelbox::Driver>> java_flowunit_driver_list_;\n};\n\n#endif  // MODELBOX_VIRTUAL_DRIVER_JAVA_H_"
  },
  {
    "path": "src/drivers/virtual/python/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"python\")\n\nproject(modelbox-virtualdriver-${UNIT_NAME})\n\nfile(GLOB_RECURSE MODELBOX_VIRTUALDRIVER_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(MODELBOX_VIRTUALDRIVER_SHARED libmodelbox-virtualdriver-${UNIT_NAME}-shared)\nset(MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_VIRTUALDRIVER_SHARED} SHARED ${MODELBOX_VIRTUALDRIVER_SOURCE})\nset(LIBMODELBOX_VIRTUAL_PYTHON_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\nset(LIBMODELBOX_VIRTUALDRIVER_PYTHON_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} rt)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} dl)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} 
${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-virtualdriver-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_VIRTUALDRIVER_SHARED}\n    COMPONENT libmodelbox\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY \n    ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT libmodelbox-devel\n    )\n\nset(LIBMODELBOX_VIRTUALDRIVER_PYTHON_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_PYTHON_INCLUDE ${MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_PYTHON_SOURCES ${MODELBOX_VIRTUALDRIVER_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_PYTHON_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-virtualdriver-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_VIRTUALDRIVER_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "src/drivers/virtual/python/virtualdriver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_python.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<PythonVirtualDriverManager>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_VIRTUAL);\n  desc->SetName(BIND_PYTHON_FLOWUNIT_NAME);\n  desc->SetType(modelbox::DRIVER_TYPE_VIRTUAL);\n  desc->SetVersion(BIND_PYTHON_FLOWUNIT_VERSION);\n  desc->SetNodelete(true);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/virtual/python/virtualdriver_python.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"virtualdriver_python.h\"\n\n#include <libgen.h>\n\n#include <utility>\n\nconstexpr const char *VIRTUAL_FLOWUNIT_TYPE = \"python\";\n\nPythonVirtualDriverDesc::PythonVirtualDriverDesc() = default;\n\nPythonVirtualDriverDesc::~PythonVirtualDriverDesc() = default;\n\nstd::shared_ptr<modelbox::DriverFactory> PythonVirtualDriver::CreateFactory() {\n  auto factory = std::make_shared<VirtualPythonFlowUnitFactory>();\n  auto real_driver_list = GetBindDriver();\n  factory->SetDriver(shared_from_this());\n  auto real_factory_list =\n      std::vector<std::shared_ptr<modelbox::DriverFactory>>();\n  for (auto &real_driver : real_driver_list) {\n    auto real_factory = real_driver->CreateFactory();\n    if (real_factory == nullptr) {\n      auto driver_desc = real_driver->GetDriverDesc();\n      MBLOG_ERROR << \"real driver binded by virtual python driver create \"\n                     \"factory failed, real drivers is \"\n                  << driver_desc->GetName() << \", \" << driver_desc->GetType()\n                  << \", \" << driver_desc->GetFilePath();\n      continue;\n    }\n    real_factory_list.push_back(real_factory);\n  }\n  factory->SetFlowUnitFactory(real_factory_list);\n  return factory;\n}\n\nPythonVirtualDriver::PythonVirtualDriver() = default;\n\nPythonVirtualDriver::~PythonVirtualDriver() = 
default;\n\nstd::vector<std::shared_ptr<modelbox::Driver>>\nPythonVirtualDriver::GetBindDriver() {\n  return python_flowunit_driver_;\n}\n\nvoid PythonVirtualDriver::SetBindDriver(\n    const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list) {\n  python_flowunit_driver_ = driver_list;\n}\n\nPythonVirtualDriverManager::PythonVirtualDriverManager() = default;\n\nPythonVirtualDriverManager::~PythonVirtualDriverManager() = default;\n\nmodelbox::Status PythonVirtualDriverManager::Init(modelbox::Drivers &driver) {\n  auto ret = BindBaseDriver(driver);\n  return ret;\n}\n\nmodelbox::Status PythonVirtualDriverManager::Scan(const std::string &path) {\n  std::vector<std::string> drivers_list;\n  std::string filter = \"*.toml\";\n  auto status = modelbox::ListSubDirectoryFiles(path, filter, &drivers_list);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"list directory:  \" + path + \"/\" + filter + \" failed.\";\n    return status;\n  }\n\n  for (auto &driver_file : drivers_list) {\n    auto result = Add(driver_file);\n    if (result) {\n      MBLOG_INFO << \"Add virtual driver \" << driver_file << \" success\";\n    }\n\n    if (result == modelbox::STATUS_NOTSUPPORT) {\n      MBLOG_DEBUG << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    } else if (!result) {\n      MBLOG_ERROR << \"add file: \" << driver_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PythonVirtualDriverManager::Add(const std::string &file) {\n  std::string name;\n  std::string type;\n  std::string version;\n  std::string description;\n  std::string entry;\n  std::string flowunit_type;\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(file);\n  if (config == nullptr) {\n    const auto &err_msg = 
modelbox::StatusError.Errormsg();\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  flowunit_type = config->GetString(\"base.type\");\n  if (flowunit_type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'type'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'type'.\"};\n  }\n\n  if (flowunit_type != VIRTUAL_FLOWUNIT_TYPE) {\n    auto err_msg = \"the config type is \" + flowunit_type +\n                   \", but the so type is \" + std::string(VIRTUAL_FLOWUNIT_TYPE);\n    return {modelbox::STATUS_NOTSUPPORT, err_msg};\n  }\n\n  name = config->GetString(\"base.name\");\n  if (name.empty()) {\n    MBLOG_ERROR << \"the config does not have 'name'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'name'.\"};\n  }\n\n  type = config->GetString(\"base.device\");\n  if (type.empty()) {\n    MBLOG_ERROR << \"the config does not have 'device'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'device'.\"};\n  }\n\n  version = config->GetString(\"base.version\");\n  if (version.empty()) {\n    MBLOG_ERROR << \"the config does not have 'version'.\";\n    return {modelbox::STATUS_BADCONF, \"the config does not have 'version'.\"};\n  }\n\n  description = config->GetString(\"base.description\");\n  if (description.empty()) {\n    MBLOG_ERROR << \"the config does not have 'description'.\";\n    return {modelbox::STATUS_BADCONF,\n            \"the config does not have 'description'.\"};\n  }\n\n  std::shared_ptr<PythonVirtualDriver> driver =\n      std::make_shared<PythonVirtualDriver>();\n  std::shared_ptr<modelbox::DriverDesc> driver_desc =\n      std::make_shared<modelbox::DriverDesc>();\n  driver_desc->SetClass(\"DRIVER-FLOWUNIT\");\n  driver_desc->SetFilePath(file);\n  driver_desc->SetName(name);\n  driver_desc->SetType(type);\n  auto status = driver_desc->SetVersion(version);\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto err_msg = \"SetVersion failed, 
version: \" + version;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  driver_desc->SetDescription(description);\n  driver->SetDriverDesc(driver_desc);\n  driver->SetVirtual(true);\n  driver->SetBindDriver(python_flowunit_driver_list_);\n  // TODO: 判断是否重复存在\n  drivers_list_.push_back(driver);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PythonVirtualDriverManager::BindBaseDriver(\n    modelbox::Drivers &driver) {\n  for (const auto &bind_type : BIND_PYTHON_FLOWUNIT_TYPE) {\n    auto tmp_driver = driver.GetDriver(modelbox::DRIVER_CLASS_FLOWUNIT,\n                                       bind_type, BIND_PYTHON_FLOWUNIT_NAME,\n                                       BIND_PYTHON_FLOWUNIT_VERSION);\n    if (tmp_driver == nullptr) {\n      continue;\n    }\n\n    python_flowunit_driver_list_.push_back(tmp_driver);\n  }\n\n  if (python_flowunit_driver_list_.empty()) {\n    return {modelbox::STATUS_NOTFOUND, \"can not find python flowunit\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nVirtualPythonFlowUnitFactory::VirtualPythonFlowUnitFactory() = default;\nVirtualPythonFlowUnitFactory::~VirtualPythonFlowUnitFactory() = default;\n\nstd::shared_ptr<modelbox::Driver> VirtualPythonFlowUnitFactory::GetDriver() {\n  return driver_;\n}\n\nvoid VirtualPythonFlowUnitFactory::SetDriver(\n    const std::shared_ptr<modelbox::Driver> &driver) {\n  driver_ = driver;\n}\n\nmodelbox::Status VirtualPythonFlowUnitFactory::FillInput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n    const std::string &device) {\n  auto input = config->GetSubKeys(\"input\");\n  if (input.empty()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  for (unsigned int i = 1; i <= input.size(); ++i) {\n    std::string input_device;\n    std::string input_name;\n    std::string input_type;\n    auto key = \"input.input\" + std::to_string(i);\n    auto input_item_table = config->GetSubKeys(key);\n    if 
(input_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    input_name = config->GetString(name_index);\n    if (input_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto device_index = key + \".device\";\n    input_device = config->GetString(device_index);\n    if (input_device.empty()) {\n      input_device = device;\n    }\n\n    flowunit_desc->AddFlowUnitInput(\n        modelbox::FlowUnitInput(input_name, input_device, input_type));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualPythonFlowUnitFactory::FillOutput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n    const std::string &device) {\n  auto output = config->GetSubKeys(\"output\");\n  if (output.empty()) {\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  for (unsigned int i = 1; i <= output.size(); ++i) {\n    std::string output_device;\n    std::string output_name;\n    std::string output_type;\n    auto key = \"output.output\" + std::to_string(i);\n    auto output_item_table = config->GetSubKeys(key);\n    if (output_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    output_name = config->GetString(name_index);\n    if (output_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto device_index = key + \".device\";\n    output_device = config->GetString(device_index);\n    if (output_device.empty()) {\n      output_device = device;\n    }\n\n    flowunit_desc->AddFlowUnitOutput(\n        modelbox::FlowUnitOutput(output_name, output_device, 
output_type));\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status VirtualPythonFlowUnitFactory::FillBaseInfo(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n    const std::string &toml_file, std::string *device) {\n  auto python_entry = config->GetString(\"base.entry\");\n  if (python_entry.empty()) {\n    MBLOG_ERROR << \"the key 'entry' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  flowunit_desc->SetPythonEntry(python_entry);\n\n  *device = config->GetString(\"base.device\");\n  if (device->empty()) {\n    MBLOG_ERROR << \"the key 'device' is not found under base.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto group_type = config->GetString(\"base.group_type\");\n  if (group_type.empty()) {\n    MBLOG_WARN << \"the key group type is empty, so classify it into Undefined.\";\n  }\n  flowunit_desc->SetFlowUnitGroupType(group_type);\n\n  return modelbox::STATUS_OK;\n}\n\nvoid VirtualPythonFlowUnitFactory::FillFlowUnitType(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc) {\n  auto config_op = config->GetSubKeys(\"config\");\n  if (!config_op.empty()) {\n    flowunit_desc->SetConfiguration(config->GetSubConfig(\"config\"));\n  }\n\n  auto is_stream = config->GetBool(\"base.stream\", true);\n  if (is_stream) {\n    flowunit_desc->SetFlowType(modelbox::STREAM);\n  } else {\n    flowunit_desc->SetFlowType(modelbox::NORMAL);\n  }\n\n  auto is_condition = config->GetBool(\"base.condition\", false);\n  if (is_condition) {\n    flowunit_desc->SetConditionType(modelbox::IF_ELSE);\n  } else {\n    flowunit_desc->SetConditionType(modelbox::NONE);\n  }\n\n  flowunit_desc->SetOutputType(modelbox::ORIGIN);\n\n  auto is_collapse = config->GetBool(\"base.collapse\", false);\n  if (is_collapse) {\n    flowunit_desc->SetOutputType(modelbox::COLLAPSE);\n    auto is_collapse_all = 
config->GetBool(\"base.collapse_all\", true);\n    flowunit_desc->SetCollapseAll(is_collapse_all);\n  }\n\n  auto is_expand = config->GetBool(\"base.expand\", false);\n  if (is_expand) {\n    flowunit_desc->SetOutputType(modelbox::EXPAND);\n  }\n\n  auto is_same_count = config->GetBool(\"base.stream_same_count\", false);\n  if (is_same_count) {\n    flowunit_desc->SetStreamSameCount(true);\n  } else {\n    flowunit_desc->SetStreamSameCount(false);\n  }\n\n  auto exception_visible = config->GetBool(\"base.exception_visible\", false);\n  if (exception_visible) {\n    flowunit_desc->SetExceptionVisible(true);\n  } else {\n    flowunit_desc->SetExceptionVisible(false);\n  }\n\n  if (config->Contain(\"base.max_batch_size\")) {\n    auto max_batch_size = config->GetInt32(\"base.max_batch_size\", 1);\n    flowunit_desc->SetMaxBatchSize(max_batch_size);\n  }\n\n  if (config->Contain(\"base.default_batch_size\")) {\n    auto default_batch_size = config->GetInt32(\"base.default_batch_size\", 1);\n    flowunit_desc->SetDefaultBatchSize(default_batch_size);\n  }\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nVirtualPythonFlowUnitFactory::FlowUnitProbe() {\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> return_map;\n  auto driver_desc = GetDriver()->GetDriverDesc();\n  auto toml_file = driver_desc->GetFilePath();\n  std::shared_ptr<VirtualPythonFlowUnitDesc> flowunit_desc =\n      std::make_shared<VirtualPythonFlowUnitDesc>();\n  modelbox::Status status;\n\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(toml_file);\n\n  std::string device;\n  auto ret = FillBaseInfo(config, flowunit_desc, toml_file, &device);\n  if (ret != modelbox::STATUS_OK) {\n    return return_map;\n  }\n\n  auto input_ret = FillInput(config, flowunit_desc, device);\n  if (input_ret == modelbox::STATUS_BADCONF) {\n    return 
return_map;\n  }\n\n  auto output_ret = FillOutput(config, flowunit_desc, device);\n  if (output_ret == modelbox::STATUS_BADCONF) {\n    return return_map;\n  }\n\n  if (output_ret == modelbox::STATUS_NOTFOUND &&\n      input_ret == modelbox::STATUS_NOTFOUND) {\n    MBLOG_ERROR\n        << \"neither the key 'input' nor 'output' is not found in config file.\";\n    return return_map;\n  }\n\n  FillFlowUnitType(config, flowunit_desc);\n\n  const auto &tom_file_path = driver_desc->GetFilePath();\n  auto dir_name = modelbox::GetDirName(tom_file_path);\n  flowunit_desc->SetPythonFilePath(std::string(dir_name));\n\n  flowunit_desc->SetFlowUnitName(driver_desc->GetName());\n  return_map.insert(std::make_pair(driver_desc->GetName(), flowunit_desc));\n  return return_map;\n}\n\nvoid VirtualPythonFlowUnitFactory::SetFlowUnitFactory(\n    const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n        &bind_flowunit_factory_list) {\n  for (const auto &bind_flowunit_factory : bind_flowunit_factory_list) {\n    bind_flowunit_factory_list_.push_back(\n        std::dynamic_pointer_cast<FlowUnitFactory>(bind_flowunit_factory));\n  }\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nVirtualPythonFlowUnitFactory::CreateFlowUnit(const std::string &unit_name,\n                                             const std::string &unit_type) {\n  for (auto &flowunit_factory : bind_flowunit_factory_list_) {\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetFlowUnitFactoryType() != unit_type) {\n      continue;\n    }\n    return std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n        ->CreateFlowUnit(unit_name, unit_type);\n  }\n  modelbox::StatusError = {modelbox::STATUS_NOTFOUND,\n                           \"not found flowunit \" + std::string(unit_name)};\n  return nullptr;\n}\n\nVirtualPythonFlowUnitDesc::VirtualPythonFlowUnitDesc() = default;\n\nVirtualPythonFlowUnitDesc::~VirtualPythonFlowUnitDesc() = default;\n\nvoid 
SetConfiguration(const std::shared_ptr<modelbox::Configuration> &config);\nstd::shared_ptr<modelbox::Configuration> GetConfiguration();\n\nvoid VirtualPythonFlowUnitDesc::SetPythonFilePath(const std::string &path) {\n  python_file_path_ = path;\n}\nconst std::string &VirtualPythonFlowUnitDesc::GetPythonFilePath() const {\n  return python_file_path_;\n}\n\nvoid VirtualPythonFlowUnitDesc::SetPythonEntry(std::string python_entry) {\n  python_entry_ = std::move(python_entry);\n}\n\nstd::string VirtualPythonFlowUnitDesc::GetPythonEntry() {\n  return python_entry_;\n}\n\nvoid VirtualPythonFlowUnitDesc::SetConfiguration(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  config_ = config;\n}\n\nstd::shared_ptr<modelbox::Configuration>\nVirtualPythonFlowUnitDesc::GetConfiguration() {\n  return config_;\n}"
  },
  {
    "path": "src/drivers/virtual/python/virtualdriver_python.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_VIRTUAL_DRIVER_PYTHON_H_\n#define MODELBOX_VIRTUAL_DRIVER_PYTHON_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *BIND_PYTHON_FLOWUNIT_NAME = \"python\";\nconstexpr const char *BIND_PYTHON_FLOWUNIT_VERSION = \"1.0.0\";\nconst std::vector<std::string> BIND_PYTHON_FLOWUNIT_TYPE{\"cpu\"};\n\n// Virtual\nclass PythonVirtualDriverDesc : public modelbox::VirtualDriverDesc {\n public:\n  PythonVirtualDriverDesc();\n  ~PythonVirtualDriverDesc() override;\n};\n\nclass VirtualPythonFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  VirtualPythonFlowUnitDesc();\n  ~VirtualPythonFlowUnitDesc() override;\n\n  void SetPythonEntry(std::string python_entry);\n  std::string GetPythonEntry();\n\n  void SetConfiguration(const std::shared_ptr<modelbox::Configuration> &config);\n  std::shared_ptr<modelbox::Configuration> GetConfiguration();\n\n  void SetPythonFilePath(const std::string &path);\n  const std::string &GetPythonFilePath() const;\n\n protected:\n  std::string python_entry_;\n  std::shared_ptr<modelbox::Configuration> config_;\n  std::string python_file_path_;\n};\n\nclass PythonVirtualDriver : public modelbox::VirtualDriver {\n public:\n  
PythonVirtualDriver();\n  ~PythonVirtualDriver() override;\n\n  std::shared_ptr<modelbox::DriverFactory> CreateFactory() override;\n  std::vector<std::shared_ptr<modelbox::Driver>> GetBindDriver();\n  void SetBindDriver(\n      const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list);\n\n private:\n  std::vector<std::shared_ptr<modelbox::Driver>> python_flowunit_driver_;\n};\n\nclass VirtualPythonFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  VirtualPythonFlowUnitFactory();\n  ~VirtualPythonFlowUnitFactory() override;\n\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n\n  void SetFlowUnitFactory(\n      const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n          &bind_flowunit_factory_list) override;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override;\n\n  void SetDriver(const std::shared_ptr<modelbox::Driver> &driver) override;\n\n private:\n  modelbox::Status FillInput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n      const std::string &device);\n  modelbox::Status FillOutput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n      const std::string &device);\n  modelbox::Status FillBaseInfo(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc,\n      const std::string &toml_file, std::string *device);\n  void FillFlowUnitType(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<VirtualPythonFlowUnitDesc> &flowunit_desc);\n  std::shared_ptr<modelbox::Driver> driver_;\n  std::vector<std::shared_ptr<modelbox::DriverFactory>>\n      bind_flowunit_factory_list_;\n};\n\nclass 
PythonVirtualDriverManager : public modelbox::VirtualDriverManager {\n public:\n  PythonVirtualDriverManager();\n  ~PythonVirtualDriverManager() override;\n\n  modelbox::Status Scan(const std::string &path) override;\n  modelbox::Status Add(const std::string &file) override;\n  modelbox::Status Init(modelbox::Drivers &driver) override;\n\n private:\n  modelbox::Status BindBaseDriver(modelbox::Drivers &driver);\n  std::vector<std::shared_ptr<modelbox::Driver>> python_flowunit_driver_list_;\n};\n\n#endif  // MODELBOX_VIRTUAL_DRIVER_PYTHON_H_"
  },
  {
    "path": "src/drivers/virtual/yolobox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"yolo_postprocess\")\n\nproject(modelbox-virtualdriver-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_VIRTUALDRIVER_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\n\nset(MODELBOX_VIRTUALDRIVER_SHARED libmodelbox-virtualdriver-${UNIT_NAME}-shared)\nset(MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_VIRTUALDRIVER_SHARED} SHARED ${MODELBOX_VIRTUALDRIVER_SOURCE})\nset(LIBMODELBOX_VIRTUAL_YOLOBOX_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} rt)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} dl)\ntarget_link_libraries(${MODELBOX_VIRTUALDRIVER_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_VIRTUALDRIVER_SHARED} PROPERTIES OUTPUT_NAME 
\"modelbox-virtualdriver-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_VIRTUALDRIVER_SHARED}\n        COMPONENT libmodelbox\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} \n        DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n        COMPONENT libmodelbox-devel\n        )\n\nset(LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_SHARED ${MODELBOX_VIRTUALDRIVER_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_INCLUDE ${MODELBOX_VIRTUALDRIVER_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_SOURCES ${MODELBOX_VIRTUALDRIVER_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_VIRTUALDRIVER_YOLOBOX_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-virtualdriver-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_VIRTUALDRIVER_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/drivers/virtual/yolobox/virtualdriver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"virtualdriver_yolobox.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<YoloBoxVirtualDriverManager>();\n  return factory;\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  desc->SetClass(modelbox::DRIVER_CLASS_VIRTUAL);\n  desc->SetName(VIRTUAL_YOLO_POST_FLOWUNIT);\n  desc->SetType(modelbox::DRIVER_TYPE_VIRTUAL);\n  desc->SetVersion(BIND_FLOWUNIT_VERSION);\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::STATUS_OK;\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n}\n"
  },
  {
    "path": "src/drivers/virtual/yolobox/virtualdriver_yolobox.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"virtualdriver_yolobox.h\"\n\n#include <utility>\n\nconstexpr const char *VIRTUAL_FLOWUNIT_TYPE = \"yolo_postprocess\";\n\nstd::shared_ptr<modelbox::DriverFactory> YoloBoxVirtualDriver::CreateFactory() {\n  auto factory = std::make_shared<YoloBoxVirtualFlowUnitFactory>();\n  auto real_driver_list = GetBindDriver();\n  factory->SetDriver(shared_from_this());\n  auto real_factory_list =\n      std::vector<std::shared_ptr<modelbox::DriverFactory>>();\n  for (auto &real_driver : real_driver_list) {\n    auto real_factory = real_driver->CreateFactory();\n    if (real_factory == nullptr) {\n      auto driver_desc = real_driver->GetDriverDesc();\n      MBLOG_ERROR << \"real driver binded by virtual yolo driver create \"\n                     \"factory failed, real drivers is \"\n                  << driver_desc->GetName() << \", \" << driver_desc->GetType()\n                  << \", \" << driver_desc->GetFilePath();\n      continue;\n    }\n    real_factory_list.push_back(real_factory);\n  }\n  factory->SetFlowUnitFactory(real_factory_list);\n  return factory;\n}\n\nstd::vector<std::shared_ptr<modelbox::Driver>>\nYoloBoxVirtualDriver::GetBindDriver() {\n  return flowunit_driver_list_;\n}\n\nvoid YoloBoxVirtualDriver::SetBindDriver(\n    const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list) {\n  
flowunit_driver_list_ = driver_list;\n}\n\nmodelbox::Status YoloBoxVirtualDriverManager::Init(modelbox::Drivers &driver) {\n  auto ret = GetTargetDriverList(driver);\n  return ret;\n}\n\nmodelbox::Status YoloBoxVirtualDriverManager::Scan(const std::string &path) {\n  std::vector<std::string> config_file_list;\n  std::string filter = \"*.toml\";\n  auto status =\n      modelbox::ListSubDirectoryFiles(path, filter, &config_file_list);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"list directory:  \" + path + \"/\" + filter + \" failed.\";\n    return {status, err_msg};\n  }\n\n  for (auto &config_file : config_file_list) {\n    auto result = Add(config_file);\n    if (result) {\n      MBLOG_INFO << \"Add virtual driver \" << config_file << \" success\";\n    }\n\n    if (result == modelbox::STATUS_NOTSUPPORT) {\n      MBLOG_DEBUG << \"add file: \" << config_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    } else if (!result) {\n      MBLOG_ERROR << \"add file: \" << config_file << \" failed, \"\n                  << result.WrapErrormsgs();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloBoxVirtualDriverManager::Add(const std::string &file) {\n  std::string name;\n  std::string type;\n  std::string version;\n  std::string description;\n  std::string entry;\n  std::string flowunit_type;\n  auto builder = std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(file);\n  if (config == nullptr) {\n    MBLOG_ERROR << modelbox::StatusError.Errormsg();\n    return modelbox::STATUS_BADCONF;\n  }\n\n  flowunit_type = config->GetString(\"base.type\");\n  if (flowunit_type.empty()) {\n    auto err_msg = \"config \" + file + \" the config does not have 'type'.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  if (flowunit_type != VIRTUAL_FLOWUNIT_TYPE) {\n    auto err_msg = \"config \" + file + \" type is \" + 
flowunit_type +\n                   \", will not load as \" + std::string(VIRTUAL_FLOWUNIT_TYPE);\n    return {modelbox::STATUS_NOTSUPPORT, err_msg};\n  }\n\n  name = config->GetString(\"base.name\");\n  if (name.empty()) {\n    auto err_msg = \"config \" + file + \" does not have 'name'.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  type = config->GetString(\"base.device\");\n  if (type.empty()) {\n    auto err_msg = \"config \" + file + \" does not have 'device'.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  version = config->GetString(\"base.version\");\n  if (version.empty()) {\n    auto err_msg = \"config \" + file + \" does not have 'version'.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  description = config->GetString(\"base.description\");\n  if (description.empty()) {\n    auto err_msg = \"config \" + file + \" does not have 'description'.\";\n    MBLOG_ERROR << err_msg;\n    return {modelbox::STATUS_BADCONF, err_msg};\n  }\n\n  std::shared_ptr<YoloBoxVirtualDriver> driver =\n      std::make_shared<YoloBoxVirtualDriver>();\n  std::shared_ptr<modelbox::DriverDesc> driver_desc =\n      std::make_shared<modelbox::DriverDesc>();\n  driver_desc->SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  driver_desc->SetFilePath(file);\n  driver_desc->SetName(name);\n  driver_desc->SetType(type);\n  auto status = driver_desc->SetVersion(version);\n  if (status != modelbox::STATUS_SUCCESS) {\n    auto err_msg = \"SetVersion failed, version: \" + version;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  driver_desc->SetDescription(description);\n  driver->SetDriverDesc(driver_desc);\n  driver->SetVirtual(true);\n  driver->SetBindDriver(flowunit_driver_list_);\n  drivers_list_.push_back(driver);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloBoxVirtualDriverManager::GetTargetDriverList(\n    modelbox::Drivers &drivers) {\n  
for (const auto &bind_type : BIND_FLOWUNIT_TYPE) {\n    auto tmp_driver =\n        drivers.GetDriver(modelbox::DRIVER_CLASS_FLOWUNIT, bind_type,\n                          BIND_FLOWUNIT_NAME, BIND_FLOWUNIT_VERSION);\n    if (tmp_driver == nullptr) {\n      continue;\n    }\n\n    flowunit_driver_list_.push_back(tmp_driver);\n  }\n\n  if (flowunit_driver_list_.empty()) {\n    return {modelbox::STATUS_NOTFOUND, \"can not find yolo flowunit\"};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloBoxVirtualFlowUnitFactory::FillInput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<YoloBoxVirtualFlowUnitDesc> &flowunit_desc) {\n  auto unit_input = config->GetSubKeys(\"input\");\n  if (unit_input.empty()) {\n    MBLOG_ERROR << \"the key 'input' is not found in config file.\";\n    return modelbox::STATUS_BADCONF;\n  }\n\n  for (size_t i = 1; i <= unit_input.size(); ++i) {\n    std::string input_device;\n    std::string input_name;\n    std::string input_type;\n    auto key = \"input.input\" + std::to_string(i);\n    auto input_item_table = config->GetSubKeys(key);\n    if (input_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    input_name = config->GetString(name_index);\n    if (input_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(input_name));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status YoloBoxVirtualFlowUnitFactory::FillOutput(\n    std::shared_ptr<modelbox::Configuration> &config,\n    std::shared_ptr<YoloBoxVirtualFlowUnitDesc> &flowunit_desc) {\n  auto unit_output = config->GetSubKeys(\"output\");\n  if (unit_output.empty()) {\n    MBLOG_ERROR << \"the key 'output' is not found in config file.\";\n    return 
modelbox::STATUS_BADCONF;\n  }\n\n  for (size_t i = 1; i <= unit_output.size(); ++i) {\n    std::string output_device;\n    std::string output_name;\n    std::string output_type;\n    auto key = \"output.output\" + std::to_string(i);\n    auto output_item_table = config->GetSubKeys(key);\n    if (output_item_table.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" is not found in config file.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    auto name_index = key + \".name\";\n    output_name = config->GetString(name_index);\n    if (output_name.empty()) {\n      MBLOG_ERROR << \"the key \" << key << \" should have key name.\";\n      return modelbox::STATUS_BADCONF;\n    }\n\n    flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(output_name));\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>>\nYoloBoxVirtualFlowUnitFactory::FlowUnitProbe() {\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> return_map;\n  auto driver_desc = GetDriver()->GetDriverDesc();\n  auto toml_file = driver_desc->GetFilePath();\n\n  auto flowunit_desc = std::make_shared<YoloBoxVirtualFlowUnitDesc>();\n  flowunit_desc->SetVirtualType(VIRTUAL_FLOWUNIT_TYPE);\n  flowunit_desc->SetInputContiguous(false);\n\n  std::shared_ptr<modelbox::ConfigurationBuilder> builder =\n      std::make_shared<modelbox::ConfigurationBuilder>();\n  std::shared_ptr<modelbox::Configuration> config = builder->Build(toml_file);\n\n  auto ret = FillInput(config, flowunit_desc);\n  if (!ret) {\n    return return_map;\n  }\n\n  ret = FillOutput(config, flowunit_desc);\n  if (!ret) {\n    return return_map;\n  }\n\n  auto virtual_type = config->GetString(\"base.virtual_type\");\n  if (virtual_type.empty()) {\n    MBLOG_ERROR << \"the key 'virtual_type' is not found under base.\";\n    return return_map;\n  }\n\n  flowunit_desc->SetFlowUnitName(driver_desc->GetName());\n  flowunit_desc->SetVirtualType(virtual_type);\n  
flowunit_desc->SetConfiguration(config->GetSubConfig(\"config\"));\n  flowunit_desc->SetFlowType(modelbox::FlowType::NORMAL);\n  flowunit_desc->SetInputContiguous(false);\n  flowunit_desc->SetFlowUnitGroupType(\"Image\");\n  flowunit_desc->SetDescription(driver_desc->GetDescription());\n  return_map.insert(std::make_pair(driver_desc->GetName(), flowunit_desc));\n  return return_map;\n}\n\nvoid YoloBoxVirtualFlowUnitFactory::SetFlowUnitFactory(\n    const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n        &bind_flowunit_factory_list) {\n  for (const auto &bind_flowunit_factory : bind_flowunit_factory_list) {\n    bind_flowunit_factory_list_.push_back(\n        std::dynamic_pointer_cast<FlowUnitFactory>(bind_flowunit_factory));\n  }\n}\n\nstd::shared_ptr<modelbox::FlowUnit>\nYoloBoxVirtualFlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  for (auto &flowunit_factory : bind_flowunit_factory_list_) {\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetFlowUnitFactoryType() != unit_type) {\n      continue;\n    }\n\n    if (std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n            ->GetVirtualType() != virtual_type) {\n      continue;\n    }\n\n    return std::dynamic_pointer_cast<FlowUnitFactory>(flowunit_factory)\n        ->CreateFlowUnit(unit_name, unit_type);\n  }\n\n  return nullptr;\n}\n\nvoid YoloBoxVirtualFlowUnitFactory::SetVirtualType(\n    const std::string &virtual_type) {\n  virtual_type_ = virtual_type;\n}\n\nstd::string YoloBoxVirtualFlowUnitFactory::GetVirtualType() {\n  return virtual_type_;\n}\n\nvoid YoloBoxVirtualFlowUnitDesc::SetConfiguration(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  config_ = config;\n}\n\nstd::shared_ptr<modelbox::Configuration>\nYoloBoxVirtualFlowUnitDesc::GetConfiguration() {\n  return config_;\n}"
  },
  {
    "path": "src/drivers/virtual/yolobox/virtualdriver_yolobox.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_VIRTUALDRIVER_YOLOBOX_H_\n#define MODELBOX_VIRTUALDRIVER_YOLOBOX_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *BIND_FLOWUNIT_NAME = \"yolov3_postprocess\";\nconstexpr const char *VIRTUAL_YOLO_POST_FLOWUNIT = \"yolo_postprocess\";\nconstexpr const char *BIND_FLOWUNIT_VERSION = \"1.0.0\";\nconst std::vector<std::string> BIND_FLOWUNIT_TYPE{\"cpu\"};\n// Virtual\nclass YoloBoxVirtualDriverDesc : public modelbox::VirtualDriverDesc {\n public:\n  YoloBoxVirtualDriverDesc() = default;\n  ~YoloBoxVirtualDriverDesc() override = default;\n};\n\nclass YoloBoxVirtualFlowUnitDesc : public modelbox::FlowUnitDesc {\n public:\n  YoloBoxVirtualFlowUnitDesc() = default;\n  ~YoloBoxVirtualFlowUnitDesc() override = default;\n\n  void SetConfiguration(const std::shared_ptr<modelbox::Configuration> &config);\n  std::shared_ptr<modelbox::Configuration> GetConfiguration();\n\n protected:\n  std::shared_ptr<modelbox::Configuration> config_;\n};\n\nclass YoloBoxVirtualDriver : public modelbox::VirtualDriver {\n public:\n  YoloBoxVirtualDriver() = default;\n  ~YoloBoxVirtualDriver() override = default;\n\n  std::shared_ptr<modelbox::DriverFactory> CreateFactory() 
override;\n  std::vector<std::shared_ptr<modelbox::Driver>> GetBindDriver();\n  void SetBindDriver(\n      const std::vector<std::shared_ptr<modelbox::Driver>> &driver_list);\n\n private:\n  std::vector<std::shared_ptr<modelbox::Driver>> flowunit_driver_list_;\n};\n\nclass YoloBoxVirtualFlowUnitFactory : public modelbox::FlowUnitFactory {\n public:\n  YoloBoxVirtualFlowUnitFactory() = default;\n  ~YoloBoxVirtualFlowUnitFactory() override = default;\n\n  std::shared_ptr<modelbox::FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type) override;\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override;\n\n  void SetFlowUnitFactory(\n      const std::vector<std::shared_ptr<modelbox::DriverFactory>>\n          &bind_flowunit_factory_list) override;\n\n  std::string GetVirtualType() override;\n\n  void SetVirtualType(const std::string &virtual_type) override;\n\n  std::shared_ptr<modelbox::Driver> GetDriver() override { return driver_; };\n\n  void SetDriver(const std::shared_ptr<modelbox::Driver> &driver) override {\n    driver_ = driver;\n  }\n\n private:\n  std::shared_ptr<modelbox::Driver> driver_;\n  std::string virtual_type_;\n  modelbox::Status FillInput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<YoloBoxVirtualFlowUnitDesc> &flowunit_desc);\n\n  modelbox::Status FillOutput(\n      std::shared_ptr<modelbox::Configuration> &config,\n      std::shared_ptr<YoloBoxVirtualFlowUnitDesc> &flowunit_desc);\n\n  std::vector<std::shared_ptr<modelbox::DriverFactory>>\n      bind_flowunit_factory_list_;\n};\n\nclass YoloBoxVirtualDriverManager : public modelbox::VirtualDriverManager {\n public:\n  YoloBoxVirtualDriverManager() = default;\n  ~YoloBoxVirtualDriverManager() override = default;\n\n  modelbox::Status Scan(const std::string &path) override;\n  modelbox::Status Add(const std::string &file) override;\n  
modelbox::Status Init(modelbox::Drivers &driver) override;\n\n private:\n  modelbox::Status GetTargetDriverList(modelbox::Drivers &drivers);\n\n  std::vector<std::shared_ptr<modelbox::Driver>> flowunit_driver_list_;\n};\n\n#endif  // MODELBOX_VIRTUALDRIVER_YOLOBOX_H_"
  },
  {
    "path": "src/java/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-java)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif (NOT WITH_JAVA) \n    message(STATUS \"java support is disabled\")\n    return()\nendif()\n\nfind_program(MVN mvn)\nif (NOT MVN) \n    message(STATUS \"not found maven, not build modelbox java\")\n    return()\nendif()\n\nif(NOT ${JNI_FOUND})\n    message(STATUS \"not found jdk, not build modelbox jar\")\n    return()\nendif()\n\nif(NOT ${JAVA_FOUND})\n    message(STATUS \"not found jdk, not build modelbox jar\")\n    return()\nendif()\n\nset(TEST_CONFIG_JSON_FILE ${CMAKE_CURRENT_BINARY_DIR}/src/test/java/com/modelbox/TestConfig.json)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/test/java/com/modelbox/TestConfig.json.in ${TEST_CONFIG_JSON_FILE})\nset(MODELBOX_JNI_HEADER_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(JAVA_NATIVE_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/main/java/com/modelbox)\n\nfile(MAKE_DIRECTORY ${MODELBOX_JNI_HEADER_DIR})\n\nadd_subdirectory(jni)\n\nADD_CUSTOM_TARGET(modelbox-java-jni-header\n   DEPENDS ${MODELBOX_JNI_HEADER_DIR}/*.h\n)\n\nadd_custom_command(OUTPUT ${MODELBOX_JNI_HEADER_DIR}/*.h\n    COMMAND javac -h ${MODELBOX_JNI_HEADER_DIR} ${JAVA_NATIVE_SOURCE}/* -cp 
${CMAKE_CURRENT_BINARY_DIR}/*.jar -d ${MODELBOX_JNI_HEADER_DIR}\n    DEPENDS ${JAVA_NATIVE_SOURCE}\n)\nset_source_files_properties(${MODELBOX_JNI_HEADER_DIR} PROPERTIES\n    GENERATED TRUE\n)\n\nadd_dependencies(modelbox-java-jni-header modelbox-java)\n\nadd_custom_target(\n    modelbox-java\n    COMMAND mvn package -DskipTests -DbuildDirectory=${CMAKE_CURRENT_BINARY_DIR}\n    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}\n)\n\nadd_custom_target(\n    unittest-java\n    ${CMAKE_COMMAND} -E env \"TEST_CONFIG_JSON_FILE=${TEST_CONFIG_JSON_FILE}\"\n    ${CMAKE_COMMAND} -E env \"LD_LIBRARY_PATH=${JNI_LIBRARY_DIR}\" mvn test -DbuildDirectory=${CMAKE_CURRENT_BINARY_DIR}\n    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS modelbox-java)\nset(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\nlist(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS unittest-java)\nset(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\n\nfile(GLOB JAVA_RELEASE_FILE ${CMAKE_CURRENT_BINARY_DIR}/*.jar)\ninstall(CODE \n    \"file(COPY ${JAVA_RELEASE_FILE} DESTINATION ${RELEASE_PACKAGE_DIR}/java)\"\n)\n\nadd_dependencies(unittest-java modelbox-java)\nadd_dependencies(unittest-java modelbox-jni)\nadd_dependencies(unittest-java all-drivers)"
  },
  {
    "path": "src/java/jni/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-python)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif(NOT ${JNI_FOUND})\n    message(STATUS \"not found jdk, not build modelbox jni\")\n    return()\nendif()\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/include)\ninclude_directories(${JNI_INCLUDE_DIRS})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${MODELBOX_JNI_HEADER_DIR})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(MODELBOX_JNI \"modelbox-jni\")\nfile(GLOB_RECURSE LIBMODELBOX_JAVA_JNI_SOURCES *.cpp *.cc *.c)\nadd_library(${MODELBOX_JNI} SHARED ${LIBMODELBOX_JAVA_JNI_SOURCES})\ntarget_compile_options(${MODELBOX_JNI} PUBLIC -fvisibility=hidden)\nadd_dependencies(${MODELBOX_JNI} modelbox-java-jni-header)\n\ntarget_link_libraries(${MODELBOX_JNI} ${LIBMODELBOX_SHARED})\nset(JNI_LIBRARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(JNI_PACKAGE_INSTALLDIR \"/usr/java/packages/lib\")\n\ninstall(TARGETS ${MODELBOX_JNI} \n    COMPONENT cpu-device-flowunit\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${JNI_PACKAGE_INSTALLDIR}\n    ARCHIVE DESTINATION 
${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\n"
  },
  {
    "path": "src/java/jni/jni_export/buffer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer.h\"\n\n#include <memory>\n\n#include \"com_modelbox_Buffer.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/buffer.h\"\n#include \"securec.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferBuild\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferBuild__J(JNIEnv *env,\n                                                               jobject j_this,\n                                                               jlong j_size) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_buffer->Build(j_size);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferBuild\n * Signature: ([B)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferBuild___3B(\n    JNIEnv *env, jobject j_this, jbyteArray j_data_array) {\n  if (j_data_array == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto 
n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto j_data_len = env->GetArrayLength(j_data_array);\n  if (j_data_len <= 0) {\n    return;\n  }\n\n  jbyte *j_data_ptr = env->GetByteArrayElements(j_data_array, nullptr);\n  if (j_data_ptr == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"Buffer data array is invalid\");\n    return;\n  }\n  Defer { env->ReleaseByteArrayElements(j_data_array, j_data_ptr, (jint)0); };\n\n  auto ret = n_buffer->BuildFromHost(j_data_ptr, j_data_len);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return;\n  }\n\n  ret = n_buffer->Build(j_data_ptr, j_data_len);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetData\n * Signature: ()[B\n */\nJNIEXPORT jbyteArray JNICALL\nJava_com_modelbox_Buffer_BufferGetData(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  const void *n_buffer_ptr = n_buffer->ConstData();\n  if (n_buffer_ptr == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOBUFS,\n                               \"Buffer data is null\");\n    return nullptr;\n  }\n\n  int n_buffer_len = n_buffer->GetBytes();\n  auto *j_data_array = env->NewByteArray(n_buffer_len);\n  if (j_data_array == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOMEM,\n                               \"alloc memory for Buffer byte failed.\");\n    return nullptr;\n  
}\n\n  env->SetByteArrayRegion(j_data_array, 0, n_buffer_len, (jbyte *)n_buffer_ptr);\n  return j_data_array;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetDirectData\n * Signature: ()Ljava/nio/ByteBuffer;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_Buffer_BufferGetDirectData(JNIEnv *env, jobject j_this) {\n  bool is_const = false;\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  void *n_buffer_ptr = n_buffer->MutableData();\n  if (n_buffer_ptr == nullptr) {\n    n_buffer_ptr = (void *)n_buffer->ConstData();\n    if (n_buffer_ptr == nullptr) {\n      modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOBUFS,\n                                 \"Buffer data is null\");\n      return nullptr;\n    }\n\n    is_const = true;\n  }\n\n  //TODO The reference to n_bufferlist should be added to avoid dangling pointers of j_byte_buffer\n  //https://stackoverflow.com/questions/46844275/freeing-memory-wrapped-with-newdirectbytebuffer\n  jobject j_byte_buffer =\n      env->NewDirectByteBuffer((void *)n_buffer_ptr, n_buffer->GetBytes());\n  if (j_byte_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOMEM,\n                               \"alloc memory for Buffer byte failed.\");\n    return nullptr;\n  }\n\n  if (is_const == false) {\n    return j_byte_buffer;\n  }\n\n  jmethodID asreadonly_method =\n      env->GetMethodID(env->GetObjectClass(j_byte_buffer), \"asReadOnlyBuffer\",\n                       \"()Ljava/nio/ByteBuffer;\");\n  if (asreadonly_method == nullptr) {\n    MBLOG_ERROR << \"get asreadonly method failed.\";\n    return nullptr;\n  }\n\n  jobject j_readonly_byte_buffer =\n      env->CallObjectMethod(j_byte_buffer, asreadonly_method);\n  return 
j_readonly_byte_buffer;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferHasError\n * Signature: ()Z\n */\nJNIEXPORT jboolean JNICALL\nJava_com_modelbox_Buffer_BufferHasError(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return false;\n  }\n\n  return (jboolean)n_buffer->HasError();\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetError\n * Signature: (Ljava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetError(\n    JNIEnv *env, jobject j_this, jstring j_code, jstring j_message) {\n  if (j_code == nullptr || j_message == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_buffer->SetError(modelbox::jstring2string(env, j_code),\n                     modelbox::jstring2string(env, j_message));\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetErrorCode\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Buffer_BufferGetErrorCode(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return 
env->NewStringUTF(n_buffer->GetErrorCode().c_str());\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetErrorMsg\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Buffer_BufferGetErrorMsg(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_buffer->GetErrorMsg().c_str());\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetBytes\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_Buffer_BufferGetBytes(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return (jlong)n_buffer->GetBytes();\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetMetaLong\n * Signature: (Ljava/lang/String;J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetMetaLong(\n    JNIEnv *env, jobject j_this, jstring j_key, jlong j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = (int64_t)j_value;\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     
com_modelbox_Buffer\n * Method:    BufferSetMetaInt\n * Signature: (Ljava/lang/String;I)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetMetaInt(JNIEnv *env,\n                                                                 jobject j_this,\n                                                                 jstring j_key,\n                                                                 jint j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = (int32_t)j_value;\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetMetaString\n * Signature: (Ljava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetMetaString(\n    JNIEnv *env, jobject j_this, jstring j_key, jstring j_value) {\n  if (j_key == nullptr || j_value == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = modelbox::jstring2string(env, j_value);\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetMetaDouble\n * Signature: (Ljava/lang/String;D)V\n */\nJNIEXPORT void 
JNICALL Java_com_modelbox_Buffer_BufferSetMetaDouble(\n    JNIEnv *env, jobject j_this, jstring j_key, jdouble j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = (double)j_value;\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetMetaFloat\n * Signature: (Ljava/lang/String;F)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetMetaFloat(\n    JNIEnv *env, jobject j_this, jstring j_key, jfloat j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = (float)j_value;\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferSetMetaBool\n * Signature: (Ljava/lang/String;Z)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferSetMetaBool(\n    JNIEnv *env, jobject j_this, jstring j_key, jboolean j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = (bool)j_value;\n\n  n_buffer->Set(modelbox::jstring2string(env, j_key), n_value);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaLong\n * Signature: (Ljava/lang/String;)J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Buffer_BufferGetMetaLong(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  int64_t n_value = 0;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return 0;\n  }\n\n  return (jlong)n_value;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaInt\n * Signature: (Ljava/lang/String;)I\n */\nJNIEXPORT jint JNICALL Java_com_modelbox_Buffer_BufferGetMetaInt(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, 
modelbox::StatusError);\n    return 0;\n  }\n\n  int32_t n_value = 0;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return 0;\n  }\n\n  return (jint)n_value;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaString\n * Signature: (Ljava/lang/String;)Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_modelbox_Buffer_BufferGetMetaString(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  std::string n_value;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_value.c_str());\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaDouble\n * Signature: (Ljava/lang/String;)D\n */\nJNIEXPORT jdouble JNICALL Java_com_modelbox_Buffer_BufferGetMetaDouble(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == 
nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  double n_value = 0;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return 0;\n  }\n\n  return (jdouble)n_value;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaFloat\n * Signature: (Ljava/lang/String;)F\n */\nJNIEXPORT jfloat JNICALL Java_com_modelbox_Buffer_BufferGetMetaFloat(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  float n_value = 0;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return 0;\n  }\n\n  return (jfloat)n_value;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetMetaBool\n * Signature: (Ljava/lang/String;)Z\n */\nJNIEXPORT jboolean JNICALL Java_com_modelbox_Buffer_BufferGetMetaBool(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return false;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n  
  modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return false;\n  }\n\n  bool n_value = false;\n  if (n_buffer->Get(modelbox::jstring2string(env, j_key), n_value) == false) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"key not found in meta or value type is invalid.\");\n    return false;\n  }\n\n  return (jboolean)n_value;\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferCopyMeta\n * Signature: (Lcom/modelbox/Buffer;Z)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Buffer_BufferCopyMeta(\n    JNIEnv *env, jobject j_this, jobject j_buffer, jboolean j_is_overwrite) {\n  if (j_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_buffer_other =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_buffer);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_buffer->CopyMeta(n_buffer_other, j_is_overwrite);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Buffer\n * Method:    BufferGetDevice\n * Signature: ()Lcom/modelbox/Device;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_Buffer_BufferGetDevice(JNIEnv *env, jobject j_this) {\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_this);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    
return nullptr;\n  }\n\n  auto n_device = n_buffer->GetDevice();\n  auto *j_device = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/Device\", n_device);\n  if (j_device == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_device;\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/bufferlist.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_BufferList.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/buffer_list.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListBuild\n * Signature: ([I)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_BufferList_BufferListBuild(\n    JNIEnv *env, jobject j_this, jintArray j_size_list) {\n  if (j_size_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  jsize j_size_list_len = env->GetArrayLength(j_size_list);\n  if (j_size_list_len <= 0) {\n    return;\n  }\n\n  jint *j_size_list_data = env->GetIntArrayElements(j_size_list, nullptr);\n  if (j_size_list_data == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"Get Buffer list array element failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return;\n  }\n\n  std::vector<size_t> size_list;\n  size_list.reserve(j_size_list_len);\n  for (int i = 0; i < 
j_size_list_len; i++) {\n    size_list.push_back(j_size_list_data[i]);\n  }\n\n  env->ReleaseIntArrayElements(j_size_list, j_size_list_data, (jint)0);\n\n  auto ret = n_bufferlist->Build(size_list, true);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListAt\n * Signature: (J)Lcom/modelbox/Buffer;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_BufferList_BufferListAt(\n    JNIEnv *env, jobject j_this, jlong j_index) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_buffer = n_bufferlist->At((size_t)j_index);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_RANGE,\n                               \"Bufferlist index is invalid.\");\n    return nullptr;\n  }\n\n  auto *j_buffer = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/Buffer\", n_buffer);\n  if (j_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer;\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListSize\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_BufferList_BufferListSize(JNIEnv *env, jobject j_this) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return (jlong)n_bufferlist->Size();\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListPushBack\n * Signature: (Lcom/modelbox/Buffer;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_BufferList_BufferListPushBack__Lcom_modelbox_Buffer_2(\n    JNIEnv *env, jobject j_this, jobject j_buffer) {\n  if (j_buffer 
== nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_buffer);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_bufferlist->PushBack(n_buffer);\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListPushBack\n * Signature: ([B)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_BufferList_BufferListPushBack___3B(\n    JNIEnv *env, jobject j_this, jbyteArray j_data_array) {\n  if (j_data_array == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto j_data_len = env->GetArrayLength(j_data_array);\n  if (j_data_len <= 0) {\n    return;\n  }\n\n  jbyte *j_data_ptr = env->GetByteArrayElements(j_data_array, nullptr);\n  if (j_data_ptr == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"Buffer data array is invalid\");\n    return;\n  }\n  Defer { env->ReleaseByteArrayElements(j_data_array, j_data_ptr, (jint)0); };\n\n  auto n_buffer = std::make_shared<modelbox::Buffer>(n_bufferlist->GetDevice());\n  auto ret = n_buffer->BuildFromHost(j_data_ptr, j_data_len);\n  if (ret != 
modelbox::STATUS_SUCCESS) {\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return;\n  }\n\n  n_bufferlist->PushBack(n_buffer);\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListAssign\n * Signature: ([Lcom/modelbox/Buffer;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_BufferList_BufferListAssign(\n    JNIEnv *env, jobject j_this, jobjectArray j_buffer_list) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOTSUPPORT, \"not supported\");\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListGetData\n * Signature: ()[B\n */\nJNIEXPORT jbyteArray JNICALL\nJava_com_modelbox_BufferList_BufferListGetData(JNIEnv *env, jobject j_this) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto *j_data = env->NewByteArray(n_bufferlist->GetBytes());\n  if (j_data == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"alloc memory for buffer list data failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  env->SetByteArrayRegion(j_data, 0, n_bufferlist->GetBytes(),\n                          (jbyte *)n_bufferlist->ConstData());\n  return j_data;\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListGetDirectData\n * Signature: ()Ljava/nio/ByteBuffer;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_BufferList_BufferListGetDirectData__(JNIEnv *env,\n                                                       jobject j_this) {\n  bool is_const = false;\n  auto n_bufferlist =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  void *n_buffer_ptr = n_bufferlist->MutableData();\n  if (n_buffer_ptr == nullptr) {\n    n_buffer_ptr = (void *)n_bufferlist->ConstData();\n    if (n_buffer_ptr == nullptr) {\n      modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                                 \"buffer list data is null\");\n      return nullptr;\n    }\n    is_const = true;\n  }\n\n  auto *j_byte_buffer =\n      env->NewDirectByteBuffer(n_buffer_ptr, n_bufferlist->GetBytes());\n  if (j_byte_buffer == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"alloc memory for buffer list data failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  if (is_const == false) {\n    return j_byte_buffer;\n  }\n\n  jmethodID asreadonly_method =\n      env->GetMethodID(env->GetObjectClass(j_byte_buffer), \"asReadOnlyBuffer\",\n                       \"()Ljava/nio/ByteBuffer;\");\n  if (asreadonly_method == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"get asreadonly method failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  jobject j_readonly_byte_buffer =\n      env->CallObjectMethod(j_byte_buffer, asreadonly_method);\n  return j_readonly_byte_buffer;\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListGetDirectData\n * Signature: (J)Ljava/nio/ByteBuffer;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_BufferList_BufferListGetDirectData__J(JNIEnv *env,\n                                                        jobject j_this,\n                                                        jlong j_index) {\n  bool is_const = false;\n  auto n_bufferlist =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  void *n_buffer_ptr = n_bufferlist->MutableBufferData(j_index);\n  if (n_buffer_ptr == nullptr) {\n    n_buffer_ptr = (void *)n_bufferlist->ConstBufferData(j_index);\n    if (n_buffer_ptr == nullptr) {\n      if (n_bufferlist->GetBytes() != 0) {\n        modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                                   \"buffer is not continuous.\");\n        return nullptr;\n      }\n\n      modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                                 \"buffer list data is null\");\n      return nullptr;\n    }\n    is_const = true;\n  }\n\n  //TODO The reference to n_bufferlist should be added to avoid dangling pointers of j_byte_buffer\n  //https://stackoverflow.com/questions/46844275/freeing-memory-wrapped-with-newdirectbytebuffer\n  auto *j_byte_buffer =\n      env->NewDirectByteBuffer(n_buffer_ptr, n_bufferlist->GetBytes());\n  if (j_byte_buffer == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"alloc memory for buffer list data failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  if (is_const == false) {\n    return j_byte_buffer;\n  }\n\n  jmethodID asreadonly_method =\n      env->GetMethodID(env->GetObjectClass(j_byte_buffer), \"asReadOnlyBuffer\",\n                       \"()Ljava/nio/ByteBuffer;\");\n  if (asreadonly_method == nullptr) {\n    modelbox::Status ret = {modelbox::STATUS_NOMEM,\n                            \"get asreadonly method failed.\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  jobject j_readonly_byte_buffer =\n      env->CallObjectMethod(j_byte_buffer, asreadonly_method);\n  return j_readonly_byte_buffer;\n}\n/*\n * Class:     com_modelbox_BufferList\n * 
Method:    BufferListGetDevice\n * Signature: ()Lcom/modelbox/Device;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_BufferList_BufferListGetDevice(JNIEnv *env, jobject j_this) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_device = n_bufferlist->GetDevice();\n  if (n_device == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_device = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/Device\", n_device);\n  if (j_device == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_device;\n}\n\n/*\n * Class:     com_modelbox_BufferList\n * Method:    BufferListReset\n * Signature: ()V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_BufferList_BufferListReset(JNIEnv *env, jobject j_this) {\n  auto n_bufferlist =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_this);\n  if (n_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_bufferlist->Reset();\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/configuration.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/configuration.h\"\n\n#include <memory>\n\n#include \"com_modelbox_Configuration.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetBoolean\n * Signature: (Ljava/lang/String;Z)Z\n */\nJNIEXPORT jboolean JNICALL\nJava_com_modelbox_Configuration_ConfigurationGetBoolean(JNIEnv *env,\n                                                        jobject j_this,\n                                                        jstring j_key,\n                                                        jboolean j_default) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return false;\n  }\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return false;\n  }\n\n  return (jboolean)n_config->GetBool(modelbox::jstring2string(env, j_key),\n                                     (bool)j_default);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetInt\n * Signature: 
(Ljava/lang/String;I)I\n */\nJNIEXPORT jint JNICALL Java_com_modelbox_Configuration_ConfigurationGetInt(\n    JNIEnv *env, jobject j_this, jstring j_key, jint j_default) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return (jint)n_config->GetInt32(modelbox::jstring2string(env, j_key),\n                                  (jint)j_default);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetLong\n * Signature: (Ljava/lang/String;J)J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Configuration_ConfigurationGetLong(\n    JNIEnv *env, jobject j_this, jstring j_key, jlong j_default) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return (jlong)n_config->GetInt64(modelbox::jstring2string(env, j_key),\n                                   (jlong)j_default);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetString\n * Signature: (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Configuration_ConfigurationGetString(JNIEnv *env,\n                                                       jobject j_this,\n                                                       jstring j_key,\n                                                       jstring j_default) 
{\n  std::string defaultValue;\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  if (j_default) {\n    defaultValue = modelbox::jstring2string(env, j_default);\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto retvalue =\n      n_config->GetString(modelbox::jstring2string(env, j_key), defaultValue);\n  return env->NewStringUTF(retvalue.c_str());\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetFloat\n * Signature: (Ljava/lang/String;F)F\n */\nJNIEXPORT jfloat JNICALL Java_com_modelbox_Configuration_ConfigurationGetFloat(\n    JNIEnv *env, jobject j_this, jstring j_key, jfloat j_default) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return (jfloat)0;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return (jfloat)0;\n  }\n\n  return (jfloat)n_config->GetFloat(modelbox::jstring2string(env, j_key),\n                                    (jfloat)j_default);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetDouble\n * Signature: (Ljava/lang/String;D)D\n */\nJNIEXPORT jdouble JNICALL\nJava_com_modelbox_Configuration_ConfigurationGetDouble(JNIEnv *env,\n                                                       jobject j_this,\n                                                       jstring j_key,\n                                                       jdouble j_default) {\n  if (j_key == nullptr) 
{\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return (jdouble)n_config->GetDouble(modelbox::jstring2string(env, j_key),\n                                      (jdouble)j_default);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2Z(\n    JNIEnv *env, jobject j_this, jstring j_key, jboolean j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_config->SetProperty<bool>(modelbox::jstring2string(env, j_key),\n                              (bool)j_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;I)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2I(\n    JNIEnv *env, jobject j_this, jstring j_key, jint j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, 
modelbox::StatusError);\n    return;\n  }\n\n  n_config->SetProperty<int32_t>(modelbox::jstring2string(env, j_key),\n                                 (int32_t)j_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;J)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2J(\n    JNIEnv *env, jobject j_this, jstring j_key, jlong j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_config->SetProperty<int64_t>(modelbox::jstring2string(env, j_key),\n                                 (int64_t)j_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;F)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2F(\n    JNIEnv *env, jobject j_this, jstring j_key, jfloat j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_config->SetProperty<float>(modelbox::jstring2string(env, j_key),\n                               (float)j_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;D)V\n */\nJNIEXPORT void 
JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2D(\n    JNIEnv *env, jobject j_this, jstring j_key, jdouble j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_config->SetProperty<double>(modelbox::jstring2string(env, j_key),\n                                (double)j_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: (Ljava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_key, jstring j_value) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_value = modelbox::jstring2string(env, j_value);\n\n  n_config->SetProperty<std::string>(modelbox::jstring2string(env, j_key),\n                                     n_value);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationGetStrings\n * Signature: (Ljava/lang/String;Ljava/util/ArrayList;)Ljava/util/ArrayList;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_Configuration_ConfigurationGetStrings(JNIEnv *env,\n                                                        jobject j_this,\n                                                        jstring 
j_key,\n                                                        jobject j_default) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_values = n_config->GetStrings(modelbox::jstring2string(env, j_key));\n  if (n_values.empty()) {\n    return j_default;\n  }\n\n  auto *j_arraylist_cls = env->FindClass(\"java/util/ArrayList\");\n  if (j_arraylist_cls == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"cannot find array list\");\n    return nullptr;\n  }\n  Defer { env->DeleteLocalRef(j_arraylist_cls); };\n\n  jmethodID j_list_add_ID =\n      env->GetMethodID(j_arraylist_cls, \"add\", \"(Ljava/lang/Object;)Z\");\n  jmethodID j_list_init_ID =\n      env->GetMethodID(j_arraylist_cls, \"<init>\", \"(I)V\");\n\n  if (j_list_add_ID == nullptr || j_list_init_ID == nullptr) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_FAULT,\n        \"Cannot find arraylist functions add and <init>\");\n    return nullptr;\n  }\n\n  auto *j_arraylist =\n      env->NewObject(j_arraylist_cls, j_list_init_ID, n_values.size());\n  if (j_arraylist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOMEM,\n                               \"cannot create arraylist\");\n    return nullptr;\n  }\n\n  for (const auto &n_value : n_values) {\n    auto *j_obj = env->NewStringUTF(n_value.c_str());\n    env->CallBooleanMethod(j_arraylist, j_list_add_ID, j_obj);\n    env->DeleteLocalRef(j_obj);\n  }\n\n  return j_arraylist;\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationSet\n * Signature: 
(Ljava/lang/String;Ljava/util/ArrayList;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Configuration_ConfigurationSet__Ljava_lang_String_2Ljava_util_ArrayList_2(\n    JNIEnv *env, jobject j_this, jstring j_key, jobject j_array_string) {\n  if (j_key == nullptr || j_array_string == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_config =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n          env, j_this);\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto *j_arraylist_cls = env->FindClass(\"java/util/ArrayList\");\n  if (j_arraylist_cls == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"cannot find array list\");\n    return;\n  }\n  Defer { env->DeleteLocalRef(j_arraylist_cls); };\n\n  jmethodID j_list_get_ID =\n      env->GetMethodID(j_arraylist_cls, \"get\", \"(I)Ljava/lang/Object;\");\n  jmethodID j_list_size_ID = env->GetMethodID(j_arraylist_cls, \"size\", \"()I\");\n\n  if (j_list_get_ID == nullptr || j_list_size_ID == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"Cannot find arraylist functions get and size\");\n    return;\n  }\n\n  jsize j_arraylist_size = env->CallIntMethod(j_array_string, j_list_size_ID);\n  if (j_arraylist_size <= 0) {\n    return;\n  }\n\n  std::vector<std::string> n_values;\n  n_values.reserve(j_arraylist_size);\n  for (int i = 0; i < j_arraylist_size; i++) {\n    auto *j_obj =\n        (jstring)env->CallObjectMethod(j_array_string, j_list_get_ID, i);\n    Defer { env->DeleteLocalRef(j_obj); };\n\n    auto n_value = modelbox::jstring2string(env, j_obj);\n    n_values.emplace_back(n_value);\n  }\n\n  n_config->SetProperty<std::string>(modelbox::jstring2string(env, j_key),\n                             
        n_values);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationParser\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Configuration_ConfigurationParser(\n    JNIEnv *env, jobject j_this, jstring j_file) {\n  if (j_file == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  modelbox::ConfigurationBuilder builder;\n  auto n_newconfig = builder.Build(modelbox::jstring2string(env, j_file));\n  if (n_newconfig == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret =\n      modelbox::JNINativeObject::SetNativeSharedPtr(env, j_this, n_newconfig);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Configuration\n * Method:    ConfigurationNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_Configuration_ConfigurationNew(JNIEnv *env, jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::Configuration>());\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/data_meta.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_DataMeta.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/external_data_map.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_DataMeta\n * Method:    DataMetaNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_DataMeta_DataMetaNew(JNIEnv *env,\n                                                               jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::DataMeta>());\n}\n\n/*\n * Class:     com_modelbox_DataMeta\n * Method:    DataMetaSet\n * Signature: (Ljava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_DataMeta_DataMetaSet(JNIEnv *env,\n                                                              jobject j_this,\n                                                              jstring j_key,\n                                                              jstring j_value) {\n  if (j_key == nullptr || j_value == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_datameta =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataMeta>(env,\n                                                      
                  j_this);\n  if (n_datameta == nullptr || j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_datameta->SetMeta(\n      modelbox::jstring2string(env, j_key),\n      std::make_shared<std::string>(modelbox::jstring2string(env, j_value)));\n}\n\n/*\n * Class:     com_modelbox_DataMeta\n * Method:    DataMetaGetString\n * Signature: (Ljava/lang/String;)Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_modelbox_DataMeta_DataMetaGetString(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_datameta =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataMeta>(env,\n                                                                        j_this);\n  if (n_datameta == nullptr || j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto value = n_datameta->GetMeta(modelbox::jstring2string(env, j_key));\n  if (value == nullptr) {\n    return nullptr;\n  }\n\n  auto n_value = std::static_pointer_cast<std::string>(value); \n  if (value == nullptr) {\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_value->c_str());\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/datacontext.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_DataContext.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/data_context.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_Input\n * Signature: (Ljava/lang/String;)Lcom/modelbox/BufferList;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_DataContext_DataContext_1Input(\n    JNIEnv *env, jobject j_this, jstring j_portname) {\n  if (j_portname == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_buffer_list =\n      n_data_ctx->Input(modelbox::jstring2string(env, j_portname));\n  if (n_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input port not exists\");\n    return nullptr;\n  }\n\n  auto *j_buffer_list = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/BufferList\", n_buffer_list);\n  if (j_buffer_list 
== nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer_list;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_Output\n * Signature: (Ljava/lang/String;)Lcom/modelbox/BufferList;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_DataContext_DataContext_1Output(\n    JNIEnv *env, jobject j_this, jstring j_portname) {\n  if (j_portname == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_buffer_list =\n      n_data_ctx->Output(modelbox::jstring2string(env, j_portname));\n  if (n_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"output port not exists\");\n    return nullptr;\n  }\n\n  auto *j_buffer_list = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/BufferList\", n_buffer_list);\n  if (j_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer_list;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_External\n * Signature: ()Lcom/modelbox/BufferList;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_DataContext_DataContext_1External(\n    JNIEnv *env, jobject j_this) {\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_buffer_list = n_data_ctx->External();\n  if (n_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, 
modelbox::STATUS_INVALID,\n                               \"external port not exists\");\n    return nullptr;\n  }\n\n  auto *j_buffer_list = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/BufferList\", n_buffer_list);\n  if (j_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer_list;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_HasError\n * Signature: ()Z\n */\nJNIEXPORT jboolean JNICALL Java_com_modelbox_DataContext_DataContext_1HasError(\n    JNIEnv *env, jobject j_this) {\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return false;\n  }\n\n  return n_data_ctx->HasError();\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_SendEvent\n * Signature: (Lcom/modelbo::DataContext;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_DataContext_DataContext_1SendEvent(\n    JNIEnv *env, jobject j_this, jobject j_event) {\n  if (j_event == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_event =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitEvent>(\n          env, j_event);\n  if (n_event == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_data_ctx->SendEvent(n_event);\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_SetPrivate\n * Signature: (Ljava/lang/String;Ljava/lang/Object;)V\n */\nJNIEXPORT void 
JNICALL Java_com_modelbox_DataContext_DataContext_1SetPrivate(\n    JNIEnv *env, jobject j_this, jstring j_key, jobject j_object) {\n  if (j_object == nullptr || j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto *j_global_object = env->NewGlobalRef(j_object);\n  std::shared_ptr<void> priv_ptr(\n      (void *)j_global_object, [](void *j_global_object) {\n        modelbox::ScopedJvm scoped;\n        scoped.GetJNIEnv()->DeleteGlobalRef((jobject)j_global_object);\n      });\n  n_data_ctx->SetPrivate(modelbox::jstring2string(env, j_key), priv_ptr);\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_GetPrivate\n * Signature: (Ljava/lang/String;)Ljava/lang/Object;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_DataContext_DataContext_1GetPrivate(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_object = n_data_ctx->GetPrivate(modelbox::jstring2string(env, j_key));\n  if (n_object == nullptr) {\n    return nullptr;\n  }\n\n  return (jobject)n_object.get();\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_GetInputMeta\n * Signature: (Ljava/lang/String;)Lcom/modelbox/DataMeta;\n */\nJNIEXPORT jobject 
JNICALL\nJava_com_modelbox_DataContext_DataContext_1GetInputMeta(JNIEnv *env,\n                                                        jobject j_this,\n                                                        jstring j_portname) {\n  if (j_portname == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_datameta =\n      n_data_ctx->GetInputMeta(modelbox::jstring2string(env, j_portname));\n  if (n_datameta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"port meta not exists\");\n    return nullptr;\n  }\n\n  auto *j_datameta = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/DataMeta\", n_datameta);\n  if (j_datameta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n  }\n\n  return j_datameta;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_SetOututMeta\n * Signature: (Ljava/lang/String;Lcom/modelbox/DataMeta;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_DataContext_DataContext_1SetOututMeta(\n    JNIEnv *env, jobject j_this, jstring j_portname, jobject j_datameta) {\n  if (j_portname == nullptr || j_datameta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_datameta =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataMeta>(\n          env, j_datameta);\n  if (n_datameta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_data_ctx->SetOutputMeta(modelbox::jstring2string(env, j_portname),\n                            n_datameta);\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_GetSessionContext\n * Signature: ()Lcom/modelbox/SessionContext;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_DataContext_DataContext_1GetSessionContext(JNIEnv *env,\n                                                             jobject j_this) {\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_session_context = n_data_ctx->GetSessionContext();\n  if (n_session_context == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_session_ctx = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/SessionContext\", n_session_context);\n  if (j_session_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_session_ctx;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_GetSessionConfig\n * Signature: ()Lcom/modelbox/Configuration;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_DataContext_DataContext_1GetSessionConfig(JNIEnv *env,\n                                                            jobject j_this) {\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_session_config = n_data_ctx->GetSessionConfig();\n  if (n_session_config == nullptr) {\n    return nullptr;\n  
}\n\n  auto *j_session_config = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/Configuration\", n_session_config);\n  if (j_session_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_session_config;\n}\n\n/*\n * Class:     com_modelbox_DataContext\n * Method:    DataContext_GetStatistics\n * Signature: ()Lcom/modelbox/StatisticsItem;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_DataContext_DataContext_1GetStatistics(JNIEnv *env,\n                                                         jobject j_this) {\n  auto n_data_ctx =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataContext>(\n          env, j_this);\n  if (n_data_ctx == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_statistics = n_data_ctx->GetStatistics();\n  if (n_statistics == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_statistics = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/StatisticsItem\", n_statistics);\n  if (j_statistics == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_statistics;\n}"
  },
  {
    "path": "src/java/jni/jni_export/device.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/device.h\"\n\n#include <memory>\n\n#include \"com_modelbox_Device.h\"\n#include \"jni_native_object.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_Device\n * Method:    DeviceGetType\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Device_DeviceGetType(JNIEnv *env, jobject j_this) {\n  auto n_device =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Device>(env,\n                                                                      j_this);\n  if (n_device == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_device->GetType().c_str());\n}\n\n/*\n * Class:     com_modelbox_Device\n * Method:    DeviceGetDeviceID\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Device_DeviceGetDeviceID(JNIEnv *env, jobject j_this) {\n  auto n_device =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Device>(env,\n                                                                      j_this);\n  if (n_device == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_device->GetDeviceID().c_str());\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/external_data_map.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/external_data_map.h\"\n\n#include <memory>\n\n#include \"com_modelbox_ExternalDataMap.h\"\n#include \"jni_native_object.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_CreateBufferList\n * Signature: ()Lcom/modelbox/BufferList;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1CreateBufferList(\n    JNIEnv *env, jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_bufflist = n_datamap->CreateBufferList();\n  if (n_bufflist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOMEM,\n                               \"create buffer list failed.\");\n    return nullptr;\n  }\n\n  auto *j_buffer_list = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/BufferList\", n_bufflist);\n  if (j_buffer_list == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer_list;\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_SetOutputMeta\n * Signature: 
(Ljava/lang/String;Lcom/modelbox/DataMeta;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1SetOutputMeta(\n    JNIEnv *env, jobject j_this, jstring j_name, jobject j_data_meta) {\n  if (j_name == nullptr || j_data_meta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_data_meta =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::DataMeta>(\n          env, j_data_meta);\n  if (n_data_meta == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_datamap->SetOutputMeta(modelbox::jstring2string(env, j_name),\n                                      n_data_meta);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_Send\n * Signature: (Ljava/lang/String;Lcom/modelbox/BufferList;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_ExternalDataMap_ExternalDataMap_1Send(\n    JNIEnv *env, jobject j_this, jstring j_port_name, jobject j_bufferlist) {\n  if (j_port_name == nullptr || j_bufferlist == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto j_buffer_list =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::BufferList>(\n          env, j_bufferlist);\n  if (j_buffer_list == nullptr) {\n    
modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_datamap->Send(modelbox::jstring2string(env, j_port_name),\n                             j_buffer_list);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_Recv\n * Signature: (J)Ljava/util/HashMap;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1Recv(JNIEnv *env,\n                                                        jobject j_this,\n                                                        jlong j_timeout) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  modelbox::OutputBufferList map_buffer_list;\n  auto ret = n_datamap->Recv(map_buffer_list, (int32_t)j_timeout);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    if (ret == modelbox::STATUS_EOF) {\n      return nullptr;\n    }\n\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  jclass j_map_cls = env->FindClass(\"java/util/HashMap\");\n  if (j_map_cls == nullptr) {\n    ret = {modelbox::STATUS_INTERNAL, \"cannot found hash map class\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  Defer { env->DeleteLocalRef(j_map_cls); };\n\n  jmethodID init = env->GetMethodID(j_map_cls, \"<init>\", \"(I)V\");\n  jobject j_hashmap = env->NewObject(j_map_cls, init, 10);\n  jmethodID put = env->GetMethodID(\n      j_map_cls, \"put\",\n      \"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;\");\n\n  for (const auto &item : map_buffer_list) {\n    jstring j_key = env->NewStringUTF(item.first.c_str());\n    auto *j_bufflist = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/BufferList\", item.second);\n    env->CallObjectMethod(j_hashmap, put, j_key, 
j_bufflist);\n    env->DeleteLocalRef(j_key);\n    env->DeleteLocalRef(j_bufflist);\n  }\n\n  return j_hashmap;\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_Close\n * Signature: ()V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_ExternalDataMap_ExternalDataMap_1Close(\n    JNIEnv *env, jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_datamap->Close();\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_Shutdown\n * Signature: ()V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1Shutdown(JNIEnv *env,\n                                                            jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_datamap->Shutdown();\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_GetSessionContext\n * Signature: ()Lcom/modelbox/SessionContext;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1GetSessionContext(\n    JNIEnv *env, jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_session_context = n_datamap->GetSessionContext();\n  if (n_session_context == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_session_ctx = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/SessionContext\", n_session_context);\n  if (j_session_ctx == nullptr) {\n    
modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_session_ctx;\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_GetSessionConfig\n * Signature: ()Lcom/modelbox/Configuration;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1GetSessionConfig(\n    JNIEnv *env, jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_session_config = n_datamap->GetSessionConfig();\n  if (n_session_config == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_session_config = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/SessionConfig\", n_session_config);\n  if (j_session_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_session_config;\n}\n\n/*\n * Class:     com_modelbox_ExternalDataMap\n * Method:    ExternalDataMap_GetLastError\n * Signature: ()Lcom/modelbox/FlowUnitError;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataMap_ExternalDataMap_1GetLastError(\n    JNIEnv *env, jobject j_this) {\n  auto n_datamap =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_this);\n  if (n_datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_laste_error = n_datamap->GetLastError();\n  if (n_laste_error == nullptr) {\n    return nullptr;\n  }\n\n  auto *j_last_error = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/FlowUnitError\", n_laste_error);\n  if (j_last_error == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_last_error;\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/external_data_select.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_ExternalDataSelect.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/external_data_map.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n#include \"scoped_jvm.h\"\n\n/*\n * Class:     com_modelbox_ExternalDataSelect\n * Method:    ExternalDataSelect_New\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_ExternalDataSelect_ExternalDataSelect_1New(JNIEnv *env,\n                                                             jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::ExternalDataSelect>());\n}\n\n/*\n * Class:     com_modelbox_ExternalDataSelect\n * Method:    ExternalDataSelect_RegisterExternalData\n * Signature: (Lcom/modelbox/ExternalDataMap;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_ExternalDataSelect_ExternalDataSelect_1RegisterExternalData(\n    JNIEnv *env, jobject j_this, jobject j_data_map) {\n  if (j_data_map == nullptr) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"ExternalDataSelect Register: input argument is null\");\n    return;\n  }\n\n  auto n_data_select = modelbox::JNINativeObject::GetNativeSharedPtr<\n      modelbox::ExternalDataSelect>(env, j_this);\n  if (n_data_select == nullptr) {\n    
modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_data_map =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_data_map);\n  if (n_data_map == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto *j_global_data_map = env->NewGlobalRef(j_data_map);\n  std::shared_ptr<void> priv_ptr(\n      (void *)j_global_data_map, [](void *global_data_map) {\n        modelbox::ScopedJvm scoped;\n        scoped.GetJNIEnv()->DeleteGlobalRef((jobject)global_data_map);\n      });\n  n_data_map->SetPrivate(priv_ptr);\n  n_data_select->RegisterExternalData(n_data_map);\n}\n\n/*\n * Class:     com_modelbox_ExternalDataSelect\n * Method:    ExternalDataSelect_RemoveExternalData\n * Signature: (Lcom/modelbox/ExternalDataMap;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_ExternalDataSelect_ExternalDataSelect_1RemoveExternalData(\n    JNIEnv *env, jobject j_this, jobject j_data_map) {\n  if (j_data_map == nullptr) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_INVALID,\n        \"ExternalDataSelect Remove: input argument is null\");\n    return;\n  }\n\n  auto n_data_select = modelbox::JNINativeObject::GetNativeSharedPtr<\n      modelbox::ExternalDataSelect>(env, j_this);\n  if (n_data_select == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_data_map =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::ExternalDataMap>(\n          env, j_data_map);\n  if (n_data_map == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_data_select->RemoveExternalData(n_data_map);\n}\n\n/*\n * Class:     com_modelbox_ExternalDataSelect\n * Method:    ExternalDataSelect_SelectExternalData\n * Signature: (J)Ljava/util/ArrayList;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_ExternalDataSelect_ExternalDataSelect_1SelectExternalData(\n    JNIEnv 
*env, jobject j_this, jlong j_timeout) {\n  auto n_data_select = modelbox::JNINativeObject::GetNativeSharedPtr<\n      modelbox::ExternalDataSelect>(env, j_this);\n  if (n_data_select == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  std::list<std::shared_ptr<modelbox::ExternalDataMap>> datamap_list;\n  auto ret = n_data_select->SelectExternalData(\n      datamap_list, std::chrono::milliseconds((int64_t)j_timeout));\n  if (ret != modelbox::STATUS_SUCCESS) {\n    if (ret == modelbox::STATUS_TIMEDOUT) {\n      return nullptr;\n    }\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  auto *j_arraylist_cls = env->FindClass(\"java/util/ArrayList\");\n  if (j_arraylist_cls == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"cannot find array list\");\n    return nullptr;\n  }\n  Defer { env->DeleteLocalRef(j_arraylist_cls); };\n\n  jmethodID j_list_add_ID =\n      env->GetMethodID(j_arraylist_cls, \"add\", \"(Ljava/lang/Object;)Z\");\n  jmethodID j_list_init_ID =\n      env->GetMethodID(j_arraylist_cls, \"<init>\", \"(I)V\");\n\n  if (j_list_add_ID == nullptr || j_list_init_ID == nullptr) {\n    modelbox::ModelBoxJNIThrow(\n        env, modelbox::STATUS_FAULT,\n        \"Cannot find arraylist functions add and <init>\");\n    return nullptr;\n  }\n\n  auto *j_arraylist =\n      env->NewObject(j_arraylist_cls, j_list_init_ID, datamap_list.size());\n  if (j_arraylist == nullptr) {\n    ret = {modelbox::STATUS_NOMEM, \"cannot create arraylist\"};\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return nullptr;\n  }\n\n  for (const auto &datamap : datamap_list) {\n    std::shared_ptr<void> object = datamap->GetPrivate<void>();\n    if (object == nullptr) {\n      continue;\n    }\n\n    env->CallBooleanMethod(j_arraylist, j_list_add_ID, (jobject)object.get());\n  }\n\n  return j_arraylist;\n}"
  },
  {
    "path": "src/java/jni/jni_export/flow.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow.h\"\n\n#include <memory>\n\n#include \"com_modelbox_Flow.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/base/log.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Flow_FlowNew(JNIEnv *env,\n                                                       jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::Flow>());\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowWait\n * Signature: (JLcom/modelbox/Status;)Z\n */\nJNIEXPORT jboolean JNICALL Java_com_modelbox_Flow_FlowWait(JNIEnv *env,\n                                                           jobject j_this,\n                                                           jlong j_timeout,\n                                                           jobject j_status) {\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return false;\n  }\n\n  modelbox::Status wait_ret;\n  auto ret = n_flow->Wait((int64_t)j_timeout, &wait_ret);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    if (ret == 
modelbox::STATUS_TIMEDOUT) {\n      return false;\n    }\n\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return false;\n  }\n\n  if (j_status) {\n    auto n_status =\n        modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(\n            env, j_status);\n    if (n_status) {\n      *n_status = wait_ret;\n    }\n  }\n\n  return true;\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowStartRun\n * Signature: ()V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Flow_FlowStartRun(JNIEnv *env,\n                                                           jobject j_this) {\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_flow->StartRun();\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowInit\n * Signature: (Ljava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Flow_FlowInit__Ljava_lang_String_2Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_name, jstring j_graph) {\n  if (j_graph == nullptr || j_name == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_flow->Init(modelbox::jstring2string(env, j_name),\n                          modelbox::jstring2string(env, j_graph));\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowInit\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Flow_FlowInit__Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_file) {\n  if 
(j_file == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr || j_file == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret = n_flow->Init(modelbox::jstring2string(env, j_file));\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowInit\n * Signature:\n * (Ljava/lang/String;Lcom/modelbox/Configuration;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Flow_FlowInitByName__Ljava_lang_String_2Lcom_modelbox_Configuration_2Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_name, jobject j_args,\n    jstring j_flowdir) {\n  if (j_name == nullptr || j_args == nullptr || j_flowdir == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  std::unordered_map<std::string, std::string> m_args;\n  if (j_args != nullptr) {\n    auto n_args =\n        modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n            env, j_args);\n    if (n_args == nullptr) {\n      modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n      return;\n    }\n\n    for (const auto &key : n_args->GetKeys()) {\n      m_args[key] = n_args->GetString(key);\n    }\n  }\n\n  auto ret = n_flow->InitByName(modelbox::jstring2string(env, j_name), m_args,\n                                modelbox::jstring2string(env, j_flowdir));\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * 
Method:    FlowInit\n * Signature: (Ljava/lang/String;Lcom/modelbox/Configuration;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_Flow_FlowInitByName__Ljava_lang_String_2Lcom_modelbox_Configuration_2(\n    JNIEnv *env, jobject j_this, jstring j_name, jobject j_args) {\n  if (j_name == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  std::unordered_map<std::string, std::string> m_args;\n  if (j_args != nullptr) {\n    auto n_args =\n        modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Configuration>(\n            env, j_args);\n    if (n_args == nullptr) {\n      modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n      return;\n    }\n\n    for (const auto &key : n_args->GetKeys()) {\n      m_args[key] = n_args->GetString(key);\n    }\n  }\n\n  auto ret = n_flow->InitByName(modelbox::jstring2string(env, j_name), m_args);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowRegisterFlowUnit\n * Signature: (Lcom/modelbox/FlowUnitBuilder;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Flow_FlowRegisterFlowUnit(\n    JNIEnv *env, jobject j_this, jobject j_builder) {\n\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  if (j_builder == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_builder =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitBuilder>(\n          env, j_builder);\n  if (n_builder == nullptr) 
{\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_flow->RegisterFlowUnit(n_builder);\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowStop\n * Signature: ()V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Flow_FlowStop(JNIEnv *env,\n                                                       jobject j_this) {\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_flow->Stop();\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowCreateExternalDataMap\n * Signature: ()Lcom/modelbox/ExternalDataMap;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_Flow_FlowCreateExternalDataMap(JNIEnv *env, jobject j_this) {\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto datamap = n_flow->CreateExternalDataMap();\n  if (datamap == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"Create External data failed.\");\n    return nullptr;\n  }\n\n  jobject j_data_map = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/ExternalDataMap\", datamap);\n  if (j_data_map == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_data_map;\n}\n\n/*\n * Class:     com_modelbox_Flow\n * Method:    FlowCreateStreamIO\n * Signature: ()Lcom/modelbox/FlowStreamIO;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_Flow_FlowCreateStreamIO(JNIEnv *env, jobject j_this) {\n  auto n_flow = modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Flow>(\n      env, j_this);\n  if (n_flow == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto 
stream_io = n_flow->CreateStreamIO();\n  if (stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_FAULT,\n                               \"Create External data failed.\");\n    return nullptr;\n  }\n\n  jobject j_stream_io = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/FlowStreamIO\", stream_io);\n  if (j_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_stream_io;\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/flow_streamio.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_FlowStreamIO.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/flow_stream_io.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowStreamIO\n * Method:    FlowStreamIO_CreateBuffer\n * Signature: ()Lcom/modelbox/Buffer;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_FlowStreamIO_FlowStreamIO_1CreateBuffer(JNIEnv *env,\n                                                          jobject j_this) {\n  auto n_stream_io =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowStreamIO>(\n          env, j_this);\n  if (n_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_buff = n_stream_io->CreateBuffer();\n  if (n_buff == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_NOMEM,\n                               \"create buffer list failed.\");\n    return nullptr;\n  }\n\n  auto *j_buffer =\n      modelbox::JNINativeObject::NewJObject(env, \"com/modelbox/Buffer\", n_buff);\n  if (j_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer;\n}\n\n/*\n * Class:     com_modelbox_FlowStreamIO\n * Method:    FlowStreamIO_Send\n * Signature: 
(Ljava/lang/String;Lcom/modelbox/Buffer;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowStreamIO_FlowStreamIO_1Send__Ljava_lang_String_2Lcom_modelbox_Buffer_2(\n    JNIEnv *env, jobject j_this, jstring j_inport_name, jobject j_buffer) {\n  if (j_inport_name == nullptr || j_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_stream_io =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowStreamIO>(\n          env, j_this);\n  if (n_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_buffer =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Buffer>(env,\n                                                                      j_buffer);\n  if (n_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto ret =\n      n_stream_io->Send(modelbox::jstring2string(env, j_inport_name), n_buffer);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_FlowStreamIO\n * Method:    FlowStreamIO_Send\n * Signature: (Ljava/lang/String;[B)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowStreamIO_FlowStreamIO_1Send__Ljava_lang_String_2_3B(\n    JNIEnv *env, jobject j_this, jstring j_inport_name,\n    jbyteArray j_data_array) {\n  if (j_inport_name == nullptr || j_data_array == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_stream_io =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowStreamIO>(\n          env, j_this);\n  if (n_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto j_data_len = env->GetArrayLength(j_data_array);\n  if (j_data_len <= 0) {\n    return;\n  }\n\n  jbyte *j_data_ptr = 
env->GetByteArrayElements(j_data_array, nullptr);\n  if (j_data_ptr == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"Buffer data array is invalid\");\n    return;\n  }\n  Defer { env->ReleaseByteArrayElements(j_data_array, j_data_ptr, (jint)0); };\n\n  auto n_buffer = n_stream_io->CreateBuffer();\n  auto ret = n_buffer->BuildFromHost(j_data_ptr, j_data_len);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return;\n  }\n\n  ret =\n      n_stream_io->Send(modelbox::jstring2string(env, j_inport_name), n_buffer);\n  modelbox::ModelBoxJNIThrow(env, ret);\n}\n\n/*\n * Class:     com_modelbox_FlowStreamIO\n * Method:    FlowStreamIO_Recv\n * Signature: (Ljava/lang/String;J)Lcom/modelbox/Buffer;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_FlowStreamIO_FlowStreamIO_1Recv(\n    JNIEnv *env, jobject j_this, jstring j_outport_name, jlong j_timeout) {\n  auto n_stream_io =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowStreamIO>(\n          env, j_this);\n  if (n_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  std::shared_ptr<modelbox::Buffer> n_buff;\n  auto ret = n_stream_io->Recv(modelbox::jstring2string(env, j_outport_name),\n                               n_buff, (int64_t)j_timeout);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    if (ret == modelbox::STATUS_EOF) {\n      return nullptr;\n    }\n\n    modelbox::ModelBoxJNIThrow(env, ret, \"recv buffer failed.\");\n    return nullptr;\n  }\n\n  auto *j_buffer =\n      modelbox::JNINativeObject::NewJObject(env, \"com/modelbox/Buffer\", n_buff);\n  if (j_buffer == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return j_buffer;\n}\n\n/*\n * Class:     com_modelbox_FlowStreamIO\n * Method:    FlowStreamIO_CloseInput\n * Signature: ()V\n */\nJNIEXPORT void JNICALL 
Java_com_modelbox_FlowStreamIO_FlowStreamIO_1CloseInput(\n    JNIEnv *env, jobject j_this) {\n  auto n_stream_io =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowStreamIO>(\n          env, j_this);\n  if (n_stream_io == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_stream_io->CloseInput();\n}"
  },
  {
    "path": "src/java/jni/jni_export/flowunit.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flowunit.h>\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnit.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\nclass JavaFlowUnit : public modelbox::FlowUnit {\n public:\n  JavaFlowUnit() = default;\n\n  modelbox::Status JavaInit(JNIEnv *env, jobject j_flowunit) {\n    if (env == nullptr) {\n      std::string errmsg = \"invalid env\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    jclass cls = env->GetObjectClass(j_flowunit);\n    if (cls == nullptr) {\n      std::string errmsg = \"get object class failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n    Defer { env->DeleteLocalRef(cls); };\n\n    open_method_id_ =\n        env->GetMethodID(cls, \"open\", \"(Lcom/modelbox/Configuration;)V\");\n    if (open_method_id_ == nullptr) {\n      std::string errmsg = \"get open method id failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    close_method_id_ = env->GetMethodID(cls, \"close\", \"()V\");\n    if (close_method_id_ == nullptr) {\n      std::string errmsg = \"get close method id failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    process_method_id_ = env->GetMethodID(\n        cls, \"process\", 
\"(Lcom/modelbox/DataContext;)Lcom/modelbox/Status;\");\n    if (process_method_id_ == nullptr) {\n      std::string errmsg = \"get process method id failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    data_pre_method_id_ =\n        env->GetMethodID(cls, \"dataPre\", \"(Lcom/modelbox/DataContext;)V\");\n    if (data_pre_method_id_ == nullptr) {\n      std::string errmsg = \"get dataPre method id failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    data_post_method_id_ =\n        env->GetMethodID(cls, \"dataPost\", \"(Lcom/modelbox/DataContext;)V\");\n    if (data_post_method_id_ == nullptr) {\n      std::string errmsg = \"get dataPost method id failed.\";\n      return {modelbox::STATUS_INVALID, errmsg};\n    }\n\n    j_flowunit_ = env->NewGlobalRef(j_flowunit);\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  ~JavaFlowUnit() override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return;\n    }\n\n    if (j_flowunit_ == nullptr) {\n      return;\n    }\n\n    env->DeleteGlobalRef(j_flowunit_);\n  }\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &config) override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to get JNIEnv\"};\n    }\n\n    auto *j_config = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/Configuration\", config);\n    if (j_config == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to create Configuration\"};\n    }\n    Defer { env->DeleteLocalRef(j_config); };\n\n    env->CallVoidMethod(j_flowunit_, open_method_id_, j_config);\n    if (env->ExceptionCheck() == JNI_TRUE) {\n      auto status = modelbox::ModelboxJNICatchException(env);\n      if (status != nullptr) {\n        return *status;\n      }\n      std::string java_stack;\n      std::string errmsg = 
modelbox::ModelboxExceptionMsg(env, &java_stack);\n      MBLOG_WARN << \"JavaFlowUnit::open exception: \" << errmsg << \"\\n\"\n                 << java_stack;\n      return {modelbox::STATUS_FAULT, \"open exception:\" + errmsg};\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to get JNIEnv\"};\n    }\n\n    auto *j_data_ctx = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/DataContext\", data_ctx);\n    if (j_data_ctx == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to create DataContext\"};\n    }\n    Defer { env->DeleteLocalRef(j_data_ctx); };\n\n    auto *j_status =\n        env->CallObjectMethod(j_flowunit_, process_method_id_, j_data_ctx);\n    if (j_status == nullptr) {\n      if (env->ExceptionCheck() == JNI_TRUE) {\n        auto status = modelbox::ModelboxJNICatchException(env);\n        if (status != nullptr) {\n          return *status;\n        }\n\n        std::string java_stack;\n        std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n        MBLOG_WARN << \"JavaFlowUnit::process exception: \" << errmsg << \"\\n\"\n                   << java_stack;\n        return {modelbox::STATUS_FAULT, \"process exception:\" + errmsg};\n      }\n      return {modelbox::STATUS_FAULT, \"process failed\"};\n    }\n    Defer { env->DeleteLocalRef(j_status); };\n\n    auto status =\n        modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(\n            env, j_status);\n    if (status == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to get Status\"};\n    }\n\n    return *status;\n  }\n\n  modelbox::Status Close() override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return 
{modelbox::STATUS_INVALID, \"Failed to get JNIEnv\"};\n    }\n\n    env->CallVoidMethod(j_flowunit_, close_method_id_);\n    if (env->ExceptionCheck() == JNI_TRUE) {\n      auto status = modelbox::ModelboxJNICatchException(env);\n      if (status != nullptr) {\n        return *status;\n      }\n      std::string java_stack;\n      std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n      MBLOG_WARN << \"JavaFlowUnit::close exception: \" << errmsg << \"\\n\"\n                 << java_stack;\n      return {modelbox::STATUS_FAULT, \"close exception:\" + errmsg};\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  modelbox::Status DataPre(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to get JNIEnv\"};\n    }\n\n    auto *j_data_ctx = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/DataContext\", data_ctx);\n    if (j_data_ctx == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to create DataContext\"};\n    }\n    Defer { env->DeleteLocalRef(j_data_ctx); };\n\n    env->CallVoidMethod(j_flowunit_, data_pre_method_id_, j_data_ctx);\n    if (env->ExceptionCheck() == JNI_TRUE) {\n      auto status = modelbox::ModelboxJNICatchException(env);\n      if (status != nullptr) {\n        return *status;\n      }\n\n      std::string java_stack;\n      std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n      MBLOG_WARN << \"JavaFlowUnit::dataPre exception: \" << errmsg << \"\\n\"\n                 << java_stack;\n      return {modelbox::STATUS_FAULT, \"dataPre exception:\" + errmsg};\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n  modelbox::Status DataPost(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return 
{modelbox::STATUS_INVALID, \"Failed to get JNIEnv\"};\n    }\n\n    auto *j_data_ctx = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/DataContext\", data_ctx);\n    if (j_data_ctx == nullptr) {\n      return {modelbox::STATUS_INVALID, \"Failed to create DataContext\"};\n    }\n    Defer { env->DeleteLocalRef(j_data_ctx); };\n\n    env->CallVoidMethod(j_flowunit_, data_post_method_id_, j_data_ctx);\n    if (env->ExceptionCheck() == JNI_TRUE) {\n      std::string java_stack;\n      std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n      MBLOG_WARN << \"JavaFlowUnit::dataPost exception: \" << errmsg << \"\\n\"\n                 << java_stack;\n      return {modelbox::STATUS_FAULT, \"dataPost exception:\" + errmsg};\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  }\n\n private:\n  jobject j_flowunit_{nullptr};\n  jmethodID open_method_id_{nullptr};\n  jmethodID process_method_id_{nullptr};\n  jmethodID close_method_id_{nullptr};\n  jmethodID data_pre_method_id_{nullptr};\n  jmethodID data_post_method_id_{nullptr};\n};\n\n/*\n * Class:     com_modelbox_FlowUnit\n * Method:    FlowUnit_New\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnit_FlowUnit_1New(JNIEnv *env, jobject j_this) {\n  auto n_flowunit = std::make_shared<JavaFlowUnit>();\n  auto ret = n_flowunit->JavaInit(env, j_this);\n  if (!ret) {\n    modelbox::ModelBoxJNIThrow(env, ret);\n    return 0;\n  }\n\n  return modelbox::JNINativeObject::NewHandle(j_this, n_flowunit);\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_builder.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/flowunit_builder.h>\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitBuilder.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\nclass JavaFlowUnitBuilder : public modelbox::FlowUnitBuilder {\n public:\n  JavaFlowUnitBuilder(jobject j_builder) {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return;\n    }\n\n    j_builder_ = env->NewGlobalRef(j_builder);\n  }\n\n  virtual ~JavaFlowUnitBuilder() {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return;\n    }\n\n    env->DeleteGlobalRef(j_builder_);\n  }\n\n  void Probe(std::shared_ptr<modelbox::FlowUnitDesc> &desc) override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      MBLOG_ERROR << \"Failed to get JNIEnv\";\n      return;\n    }\n\n    auto *j_desc = modelbox::JNINativeObject::NewJObject(\n        env, \"com/modelbox/FlowUnitDesc\", desc);\n    if (j_desc == nullptr) {\n      MBLOG_ERROR << \"new java flowunit desc failed\" << modelbox::StatusError;\n      return;\n    }\n    Defer { env->DeleteLocalRef(j_desc); };\n\n    jmethodID probe_method 
=\n        env->GetMethodID(env->GetObjectClass(j_builder_), \"probe\",\n                         \"(Lcom/modelbox/FlowUnitDesc;)V\");\n    if (probe_method == nullptr) {\n      MBLOG_ERROR << \"get probe method failed.\";\n      return;\n    }\n\n    env->CallVoidMethod(j_builder_, probe_method, j_desc);\n    if (env->ExceptionCheck() == JNI_TRUE) {\n      std::string java_stack;\n      std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n      MBLOG_WARN << \"flowunit probe exception: \" << errmsg << \"\\n\"\n                 << java_stack;\n    }\n  }\n\n  std::shared_ptr<modelbox::FlowUnit> Build() override {\n    modelbox::ScopedJvm scope;\n    auto *env = scope.GetJNIEnv();\n    if (env == nullptr) {\n      return nullptr;\n    }\n\n    jmethodID build_method = env->GetMethodID(\n        env->GetObjectClass(j_builder_), \"build\", \"()Lcom/modelbox/FlowUnit;\");\n    if (build_method == nullptr) {\n      MBLOG_ERROR << \"get build method failed.\";\n      return nullptr;\n    }\n\n    jobject j_flow_unit = env->CallObjectMethod(j_builder_, build_method);\n    if (j_flow_unit == nullptr) {\n      if (env->ExceptionCheck() == JNI_TRUE) {\n        std::string java_stack;\n        std::string errmsg = modelbox::ModelboxExceptionMsg(env, &java_stack);\n        MBLOG_WARN << \"flowunit builder exception: \" << errmsg << \"\\n\"\n                   << java_stack;\n        modelbox::StatusError = {modelbox::STATUS_FAULT,\n                                 \"flowunit builder exception:\" + errmsg};\n        return nullptr;\n      }\n      modelbox::StatusError = {modelbox::STATUS_FAULT,\n                               \"flowunit builder failed\"};\n      return nullptr;\n    }\n\n    auto n_flowunit =\n        modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnit>(\n            env, j_flow_unit);\n    if (n_flowunit == nullptr) {\n      MBLOG_ERROR << \"get native flowunit failed\" << modelbox::StatusError;\n      modelbox::ModelBoxJNIThrow(env, 
modelbox::StatusError);\n      return nullptr;\n    }\n\n    return n_flowunit;\n  }\n\n private:\n  jobject j_builder_;\n};\n\n/*\n * Class:     com_modelbox_FlowUnitBuilder\n * Method:    FlowUnitBuilderNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_FlowUnitBuilder_FlowUnitBuilderNew(\n    JNIEnv *env, jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<JavaFlowUnitBuilder>(j_this));\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_desc.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flowunit.h>\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitDesc.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescNew(JNIEnv *env, jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitDesc>());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescGetFlowUnitName\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescGetFlowUnitName(JNIEnv *env,\n                                                           jobject j_this) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_desc->GetFlowUnitName().c_str());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescGetFlowUnitType\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring 
JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescGetFlowUnitType(JNIEnv *env,\n                                                           jobject j_this) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_desc->GetFlowUnitType().c_str());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescGetFlowUnitAliasName\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescGetFlowUnitAliasName(\n    JNIEnv *env, jobject j_this) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_desc->GetFlowUnitAliasName().c_str());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescGetFlowUnitArgument\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescGetFlowUnitArgument(JNIEnv *env,\n                                                               jobject j_this) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_desc->GetFlowUnitArgument().c_str());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetFlowUnitName\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetFlowUnitName(JNIEnv *env,\n                                                           jobject j_this,\n                                                           jstring j_name) {\n  auto n_desc =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetFlowUnitName(modelbox::jstring2string(env, j_name));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetFlowUnitType\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetFlowUnitType(JNIEnv *env,\n                                                           jobject j_this,\n                                                           jstring j_type) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetFlowUnitType(modelbox::jstring2string(env, j_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescAddFlowUnitInput\n * Signature: (Lcom/modelbox/FlowUnitInput;)V;\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescAddFlowUnitInput(JNIEnv *env,\n                                                            jobject j_this,\n                                                            jobject j_input) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  auto n_input =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitInput>(\n          env, j_input);\n  if (n_input == nullptr) {\n    return;\n  }\n\n  auto status = n_desc->AddFlowUnitInput(*n_input);\n  modelbox::ModelBoxJNIThrow(env, status);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescAddFlowUnitOutput\n * Signature: (Lcom/modelbox/FlowUnitOutput;)V;\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescAddFlowUnitOutput(JNIEnv *env,\n                                                             jobject j_this,\n  
                                                           jobject j_output) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  auto n_output =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitOutput>(\n          env, j_output);\n  if (n_output == nullptr) {\n    return;\n  }\n\n  auto status = n_desc->AddFlowUnitOutput(*n_output);\n  modelbox::ModelBoxJNIThrow(env, status);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescAddFlowUnitOption\n * Signature: (Lcom/modelbox/FlowUnitOption;)Lcom/modelbox/Status;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescAddFlowUnitOption(JNIEnv *env,\n                                                             jobject j_this,\n                                                             jobject j_option) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return nullptr;\n  }\n\n  auto n_option =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitOption>(\n          env, j_option);\n  if (n_option == nullptr) {\n    return nullptr;\n  }\n\n  auto status = n_desc->AddFlowUnitOption(*n_option);\n  modelbox::ModelBoxJNIThrow(env, status);\n  return nullptr;\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetConditionType\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetConditionType(JNIEnv *env,\n                                                            jobject j_this,\n                                                            jlong j_type) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  if (j_type > modelbox::IF_ELSE) {\n    return;\n  }\n\n  
n_desc->SetConditionType(static_cast<modelbox::ConditionType>(j_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetLoopType\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_FlowUnitDesc_FlowUnitDescSetLoopType(\n    JNIEnv *env, jobject j_this, jlong j_type) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  if (j_type > modelbox::LOOP) {\n    return;\n  }\n\n  n_desc->SetLoopType(static_cast<modelbox::LoopType>(j_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetOutputType\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_FlowUnitDesc_FlowUnitDescSetOutputType(\n    JNIEnv *env, jobject j_this, jlong j_type) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  if (j_type > modelbox::COLLAPSE) {\n    return;\n  }\n\n  n_desc->SetOutputType(static_cast<modelbox::FlowOutputType>(j_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetFlowType\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_FlowUnitDesc_FlowUnitDescSetFlowType(\n    JNIEnv *env, jobject j_this, jlong j_type) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  if (j_type > modelbox::NORMAL) {\n    return;\n  }\n\n  n_desc->SetFlowType(static_cast<modelbox::FlowType>(j_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetStreamSameCount\n * Signature: (Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetStreamSameCount(\n    JNIEnv *env, jobject j_this, jboolean j_same_count) {\n  auto n_desc =\n      
modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetStreamSameCount(j_same_count);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetInputContiguous\n * Signature: (Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetInputContiguous(\n    JNIEnv *env, jobject j_this, jboolean j_contiguous) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetInputContiguous(j_contiguous);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetResourceNice\n * Signature: (Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetResourceNice(JNIEnv *env,\n                                                           jobject j_this,\n                                                           jboolean j_nice) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetResourceNice(j_nice);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetCollapseAll\n * Signature: (Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetCollapseAll(\n    JNIEnv *env, jobject j_this, jboolean j_collapse_all) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetCollapseAll(j_collapse_all);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetExceptionVisible\n * Signature: (Z)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetExceptionVisible(\n    JNIEnv *env, jobject j_this, jboolean j_visible) {\n  auto n_desc 
=\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetExceptionVisible(j_visible);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetDescription\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetDescription(JNIEnv *env,\n                                                          jobject j_this,\n                                                          jstring j_desc) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  auto s_desc = modelbox::jstring2string(env, j_desc);\n  n_desc->SetDescription(s_desc);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetMaxBatchSize\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetMaxBatchSize(\n    JNIEnv *env, jobject j_this, jlong j_max_batch_size) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetMaxBatchSize(j_max_batch_size);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitDesc\n * Method:    FlowUnitDescSetDefaultBatchSize\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_FlowUnitDesc_FlowUnitDescSetDefaultBatchSize(\n    JNIEnv *env, jobject j_this, jlong j_default_batch_size) {\n  auto n_desc =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitDesc>(\n          env, j_this);\n  if (n_desc == nullptr) {\n    return;\n  }\n\n  n_desc->SetDefaultBatchSize(j_default_batch_size);\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_error.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitError.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/error.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowUnitError\n * Method:    FlowUnitError_New\n * Signature: (Ljava/lang/String;)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitError_FlowUnitError_1New__Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_desc) {\n  if (j_desc == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::JNIEXCEPT_NullPointer,\n                               \"input argument is null\");\n    return 0;\n  }\n\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitError>(\n                  modelbox::jstring2string(env, j_desc)));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitError\n * Method:    FlowUnitError_New\n * Signature: (Ljava/lang/String;Ljava/lang/String;Lcom/modelbox/Status;)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitError_FlowUnitError_1New__Ljava_lang_String_2Ljava_lang_String_2Lcom_modelbox_Status_2(\n    JNIEnv *env, jobject j_this, jstring j_node, jstring j_pos,\n    jobject j_status) {\n  if (j_node == nullptr || j_pos == nullptr || j_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::JNIEXCEPT_NullPointer,\n 
                              \"input argument is null\");\n    return 0;\n  }\n\n  auto n_flowunit_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_status);\n  if (n_flowunit_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return 0;\n  }\n\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitError>(\n                  modelbox::jstring2string(env, j_node),\n                  modelbox::jstring2string(env, j_pos), *n_flowunit_status));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitError\n * Method:    FlowUnitError_GetDesc\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_FlowUnitError_FlowUnitError_1GetDesc(JNIEnv *env,\n                                                       jobject j_this) {\n  auto n_flowunit_error =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitError>(\n          env, j_this);\n  if (n_flowunit_error == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_flowunit_error->GetDesc().c_str());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitError\n * Method:    FlowUnitError_GetStatus\n * Signature: ()Lcom/modelbox/Status;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_FlowUnitError_FlowUnitError_1GetStatus(JNIEnv *env,\n                                                         jobject j_this) {\n  auto n_flowunit_error =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitError>(\n          env, j_this);\n  if (n_flowunit_error == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto stat = std::make_shared<modelbox::Status>(n_flowunit_error->GetStatus());\n\n  auto *j_status =\n      modelbox::JNINativeObject::NewJObject(env, \"com/modelbox/Status\", stat);\n 
 modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n  return j_status;\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_event.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitEvent.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/data_context.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowUnitEvent\n * Method:    FlowUnitEventNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitEvent_FlowUnitEventNew(JNIEnv *env, jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitEvent>());\n}\n\n/*\n * Class:     com_modelbox_FlowUnitEvent\n * Method:    FlowUnitEventSet\n * Signature: (Ljava/lang/String;Ljava/lang/Object;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_FlowUnitEvent_FlowUnitEventSet(\n    JNIEnv *env, jobject j_this, jstring j_key, jobject j_object) {\n  if (j_key == nullptr || j_object == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return;\n  }\n\n  auto n_flowunit_event =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitEvent>(\n          env, j_this);\n  if (n_flowunit_event == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto *j_global_object = env->NewGlobalRef(j_object);\n  
std::shared_ptr<void> priv_ptr(\n      (void *)j_global_object, [](void *j_global_object) {\n        modelbox::ScopedJvm scoped;\n        scoped.GetJNIEnv()->DeleteGlobalRef((jobject)j_global_object);\n      });\n  n_flowunit_event->SetPrivate(modelbox::jstring2string(env, j_key), priv_ptr);\n}\n\n/*\n * Class:     com_modelbox_FlowUnitEvent\n * Method:    FlowUnitEventGet\n * Signature: (Ljava/lang/String;)Ljava/lang/Object;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_FlowUnitEvent_FlowUnitEventGet(\n    JNIEnv *env, jobject j_this, jstring j_key) {\n  if (j_key == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"input argument is null\");\n    return nullptr;\n  }\n\n  auto n_flowunit_event =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitEvent>(\n          env, j_this);\n  if (n_flowunit_event == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_object =\n      n_flowunit_event->GetPrivate(modelbox::jstring2string(env, j_key));\n  if (n_object == nullptr) {\n    return nullptr;\n  }\n\n  return (jobject)n_object.get();\n}"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_input.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flowunit.h>\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitInput.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowUnitInput\n * Method:    FlowUnitInput_New\n * Signature: (Ljava/lang/String;)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitInput_FlowUnitInput_1New__Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_name) {\n  auto name = modelbox::jstring2string(env, j_name);\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitInput>(name));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitInput\n * Method:    FlowUnitInput_New\n * Signature: (Ljava/lang/String;Ljava/lang/String;)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitInput_FlowUnitInput_1New__Ljava_lang_String_2Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_name, jstring j_device_type) {\n  auto name = modelbox::jstring2string(env, j_name);\n  auto device_type = modelbox::jstring2string(env, j_device_type);\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitInput>(name, device_type));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitInput\n * Method:    
FlowUnitInput_New\n * Signature: (Ljava/lang/String;J)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitInput_FlowUnitInput_1New__Ljava_lang_String_2J(\n    JNIEnv *env, jobject j_this, jstring j_name, jlong j_device_flags) {\n  auto name = modelbox::jstring2string(env, j_name);\n\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitInput>(name, j_device_flags));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitInput\n * Method:    FlowUnitInput_New\n * Signature: (Ljava/lang/String;Ljava/lang/String;J)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitInput_FlowUnitInput_1New__Ljava_lang_String_2Ljava_lang_String_2J(\n    JNIEnv *env, jobject j_this, jstring j_name, jstring j_device_type,\n    jlong j_device_flags) {\n  auto name = modelbox::jstring2string(env, j_name);\n  auto device_type = modelbox::jstring2string(env, j_device_type);\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitInput>(name, device_type,\n                                                        j_device_flags));\n}"
  },
  {
    "path": "src/java/jni/jni_export/flowunit_output.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flowunit.h>\n\n#include <memory>\n\n#include \"com_modelbox_FlowUnitOutput.h\"\n#include \"jni_native_object.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_FlowUnitOutput\n * Method:    FlowUnitOutput_New\n * Signature: (Ljava/lang/String;)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitOutput_FlowUnitOutput_1New__Ljava_lang_String_2(\n    JNIEnv *env, jobject j_this, jstring j_name) {\n  auto name = modelbox::jstring2string(env, j_name);\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitOutput>(name));\n}\n\n/*\n * Class:     com_modelbox_FlowUnitOutput\n * Method:    FlowUnitOutput_New\n * Signature: (Ljava/lang/String;J)J\n */\nJNIEXPORT jlong JNICALL\nJava_com_modelbox_FlowUnitOutput_FlowUnitOutput_1New__Ljava_lang_String_2J(\n    JNIEnv *env, jobject j_this, jstring j_name, jlong j_device_flags) {\n  auto name = modelbox::jstring2string(env, j_name);\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::FlowUnitOutput>(name, j_device_flags));\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/log.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"log.h\"\n\n#include <modelbox/base/log.h>\n\n#include <memory>\n\n#include \"com_modelbox_Log.h\"\n#include \"jni_native_object.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Log_LogNew(JNIEnv *env,\n                                                     jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::LoggerJava>());\n}\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogSetLogLevel\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Log_LogSetLogLevel(JNIEnv *env,\n                                                            jobject j_this,\n                                                            jlong j_level) {\n  auto n_log =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::LoggerJava>(\n          env, j_this);\n  if (n_log == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_log->SetLogLevel((modelbox::LogLevel)j_level);\n}\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogGetLogLevel\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Log_LogGetLogLevel(JNIEnv *env,\n                                          
                   jobject j_this) {\n  auto n_log =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::LoggerJava>(\n          env, j_this);\n  if (n_log == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return (jlong)modelbox::LogLevel::LOG_OFF;\n  }\n\n  auto logLevel = n_log->GetLogLevel();\n  if (logLevel > modelbox::LogLevel::LOG_OFF) {\n    return (jlong)modelbox::LogLevel::LOG_OFF;\n  }\n  return (jlong)logLevel;\n}\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogGetLogger\n * Signature: ()Lcom/modelbox/Log;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_Log_LogGetLogger(JNIEnv *env,\n                                                             jclass j_clazz) {\n  auto n_log = ModelBoxLogger.GetLogger();\n  if (n_log == nullptr) {\n    return nullptr;\n  }\n\n  auto logger_java = std::dynamic_pointer_cast<modelbox::LoggerJava>(n_log);\n  if (logger_java == nullptr) {\n    jobject j_log =\n        modelbox::JNINativeObject::NewJObject(env, \"com/modelbox/Log\", n_log);\n    return j_log;\n  }\n\n  auto *j_log = logger_java->GetJNICaller();\n\n  return j_log;\n}\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogReg\n * Signature: (Lcom/modelbox/Log;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Log_LogReg(JNIEnv *env, jclass j_clazz,\n                                                    jobject j_log) {\n  auto n_log =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::LoggerJava>(\n          env, j_log);\n  if (n_log == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_log->RegJNICaller(env, j_log);\n  ModelBoxLogger.SetLogger(n_log);\n}\n\n/*\n * Class:     com_modelbox_Log\n * Method:    LogUnReg\n * Signature: ()V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Log_LogUnReg(JNIEnv *env,\n                                                      jclass j_clazz) {\n  ModelBoxLogger.SetLogger(nullptr);\n}\n\n/*\n * Class:     com_modelbox_Log\n 
* Method:    LogPrint\n * Signature: (JLjava/lang/String;ILjava/lang/String;Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Log_LogPrint(\n    JNIEnv *env, jclass j_clazz, jlong j_level, jstring j_file, jint j_lineno,\n    jstring j_func, jstring j_msg) {\n  ModelBoxLogger.Print((modelbox::LogLevel)j_level,\n                       modelbox::jstring2string(env, j_file).c_str(), j_lineno,\n                       modelbox::jstring2string(env, j_func).c_str(), \"%s\",\n                       modelbox::jstring2string(env, j_msg).c_str());\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/native_object.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n\n#include <memory>\n\n#include \"com_modelbox_NativeObject.h\"\n#include \"jni_native_object.h\"\n#include \"throw.h\"\n\n/*\n * Class:     com_modelbox_NativeObject\n * Method:    delete_handle\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_NativeObject_delete_1handle(\n    JNIEnv *env, jobject jself, jlong handle) {\n  modelbox::JNINativeObject::DeleteHandle(handle);\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/session_context.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/session_context.h\"\n\n#include <memory>\n\n#include \"com_modelbox_SessionContext.h\"\n#include \"jni_native_object.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\n/*\n * Class:     com_modelbox_SessionContext\n * Method:    SessionContext_SetSessionId\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_SessionContext_SessionContext_1SetSessionId(\n    JNIEnv *env, jobject j_this, jstring j_session_id) {\n  auto n_session_context =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::SessionContext>(\n          env, j_this);\n  if (n_session_context == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_session_context->SetSessionId(modelbox::jstring2string(env, j_session_id));\n}\n\n/*\n * Class:     com_modelbox_SessionContext\n * Method:    SessionContext_GetSessionId\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_SessionContext_SessionContext_1GetSessionId(JNIEnv *env,\n                                                              jobject j_this) {\n  auto n_session_context =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::SessionContext>(\n          env, j_this);\n  if (n_session_context == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, 
modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_session_context->GetSessionId().c_str());\n}\n\n/*\n * Class:     com_modelbox_SessionContext\n * Method:    SessionContext_GetConfiguration\n * Signature: ()Lcom/modelbox/Configuration;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_SessionContext_SessionContext_1GetConfiguration(\n    JNIEnv *env, jobject j_this) {\n  auto n_session_context =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::SessionContext>(\n          env, j_this);\n  if (n_session_context == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_config = n_session_context->GetConfig();\n  if (n_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"configuration is invalid\");\n    return nullptr;\n  }\n\n  auto *j_config = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/Configuration\", n_config);\n  if (j_config == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError,\n                               \"configuration is invalid\");\n  }\n  return j_config;\n}\n\n/*\n * Class:     com_modelbox_SessionContext\n * Method:    SessionContext_SetError\n * Signature: (Lcom/modelbox/FlowUnitError;)V\n */\nJNIEXPORT void JNICALL\nJava_com_modelbox_SessionContext_SessionContext_1SetError(JNIEnv *env,\n                                                          jobject j_this,\n                                                          jobject j_error) {\n  auto n_session_context =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::SessionContext>(\n          env, j_this);\n  if (n_session_context == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_error =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::FlowUnitError>(\n          env, j_error);\n  if (n_error == nullptr) {\n    
modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"flowunit error is invalid\");\n    return;\n  }\n\n  n_session_context->SetError(n_error);\n}\n\n/*\n * Class:     com_modelbox_SessionContext\n * Method:    SessionContext_GetError\n * Signature: ()Lcom/modelbox/FlowUnitError;\n */\nJNIEXPORT jobject JNICALL\nJava_com_modelbox_SessionContext_SessionContext_1GetError(JNIEnv *env,\n                                                          jobject j_this) {\n  auto n_session_context =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::SessionContext>(\n          env, j_this);\n  if (n_session_context == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  auto n_error = n_session_context->GetError();\n  if (n_error == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"flowunit error is invalid\");\n    return nullptr;\n  }\n\n  auto *j_error = modelbox::JNINativeObject::NewJObject(\n      env, \"com/modelbox/FlowUnitError\", n_error);\n  if (j_error == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError,\n                               \"flowunit error is invalid\");\n  }\n\n  return j_error;\n}\n"
  },
  {
    "path": "src/java/jni/jni_export/status.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/status.h\"\n\n#include <memory>\n\n#include \"com_modelbox_Status.h\"\n#include \"jni_native_object.h\"\n#include \"modelbox/base/utils.h\"\n#include \"throw.h\"\n#include \"utils.h\"\n\njobject GetJStatusCodeFromStatus(JNIEnv *env, modelbox::Status &status) {\n  jclass j_cls = env->FindClass(\"com/modelbox/StatusCode\");\n  if (j_cls == nullptr) {\n    modelbox::StatusError = {modelbox::STATUS_INTERNAL,\n                             \"Cannot find class StatusCode\"};\n    return nullptr;\n  }\n\n  Defer { env->DeleteLocalRef(j_cls); };\n\n  jfieldID j_field = env->GetStaticFieldID(\n      j_cls, status.StrStatusCode().c_str(), \"Lcom/modelbox/StatusCode;\");\n  if (j_field == nullptr) {\n    modelbox::StatusError = {modelbox::STATUS_FAULT,\n                             \"Cannot find enum for StatusCode\"};\n    return nullptr;\n  }\n\n  jobject j_code = env->GetStaticObjectField(j_cls, j_field);\n\n  return j_code;\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusNew\n * Signature: ()J\n */\nJNIEXPORT jlong JNICALL Java_com_modelbox_Status_StatusNew(JNIEnv *env,\n                                                           jobject j_this) {\n  return modelbox::JNINativeObject::NewHandle(\n      j_this, std::make_shared<modelbox::Status>());\n}\n\n/*\n * Class:     
com_modelbox_Status\n * Method:    StatusSetCode\n * Signature: (J)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Status_StatusSetCode(JNIEnv *env,\n                                                              jobject j_this,\n                                                              jlong j_code) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  if (j_code >= modelbox::STATUS_LASTFLAG) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"statuscode is invalid\");\n    return;\n  }\n\n  *n_status = (modelbox::StatusCode)j_code;\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusWrap\n * Signature: (Lcom/modelbox/Status;JLjava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_Status_StatusWrap(\n    JNIEnv *env, jobject j_this, jobject j_status_other, jlong j_code,\n    jstring j_message) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  auto n_status_other =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(\n          env, j_status_other);\n  if (n_status_other == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  if (j_code >= modelbox::STATUS_LASTFLAG) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"statuscode is invalid\");\n    return;\n  }\n\n  n_status->Wrap(*n_status_other, (modelbox::StatusCode)j_code,\n                 modelbox::jstring2string(env, j_message));\n}\n\n/*\n * Class:     
com_modelbox_Status\n * Method:    StatusToSting\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Status_StatusToSting(JNIEnv *env, jobject j_this) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_status->ToString().c_str());\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusCode\n * Signature: ()Lcom/modelbox/StatusCode;\n */\nJNIEXPORT jobject JNICALL Java_com_modelbox_Status_StatusCode(JNIEnv *env,\n                                                              jobject j_this) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n  auto *j_code = GetJStatusCodeFromStatus(env, *n_status);\n  if (j_code == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n  }\n\n  return j_code;\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusStrCode\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Status_StatusStrCode(JNIEnv *env, jobject j_this) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_status->StrCode().c_str());\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusSetErrorMsg\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL 
Java_com_modelbox_Status_StatusSetErrorMsg(\n    JNIEnv *env, jobject j_this, jstring j_message) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return;\n  }\n\n  n_status->SetErrormsg(modelbox::jstring2string(env, j_message));\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusErrorMsg\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Status_StatusErrorMsg(JNIEnv *env, jobject j_this) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_status->Errormsg().c_str());\n}\n\n/*\n * Class:     com_modelbox_Status\n * Method:    StatusWrapErrormsgs\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL\nJava_com_modelbox_Status_StatusWrapErrormsgs(JNIEnv *env, jobject j_this) {\n  auto n_status =\n      modelbox::JNINativeObject::GetNativeSharedPtr<modelbox::Status>(env,\n                                                                      j_this);\n  if (n_status == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::StatusError);\n    return nullptr;\n  }\n\n  return env->NewStringUTF(n_status->WrapErrormsgs().c_str());\n}"
  },
  {
    "path": "src/java/jni/jni_native_object.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"jni_native_object.h\"\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nJNINativeObject::JNINativeObject(jobject jni_object,\n                                 std::shared_ptr<void> native_shared_ptr)\n    : jni_object_(jni_object),\n      native_shared_ptr_(std::move(native_shared_ptr)){};\n\nJNINativeObject::JNINativeObject() = default;\n\nJNINativeObject::~JNINativeObject() = default;\n\njobject JNINativeObject::GetJObject() { return jni_object_; }\n\nvoid JNINativeObject::SetJObject(jobject object) { jni_object_ = object; }\n\nstd::shared_ptr<void> JNINativeObject::GetNativeSharedPtr() {\n  return native_shared_ptr_;\n}\n\nStatus JNINativeObject::SetNativeSharedPtr(\n    JNIEnv *env, jlong handle, const std::shared_ptr<void> &native_shared_ptr) {\n  Status ret = STATUS_SUCCESS;\n  auto *native_object = FromHandle(handle);\n  if (native_object == nullptr) {\n    return {STATUS_INVALID, \"handle is invalid\"};\n  }\n\n  native_object->SetNativeSharedPtr(native_shared_ptr);\n  return ret;\n}\n\nStatus JNINativeObject::SetNativeSharedPtr(\n    JNIEnv *env, jobject object, const std::shared_ptr<void> &native_shared_ptr,\n    const char *member) {\n  Status ret = STATUS_SUCCESS;\n  auto *native_object = FromJObject(env, object, member);\n  if 
(native_object == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  native_object->SetNativeSharedPtr(native_shared_ptr);\n  return ret;\n}\n\nvoid JNINativeObject::SetNativeSharedPtr(\n    std::shared_ptr<void> native_shared_ptr) {\n  native_shared_ptr_ = std::move(native_shared_ptr);\n}\n\nJNINativeObject *JNINativeObject::FromHandle(jlong handle) {\n  return reinterpret_cast<JNINativeObject *>(handle);\n}\n\nJNINativeObject *JNINativeObject::FromJObject(JNIEnv *env, jobject object,\n                                              const char *member) {\n  if (env == nullptr || object == nullptr) {\n    std::string errmsg = \"get jni native from object failed, invalid argument\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return nullptr;\n  }\n\n  jclass cls = env->GetObjectClass(object);\n  if (cls == nullptr) {\n    std::string errmsg = \"get object class failed.\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return nullptr;\n  }\n  Defer { env->DeleteLocalRef(cls); };\n\n  jfieldID ptrField = env->GetFieldID(cls, member, \"J\");\n  if (ptrField == nullptr) {\n    std::string errmsg = \"not a modelbox object, not extends from NativeObject\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return nullptr;\n  }\n\n  auto handle = (jlong)env->GetLongField(object, ptrField);\n  if (handle == 0) {\n    std::string errmsg = \"native handler is invalid\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return nullptr;\n  }\n\n  auto *native_object = FromHandle(handle);\n  if (native_object == nullptr) {\n    return nullptr;\n  }\n\n  return native_object;\n}\n\njlong JNINativeObject::NewHandle(\n    jobject object, const std::shared_ptr<void> &native_shared_ptr) {\n  auto *native_object = new JNINativeObject(object, native_shared_ptr);\n  return (jlong)native_object;\n}\n\njobject JNINativeObject::NewJObject(\n    JNIEnv *env, const char *clazz,\n    const std::shared_ptr<void> &native_shared_ptr, const char *member) {\n  jclass cls = 
env->FindClass(clazz);\n  if (cls == nullptr) {\n    StatusError = {STATUS_INVALID, \"cannot find class \" + std::string(clazz)};\n    return nullptr;\n  }\n  Defer { env->DeleteLocalRef(cls); };\n\n  auto *cls_constructor = env->GetMethodID(cls, \"<init>\", \"()V\");\n  if (cls_constructor == nullptr) {\n    std::string errmsg =\n        \"cannot find constructor for \" + std::string(clazz) + \".\";\n    modelbox::StatusError = {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  jobject object = env->NewObject(cls, cls_constructor);\n  if (object == nullptr) {\n    std::string errmsg = \"new object for \" + std::string(clazz) + \" failed.\";\n    modelbox::StatusError = {modelbox::STATUS_NOMEM, errmsg};\n    return nullptr;\n  }\n\n  jfieldID ptrField = env->GetFieldID(cls, member, \"J\");\n  if (ptrField == nullptr) {\n    std::string errmsg =\n        \"not a modelbox class, not extends from NativeObject, \";\n    errmsg += \"class: \" + std::string(clazz);\n    modelbox::StatusError = {modelbox::STATUS_INVALID, errmsg};\n    env->DeleteLocalRef(object);\n    return nullptr;\n  }\n\n  jlong oldptr = env->GetLongField(object, ptrField);\n  if (oldptr != 0) {\n    DeleteHandle(oldptr);\n  }\n\n  auto native_object = NewHandle(object, native_shared_ptr);\n  env->SetLongField(object, ptrField, native_object);\n  return object;\n}\n\nvoid JNINativeObject::DeleteHandle(jlong handle) {\n  auto *native_object = FromHandle(handle);\n  if (native_object == nullptr) {\n    return;\n  }\n\n  delete native_object;\n}\n\nvoid JNINativeObject::DeleteJObject(JNIEnv *env, jobject object,\n                                    const char *member) {\n  auto *native_object = FromJObject(env, object, member);\n  if (native_object == nullptr) {\n    return;\n  }\n  \n  jclass cls = env->GetObjectClass(object);\n  if (cls == nullptr) {\n    std::string errmsg = \"get object class failed.\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return;\n  }\n  Defer { env->DeleteLocalRef(cls); 
};\n\n  jfieldID ptrField = env->GetFieldID(cls, member, \"J\");\n  if (ptrField == nullptr) {\n    std::string errmsg = \"not a modelbox object, not extends from NativeObject\";\n    StatusError = {STATUS_INVALID, errmsg};\n    return;\n  }\n  env->SetLongField(object, ptrField, 0);\n\n  delete native_object;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/java/jni/jni_native_object.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JNI_BIND_H_\n#define MODELBOX_JNI_BIND_H_\n\n#include <jni.h>\n\n#include <memory>\n#include <string>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nconstexpr const char *NATIVE_HANDLER_MEMBER_NAME = \"native_handle\";\n\nclass JNINativeObject {\n public:\n  JNINativeObject(jobject jni_object, std::shared_ptr<void> native_shared_ptr);\n  virtual ~JNINativeObject();\n\n  jobject GetJObject();\n\n  void SetJObject(jobject object);\n\n  std::shared_ptr<void> GetNativeSharedPtr();\n\n  template <typename T>\n  inline std::shared_ptr<T> GetNativeSharedPtr() {\n    return std::static_pointer_cast<T>(GetNativeSharedPtr());\n  }\n\n  static jlong NewHandle(jobject object,\n                         const std::shared_ptr<void> &native_shared_ptr);\n\n  static void DeleteHandle(jlong handle);\n\n  static jobject NewJObject(JNIEnv *env, const char *clazz,\n                            const std::shared_ptr<void> &native_shared_ptr,\n                            const char *member = NATIVE_HANDLER_MEMBER_NAME);\n\n  static void DeleteJObject(JNIEnv *env, jobject object,\n                            const char *member = NATIVE_HANDLER_MEMBER_NAME);\n\n  template <typename T>\n  inline static std::shared_ptr<T> GetNativeSharedPtr(jlong handle) {\n    auto native_object = FromHandle(handle);\n 
   if (native_object == nullptr) {\n      return nullptr;\n    }\n\n    return native_object->GetNativeSharedPtr<T>();\n  }\n\n  template <typename T>\n  inline static std::shared_ptr<T> GetNativeSharedPtr(\n      JNIEnv *env, jobject object,\n      const char *member = NATIVE_HANDLER_MEMBER_NAME) {\n    auto native_object = FromJObject(env, object, member);\n    if (native_object == nullptr) {\n      return nullptr;\n    }\n\n    return native_object->GetNativeSharedPtr<T>();\n  }\n\n  static Status SetNativeSharedPtr(\n      JNIEnv *env, jlong handle,\n      const std::shared_ptr<void> &native_shared_ptr);\n\n  static Status SetNativeSharedPtr(\n      JNIEnv *env, jobject object,\n      const std::shared_ptr<void> &native_shared_ptr,\n      const char *member = NATIVE_HANDLER_MEMBER_NAME);\n\n private:\n  JNINativeObject();\n  void SetNativeSharedPtr(std::shared_ptr<void> native_shared_ptr);\n\n  static JNINativeObject *FromHandle(jlong handle);\n\n  static JNINativeObject *FromJObject(\n      JNIEnv *env, jobject object,\n      const char *member = NATIVE_HANDLER_MEMBER_NAME);\n\n  jobject jni_object_;\n  std::shared_ptr<void> native_shared_ptr_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_JNI_BIND_H_\n"
  },
  {
    "path": "src/java/jni/log.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"log.h\"\n\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n\nnamespace modelbox {\n\nLoggerJava::LoggerJava() = default;\nLoggerJava::~LoggerJava() {\n  modelbox::ScopedJvm scope;\n  UnReg(scope.GetJNIEnv());\n}\n\nvoid LoggerJava::Print(LogLevel level, const char *file, int lineno,\n                       const char *func, const char *msg) {\n  modelbox::ScopedJvm scope;\n  auto *env = scope.GetJNIEnv();\n  if (env == nullptr) {\n    return;\n  }\n  auto *jfile = env->NewStringUTF(file);\n  auto jlineno = (jint)lineno;\n  auto *jfunc = env->NewStringUTF(func);\n  auto *jmsg = env->NewStringUTF(msg);\n  env->CallVoidMethod(logger_, log_mid_, (jlong)level, jfile, jlineno, jfunc,\n                      jmsg);\n  env->DeleteLocalRef(jfile);\n  env->DeleteLocalRef(jfunc);\n  env->DeleteLocalRef(jmsg);\n}\n\njobject LoggerJava::GetJNICaller() { return logger_; }\n\nvoid LoggerJava::RegJNICaller(JNIEnv *env, jobject logger) {\n  UnReg(env);\n  jclass cls = env->GetObjectClass(logger);\n  if (cls == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"logger class is not found.\");\n    return;\n  }\n\n  Defer { env->DeleteLocalRef(cls); };\n\n  jmethodID mid = env->GetMethodID(\n      cls, \"jniPrintCallback\",\n      
\"(JLjava/lang/String;ILjava/lang/String;Ljava/lang/String;)V\");\n  if (mid == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID,\n                               \"no print callback function found.\");\n    return;\n  }\n\n  logger_ = env->NewGlobalRef(logger);\n  log_mid_ = mid;\n}\n\nvoid LoggerJava::UnReg(JNIEnv *env) {\n  if (logger_ == nullptr) {\n    return;\n  }\n\n  env->DeleteGlobalRef(logger_);\n  logger_ = nullptr;\n  log_mid_ = nullptr;\n}\n\nvoid LoggerJava::SetLogLevel(LogLevel level) { level_ = level; }\n\nLogLevel LoggerJava::GetLogLevel() { return level_; }\n\nLoggerJavaWapper::LoggerJavaWapper() = default;\n\nLoggerJavaWapper::~LoggerJavaWapper() { ModelBoxLogger.SetLogger(nullptr); }\n\nvoid LoggerJavaWapper::RegLogFunc(const std::string &pylog) {\n  ModelBoxLogger.SetLogger(logger_java_);\n}\n\nstd::shared_ptr<Logger> LoggerJavaWapper::GetLogger() {\n  return ModelBoxLogger.GetLogger();\n}\n\nvoid LoggerJavaWapper::SetLogger(const std::shared_ptr<Logger> &logger) {\n  ModelBoxLogger.SetLogger(logger);\n}\n\nvoid LoggerJavaWapper::SetLogLevel(LogLevel level) {\n  logger_java_->SetLogLevel(level);\n}\n\nvoid LoggerJavaWapper::PrintExt(LogLevel level, const char *file, int lineno,\n                                const char *func, const char *msg) {\n  ModelBoxLogger.Print(level, file, lineno, func, \"%s\", msg);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/java/jni/log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JAVA_LIB_LOG_H_\n#define MODELBOX_JAVA_LIB_LOG_H_\n\n#include <jni.h>\n#include <modelbox/base/log.h>\n\nnamespace modelbox {\n\nclass LoggerJava : public Logger {\n public:\n  LoggerJava();\n  ~LoggerJava() override;\n\n  void Print(LogLevel level, const char *file, int lineno, const char *func,\n             const char *msg) override;\n\n  void RegJNICaller(JNIEnv *env, jobject logger);\n\n  jobject GetJNICaller();\n\n  void UnReg(JNIEnv *env);\n\n  void SetLogLevel(LogLevel level) override;\n\n  LogLevel GetLogLevel() override;\n\n private:\n  jobject logger_{nullptr};\n  jmethodID log_mid_{nullptr};\n  LogLevel level_{LOG_OFF};\n};\n\nclass LoggerJavaWapper {\n public:\n  LoggerJavaWapper();\n  virtual ~LoggerJavaWapper();\n\n  void RegLogFunc(const std::string &pylog);\n\n  void SetLogLevel(LogLevel level);\n\n  std::shared_ptr<Logger> GetLogger();\n\n  void SetLogger(const std::shared_ptr<Logger> &logger);\n\n  void PrintExt(LogLevel level, const char *file, int lineno, const char *func,\n                const char *msg);\n  void Print(LogLevel level, const char *msg);\n\n private:\n  std::shared_ptr<LoggerJava> logger_java_ = std::make_shared<LoggerJava>();\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_JAVA_LIB_LOG_H_\n"
  },
  {
    "path": "src/java/jni/modelbox_jni.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox_jni.h\"\n\n#include \"com_modelbox_ModelBox.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/status.h\"\n#include \"scoped_jvm.h\"\n#include \"throw.h\"\n\nJNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *reserved) {\n  JNIEnv *env;\n  if (vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_8) != JNI_OK) {\n    return JNI_ERR;\n  }\n\n  modelbox::ScopedJvm::SetJavaVM(vm);\n\n  return JNI_VERSION_1_8;\n}\n\nJNIEXPORT void JNICALL JNI_OnUnload(JavaVM *vm, void *reserved) {\n  modelbox::ScopedJvm::SetJavaVM(nullptr);\n}\n\n/*\n * Class:     com_modelbox_ModelBox\n * Method:    SetDefaultScanPath\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_ModelBox_SetDefaultScanPath(\n    JNIEnv *env, jclass j_class, jstring j_path) {\n  const char *path = env->GetStringUTFChars(j_path, nullptr);\n  if (path == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID, \"invalid path\");\n    return;\n  }\n\n  modelbox::Drivers::SetDefaultScanPath(path);\n  env->ReleaseStringUTFChars(j_path, path);\n}\n\n/*\n * Class:     com_modelbox_ModelBox\n * Method:    SetDefaultInfoPath\n * Signature: (Ljava/lang/String;)V\n */\nJNIEXPORT void JNICALL Java_com_modelbox_ModelBox_SetDefaultInfoPath(\n    JNIEnv *env, jclass j_class, jstring j_path) 
{\n  const char *path = env->GetStringUTFChars(j_path, nullptr);\n  if (path == nullptr) {\n    modelbox::ModelBoxJNIThrow(env, modelbox::STATUS_INVALID, \"invalid path\");\n    return;\n  }\n\n  modelbox::Drivers::SetDefaultInfoPath(path);\n  env->ReleaseStringUTFChars(j_path, path);\n}"
  },
  {
    "path": "src/java/jni/modelbox_jni.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JNI_H_\n#define MODELBOX_JNI_H_\n\n#include <jni.h>\n\n#endif  // MODELBOX_JNI_H_\n"
  },
  {
    "path": "src/java/jni/scoped_jvm.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"scoped_jvm.h\"\n\n#include <memory>\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nJavaVM *ScopedJvm::jvm_;\n\nJavaVM *ScopedJvm::GetJavaVM() { return jvm_; }\n\nvoid ScopedJvm::SetJavaVM(JavaVM *vm) { jvm_ = vm; }\n\nScopedJvm::ScopedJvm() {\n  JNIEnv *env = nullptr;\n  if (jvm_ == nullptr) {\n    throw std::runtime_error(\"jvm pointer is not set\");\n    return;\n  }\n\n  auto ret = jvm_->GetEnv((void **)&env, JNI_VERSION_1_6);\n  if (ret == JNI_OK) {\n    env_ = env;\n    return;\n  }\n\n  ret = jvm_->AttachCurrentThread((void **)&env, nullptr);\n  if (ret != JNI_OK) {\n    throw std::runtime_error(\"Attach jvm thread failed.\");\n    return;\n  }\n\n  env_ = env;\n  do_attach_ = true;\n}\n\nScopedJvm::~ScopedJvm() {\n  if (jvm_ == nullptr) {\n    return;\n  }\n\n  if (do_attach_ == true) {\n    jvm_->DetachCurrentThread();\n  }\n\n  env_ = nullptr;\n}\n\nJNIEnv *ScopedJvm::GetJNIEnv() { return env_; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/java/jni/scoped_jvm.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SCOPED_JVM_H_\n#define MODELBOX_SCOPED_JVM_H_\n\n#include <jni.h>\n\nnamespace modelbox {\n\nclass ScopedJvm {\n public:\n  ScopedJvm();\n  virtual ~ScopedJvm();\n\n  JNIEnv *GetJNIEnv();\n\n  static JavaVM *GetJavaVM();\n\n  static void SetJavaVM(JavaVM *vm);\n\n private:\n  static JavaVM *jvm_;\n  bool do_attach_{false};\n  JNIEnv *env_{nullptr};\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_SCOPED_JVM_H_\n"
  },
  {
    "path": "src/java/jni/throw.cc",
    "content": "\n\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"throw.h\"\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox_jni.h\"\n#include \"utils.h\"\n\nnamespace modelbox {\n\nconst char *kModelBoxExceptionCodeMap[] = {\n    \"com/modelbox/ModelBoxException$Success\",\n    \"com/modelbox/ModelBoxException$Fault\",\n    \"com/modelbox/ModelBoxException$Notfound\",\n    \"com/modelbox/ModelBoxException$Invalid\",\n    \"com/modelbox/ModelBoxException$Again\",\n    \"com/modelbox/ModelBoxException$Badconf\",\n    \"com/modelbox/ModelBoxException$Nomem\",\n    \"com/modelbox/ModelBoxException$Range\",\n    \"com/modelbox/ModelBoxException$Exist\",\n    \"com/modelbox/ModelBoxException$Internal\",\n    \"com/modelbox/ModelBoxException$Busy\",\n    \"com/modelbox/ModelBoxException$Permit\",\n    \"com/modelbox/ModelBoxException$Notsupport\",\n    \"com/modelbox/ModelBoxException$Nodata\",\n    \"com/modelbox/ModelBoxException$Nospace\",\n    \"com/modelbox/ModelBoxException$Nobufs\",\n    \"com/modelbox/ModelBoxException$Overflow\",\n    \"com/modelbox/ModelBoxException$Inprogress\",\n    \"com/modelbox/ModelBoxException$Already\",\n    \"com/modelbox/ModelBoxException$Timedout\",\n    \"com/modelbox/ModelBoxException$Nostream\",\n    \"com/modelbox/ModelBoxException$Reset\",\n    
\"com/modelbox/ModelBoxException$Continue\",\n    \"com/modelbox/ModelBoxException$Edquot\",\n    \"com/modelbox/ModelBoxException$Stop\",\n    \"com/modelbox/ModelBoxException$Shutdown\",\n    \"com/modelbox/ModelBoxException$Eof\",\n    \"com/modelbox/ModelBoxException$Noent\",\n    \"com/modelbox/ModelBoxException$Deadlock\",\n    \"com/modelbox/ModelBoxException$Noresponse\",\n    \"com/modelbox/ModelBoxException$Io\",\n};\n\nstatic void do_jni_throw(JNIEnv *env, const char *except_name,\n                         const char *message) {\n  jclass ecls = env->FindClass(except_name);\n\n  if (ecls == nullptr) {\n    ecls = env->FindClass(\"java/lang/RuntimeException\");\n    message = \"Modelbox: cannot find exception\";\n    if (ecls == nullptr) {\n      MBLOG_ERROR << \"Modelbox-JNI: Failed to throw exception\";\n      return;\n    }\n  }\n\n  Defer { env->DeleteLocalRef(ecls); };\n  int ret = env->ThrowNew(ecls, message);\n  if (ret < 0) {\n    MBLOG_ERROR << \"Modelbox-JNI: Fatal Error\";\n  }\n}\n\nvoid ModelBoxJNIThrow(JNIEnv *env, Status &status) {\n  ModelBoxJNIThrow(env, status.Code(), status.WrapErrormsgs());\n}\n\nvoid ModelBoxJNIThrow(JNIEnv *env, StatusCode code,\n                      const std::string &errormsg) {\n  if (code == STATUS_OK) {\n    return;\n  }\n\n  if (code >= sizeof(kModelBoxExceptionCodeMap) / sizeof(char *)) {\n    do_jni_throw(env, \"java/lang/RuntimeException\",\n                 \"Modelbox: Status is invalid.\");\n    return;\n  }\n\n  do_jni_throw(env, kModelBoxExceptionCodeMap[code], errormsg.c_str());\n}\n\nvoid ModelBoxJNIThrow(JNIEnv *env, const char *runtime_exception,\n                      const char *errmsg) {\n  do_jni_throw(env, runtime_exception, errmsg);\n}\n\nstd::shared_ptr<Status> ModelboxJNICatchException(JNIEnv *env) {\n  auto status = std::make_shared<Status>();\n  if (env->ExceptionCheck() == JNI_FALSE) {\n    return status;\n  }\n\n  auto *j_throw = env->ExceptionOccurred();\n  if (j_throw == nullptr) {\n   
 return nullptr;\n  }\n  Defer { env->DeleteLocalRef(j_throw); };\n\n  jclass throwable_class = env->FindClass(\"java/lang/Throwable\");\n  if (throwable_class == nullptr) {\n    return nullptr;\n  }\n  Defer { env->DeleteLocalRef(throwable_class); };\n\n  for (size_t i = 0; i < sizeof(kModelBoxExceptionCodeMap) / sizeof(char *);\n       i++) {\n    auto *j_cls = env->FindClass(kModelBoxExceptionCodeMap[i]);\n    if (j_cls == nullptr) {\n      continue;\n    }\n    Defer { env->DeleteLocalRef(j_cls); };\n\n    if (env->IsInstanceOf(j_throw, j_cls)) {\n      jmethodID get_message = env->GetMethodID(throwable_class, \"getMessage\",\n                                               \"()Ljava/lang/String;\");\n      if (get_message == nullptr) {\n        return nullptr;\n      }\n\n      auto *j_message = (jstring)env->CallObjectMethod(j_throw, get_message);\n      if (j_message == nullptr) {\n        return nullptr;\n      }\n      Defer { env->DeleteLocalRef(j_message); };\n      auto msg = modelbox::jstring2string(env, j_message);\n      *status = {static_cast<StatusCode>(i), msg};\n      env->ExceptionClear();\n      return status;\n    }\n  }\n\n  return nullptr;\n}\n\nstd::string ModelboxExceptionMsg(JNIEnv *env, std::string *stack) {\n  std::string msg;\n  auto *j_throw = env->ExceptionOccurred();\n  if (j_throw == nullptr) {\n    return \"\";\n  }\n  Defer { env->DeleteLocalRef(j_throw); };\n  env->ExceptionClear();\n\n  jclass throwable_class = env->FindClass(\"java/lang/Throwable\");\n  if (throwable_class == nullptr) {\n    return \"\";\n  }\n  Defer { env->DeleteLocalRef(throwable_class); };\n\n  jmethodID get_message =\n      env->GetMethodID(throwable_class, \"getMessage\", \"()Ljava/lang/String;\");\n  if (get_message == nullptr) {\n    return \"\";\n  }\n\n  auto *j_message = (jstring)env->CallObjectMethod(j_throw, get_message);\n  if (j_message == nullptr) {\n    return \"\";\n  }\n  Defer { env->DeleteLocalRef(j_message); };\n  msg = 
modelbox::jstring2string(env, j_message);\n\n  if (stack == nullptr) {\n    return msg;\n  }\n\n  /**\n   * get stack\n   */\n  jmethodID get_stack = env->GetMethodID(throwable_class, \"getStackTrace\",\n                                         \"()[Ljava/lang/StackTraceElement;\");\n  if (get_stack == nullptr) {\n    return msg;\n  }\n\n  auto *j_stack = (jobjectArray)env->CallObjectMethod(j_throw, get_stack);\n  if (j_stack == nullptr) {\n    return msg;\n  }\n  Defer { env->DeleteLocalRef(j_stack); };\n\n  jclass stack_element_class = env->FindClass(\"java/lang/StackTraceElement\");\n  if (stack_element_class == nullptr) {\n    return msg;\n  }\n  Defer { env->DeleteLocalRef(stack_element_class); };\n\n  jmethodID get_class_name = env->GetMethodID(\n      stack_element_class, \"getClassName\", \"()Ljava/lang/String;\");\n  jmethodID get_method_name = env->GetMethodID(\n      stack_element_class, \"getMethodName\", \"()Ljava/lang/String;\");\n  jmethodID get_file_name = env->GetMethodID(stack_element_class, \"getFileName\",\n                                             \"()Ljava/lang/String;\");\n  jmethodID get_line_number =\n      env->GetMethodID(stack_element_class, \"getLineNumber\", \"()I\");\n  if (get_class_name == nullptr || get_method_name == nullptr ||\n      get_file_name == nullptr || get_line_number == nullptr) {\n    return msg;\n  }\n\n  jsize len = env->GetArrayLength(j_stack);\n  for (int i = 0; i < len; i++) {\n    auto *j_element = env->GetObjectArrayElement(j_stack, i);\n    if (j_element == nullptr) {\n      continue;\n    }\n    Defer { env->DeleteLocalRef(j_element); };\n\n    auto *j_class_name =\n        (jstring)env->CallObjectMethod(j_element, get_class_name);\n    auto *j_method_name =\n        (jstring)env->CallObjectMethod(j_element, get_method_name);\n    auto *j_file_name =\n        (jstring)env->CallObjectMethod(j_element, get_file_name);\n    int j_line_number = (jlong)env->CallIntMethod(j_element, get_line_number);\n    if 
(j_class_name == nullptr || j_method_name == nullptr ||\n        j_file_name == nullptr) {\n      continue;\n    }\n\n    *stack += modelbox::jstring2string(env, j_class_name) + \".\" +\n              modelbox::jstring2string(env, j_method_name) + \"(\" +\n              modelbox::jstring2string(env, j_file_name) + \":\";\n    if (j_line_number < 0) {\n      *stack += \"jni\";\n    } else {\n      *stack += std::to_string(j_line_number);\n    }\n    *stack += \")\\n\";\n    env->DeleteLocalRef(j_class_name);\n    env->DeleteLocalRef(j_method_name);\n    env->DeleteLocalRef(j_file_name);\n  }\n\n  return msg;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/java/jni/throw.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JNI_THROW_H_\n#define MODELBOX_JNI_THROW_H_\n\n#include <jni.h>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nconstexpr const char *JNIEXCEPT_NullPointer = \"java/lang/NullPointerException\";\nconstexpr const char *JNIEXCEPT_OutOfMemoryError = \"java/lang/OutOfMemoryError\";\nconstexpr const char *JNIEXCEPT_RuntimeException = \"java/lang/RuntimeException\";\nconstexpr const char *JNIEXCEPT_IllegalArgumentException =\n    \"java/lang/IllegalArgumentException\";\n\nvoid ModelBoxJNIThrow(JNIEnv *env, StatusCode code,\n                      const std::string &errormsg);\n\nvoid ModelBoxJNIThrow(JNIEnv *env, Status &status);\n\nvoid ModelBoxJNIThrow(JNIEnv *env, const char *runtime_exception,\n                      const char *errmsg);\n\nstd::shared_ptr<Status> ModelboxJNICatchException(JNIEnv *env);\n\nstd::string ModelboxExceptionMsg(JNIEnv *env, std::string *stack = nullptr);\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_JNI_THROW_H_\n"
  },
  {
    "path": "src/java/jni/utils.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"utils.h\"\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nstd::string jstring2string(JNIEnv *env, jstring jStr) {\n  if (!jStr) {\n    return \"\";\n  }\n\n  jclass stringClass = env->GetObjectClass(jStr);\n  jmethodID getBytes =\n      env->GetMethodID(stringClass, \"getBytes\", \"(Ljava/lang/String;)[B\");\n  auto *const stringJbytes = (jbyteArray)env->CallObjectMethod(\n      jStr, getBytes, env->NewStringUTF(\"UTF-8\"));\n\n  auto length = (size_t)env->GetArrayLength(stringJbytes);\n  jbyte *pBytes = env->GetByteArrayElements(stringJbytes, nullptr);\n\n  std::string ret = std::string((char *)pBytes, length);\n  env->ReleaseByteArrayElements(stringJbytes, pBytes, JNI_ABORT);\n\n  env->DeleteLocalRef(stringJbytes);\n  env->DeleteLocalRef(stringClass);\n  return ret;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/java/jni/utils.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JNI_UTILS_H_\n#define MODELBOX_JNI_UTILS_H_\n\n#include <jni.h>\n\n#include <memory>\n#include <string>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nstd::string jstring2string(JNIEnv *env, jstring jStr);\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_JNI_UTILS_H_\n"
  },
  {
    "path": "src/java/pom.xml",
    "content": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n  <modelVersion>4.0.0</modelVersion>\n\n  <groupId>com.modelbox</groupId>\n  <artifactId>modelbox</artifactId>\n  <version>1.0.0</version>\n\n  <properties>\n    <maven.compiler.source>1.8</maven.compiler.source>\n    <maven.compiler.target>1.8</maven.compiler.target>\n    <buildDirectory>${project.basedir}/target</buildDirectory>\n  </properties>\n\n  <build>\n    <directory>${buildDirectory}</directory>\n    <plugins>\n      <plugin>\n        <artifactId>maven-compiler-plugin</artifactId>\n        <version>3.7.0</version>\n        <configuration>\n          <compilerArgs>\n            <arg>-h</arg>\n            <arg>${buildDirectory}</arg>\n          </compilerArgs>\n        </configuration>\n      </plugin>\n    </plugins>\n  </build>\n\n  <dependencies>\n    <dependency>\n      <groupId>junit</groupId>\n      <artifactId>junit</artifactId>\n      <version>4.13.2</version>\n      <scope>test</scope>\n    </dependency>\n\n    <dependency>\n      <groupId>org.json</groupId>\n      <artifactId>json</artifactId>\n      <version>20180130</version>\n      <scope>test</scope>\n    </dependency>\n  </dependencies>\n</project>\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Buffer.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport java.nio.ByteBuffer;\n\n/**\n * Modelbox Buffer\n */\npublic class Buffer extends NativeObject {\n  private Buffer() {\n\n  }\n\n  /**\n   * Build buffer by size\n   * @param size\n   */\n  public void build(long size) throws ModelBoxException {\n    BufferBuild(size);\n  }\n\n  /**\n   * build buffer from bytes[]\n   * @param data\n   */\n  public void build(byte[] data) throws ModelBoxException {\n    BufferBuild(data);\n  }\n\n  /**\n   * get bytes[] from buffer\n   * @return\n   */\n  public byte[] getData() throws ModelBoxException {\n    return BufferGetData();\n  }\n\n  /**\n   * get direct buffer.\n   * <p>WARNING: Do not hold ByteBuffer beyond the scope, otherwise it will cause dangling pointers and trigger coredump\n   * @return direct buffer\n   */\n  public ByteBuffer getDirectData() throws ModelBoxException {\n    return BufferGetDirectData();\n  }\n\n  /*\n   * Any error on buffer\n   */\n  public boolean hasError() {\n    return BufferHasError();\n  }\n\n  /**\n   * Set error to buffer\n   * @param code error code\n   * @param message error message\n   */\n  public void setError(String code, String message) throws ModelBoxException {\n    BufferSetError(code, message);\n  }\n\n  /**\n   * Get error code\n   * @return error code\n   */\n  public String 
getErrorCode() {\n    return BufferGetErrorCode();\n  }\n\n  /**\n   * Get error message\n   * @return error message\n   */\n  public String getErrorMsg() {\n    return BufferGetErrorMsg();\n  }\n\n  /**\n   * Get buffer length in byte\n   * @return\n   */\n  public long getBytes() {\n    return BufferGetBytes();\n  }\n\n  /**\n   * Copy meta from another buffer\n   * @param buffer another buffer\n   * @param isOverWrite overwrite exist meta\n   * @throws ModelBoxException\n   */\n  public void copyMeta(Buffer buffer, boolean isOverWrite) throws ModelBoxException {\n    BufferCopyMeta(buffer, isOverWrite);\n  }\n\n  /**\n   * Copy meta from another buffer\n   * @param buffer another meta\n   */\n  public void copyMeta(Buffer buffer) {\n    BufferCopyMeta(buffer, false);\n  }\n\n  /**\n   * Set long to meta data\n   * @key meta key\n   * @value meta value\n   */ \n  public void setMetaLong(String key, long value) throws ModelBoxException {\n    BufferSetMetaLong(key, value);\n  }\n\n  /**\n   * Set int to meta data\n   * @key meta key\n   * @value meta value\n   */\n  public void setMetaInt(String key, int value) throws ModelBoxException {\n    BufferSetMetaInt(key, value);\n  }\n\n  /**\n   * Set String to meta data\n   * @key meta key\n   * @value meta value\n   */\n  public void setMetaString(String key, String value) throws ModelBoxException {\n    BufferSetMetaString(key, value);\n  }\n\n  /**\n   * Set double to meta data\n   * @key meta key\n   * @value meta value\n   */\n  public void setMetaDouble(String key, double value) throws ModelBoxException {\n    BufferSetMetaDouble(key, value);\n  }\n\n  /**\n   * Set float to meta data\n   * @key meta key\n   * @value meta value\n   */\n  public void setMetaFloat(String key, float value) throws ModelBoxException {\n    BufferSetMetaFloat(key, value);\n  }\n\n  /**\n   * Set boolean to meta data\n   * @key meta key\n   * @value meta value\n   */\n  public void setMetaBoolean(String key, boolean value) 
throws ModelBoxException {\n    BufferSetMetaBoolean(key, value);\n  }\n\n  /**\n   * Get long from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public long getMetaLong(String key) throws ModelBoxException {\n    return BufferGetMetaLong(key);\n  }\n\n  /**\n   * Get int from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public long getMetaLong(String key, long defaultValue) {\n    try {\n      return getMetaLong(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get int from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public int getMetaInt(String key) throws ModelBoxException {\n    return BufferGetMetaInt(key);\n  }\n\n  /**\n   * Get int from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public int getMetaInt(String key, int defaultValue) {\n    try {\n      return getMetaInt(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get String from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public String getMetaString(String key) throws ModelBoxException {\n    return BufferGetMetaString(key);\n  }\n\n  /**\n   * Get String from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public String getMetaString(String key, String defaultValue) {\n    try {\n      return getMetaString(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get double from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public double getMetaDouble(String key) throws ModelBoxException {\n    return BufferGetMetaDouble(key);\n  }\n\n  /**\n   * Get double from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public double getMetaDouble(String key, double defaultValue) {\n    try {\n      return 
getMetaDouble(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get double from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public float getMetaFloat(String key) throws ModelBoxException {\n    return BufferGetMetaFloat(key);\n  }\n\n  /**\n   * Get float from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public float getMetaFloat(String key, float defaultValue) {\n    try {\n      return getMetaFloat(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get boolean from meta data\n   * @key meta key\n   * @return meta value\n   */\n  public boolean getMetaBool(String key) throws ModelBoxException {\n    return BufferGetMetaBool(key);\n  }\n\n  /**\n   * Get boolean from meta data\n   * @key meta key\n   * @default default value\n   * @return meta value\n   */\n  public boolean getMetaBool(String key, boolean defaultValue) {\n    try {\n      return getMetaBool(key);\n    } catch (ModelBoxException e) {\n      return defaultValue;\n    }\n  }\n\n  /**\n   * Get buffer device\n   * @return\n   */\n  public Device getDevice() {\n    return BufferGetDevice();\n  }\n\n  private native void BufferBuild(long size);\n\n  private native void BufferBuild(byte[] data);\n\n  private native byte[] BufferGetData();\n\n  private native ByteBuffer BufferGetDirectData();\n\n  private native boolean BufferHasError();\n\n  private native void BufferSetError(String code, String message);\n\n  private native String BufferGetErrorCode();\n\n  private native String BufferGetErrorMsg();\n\n  private native long BufferGetBytes();\n\n  private native void BufferSetMetaLong(String key, long value);\n\n  private native void BufferSetMetaInt(String key, int value);\n\n  private native void BufferSetMetaString(String key, String value);\n\n  private native void BufferSetMetaDouble(String key, double value);\n\n  private native void 
BufferSetMetaFloat(String key, float value);\n\n  private native void BufferSetMetaBoolean(String key, boolean value);\n\n  private native long BufferGetMetaLong(String key);\n\n  private native int BufferGetMetaInt(String key);\n\n  private native String BufferGetMetaString(String key);\n\n  private native double BufferGetMetaDouble(String key);\n\n  private native float BufferGetMetaFloat(String key);\n\n  private native boolean BufferGetMetaBool(String key);\n\n  private native void BufferCopyMeta(Buffer buffer, boolean isOverWrite);\n\n  private native Device BufferGetDevice();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/BufferList.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n\nimport java.nio.ByteBuffer;\nimport java.util.Iterator;\n\n/**\n * Modelbox Bufferlist\n */\n\npublic class BufferList extends NativeObject implements Iterable<Buffer> {\n\n  class BufferListIterator implements Iterator<Buffer> {\n\n    @Override\n    public boolean hasNext() {\n      if (index < bufferList.size()) {\n        return true;\n      }\n      return false;\n    }\n\n    @Override\n    public Buffer next() {\n      Buffer buff = bufferList.at(index);\n      index++;\n      return buff;\n    }\n\n    protected void SetBufferList(BufferList list) {\n      this.bufferList = list;\n    }\n\n    private BufferList bufferList;\n    int index = 0;\n  }\n\n  public Iterator<Buffer> iterator() {\n    BufferListIterator itr = new BufferListIterator();\n    itr.SetBufferList(this);\n    return itr;\n  }\n\n  /**\n   * modelbox buffer list.\n   * Create Bufferlist from external data map\n   */\n  private BufferList() {\n\n  }\n\n  /**\n   * Builder buffer, create memory\n   * @param sizeList buffer size list\n   */\n  public void build(int[] sizeList) {\n    BufferListBuild(sizeList);\n  }\n\n  /**\n   * Get buffer at index\n   * @param index position of buffer\n   * @return buffer\n   */\n  public Buffer at(long index) {\n    return BufferListAt(index);\n  }\n\n  /**\n   * Get number 
of buffer in bufferlist\n   * @return number of buffer\n   */\n  public long size() {\n    return BufferListSize();\n  }\n\n  /**\n   * Push new buffer to buffer list\n   * @param buffer pointer to buffer\n   */\n  public void pushBack(Buffer buffer) {\n    BufferListPushBack(buffer);\n  }\n\n  /**\n   * Push new data to buffer list\n   * @param data pointer to data\n   */\n  public void pushBack(byte[] data) {\n    BufferListPushBack(data);\n  }\n\n  /**\n   * Assign buffer list\n   * @param buffers buffer list to assign\n   */\n  public void assign(Buffer[] buffers) {\n    BufferListAssign(buffers);\n  }\n\n  /**\n   * Get buffer data pointer from beginning\n   * @return buffer data pointer from beginning\n   */\n  public byte[] getData() {\n    return BufferListGetData();\n  }\n\n  /**\n   * Get buffer data pointer from beginning\n   * <p>WARNING: Do not hold ByteBuffer beyond the scope, otherwise it will cause dangling pointers and trigger coredump\n   * @return buffer data pointer from beginning\n   */\n  public ByteBuffer getDirectData() {\n    return BufferListGetDirectData();\n  }\n\n  /**\n   * Get buffer data pointer from beginning\n   * WARNING: Do not hold ByteBuffer beyond the scope, otherwise it will cause dangling pointers and trigger coredump\n   * @param index position of buffer\n   * @return buffer data pointer from beginning\n   */\n  public ByteBuffer getDirectData(int index) {\n    return BufferListGetDirectData(index);\n  }\n\n  /**\n   * Get device of buffer list\n   * @return pointer to device\n   */\n  public Device getDevice() {\n    return BufferListGetDevice();\n  }\n\n  /**\n   * Reset buffer list\n   * @return reset result\n   */\n  public void reset() {\n    BufferListReset();\n  }\n\n  private native void BufferListBuild(int[] sizeList);\n\n  private native Buffer BufferListAt(long index);\n\n  private native long BufferListSize();\n\n  private native void BufferListPushBack(Buffer buffer);\n\n  private native void 
BufferListPushBack(byte[] data);\n\n  private native void BufferListAssign(Buffer[] buffers);\n\n  private native byte[] BufferListGetData();\n\n  private native ByteBuffer BufferListGetDirectData();\n\n  private native ByteBuffer BufferListGetDirectData(int index);\n\n  private native Device BufferListGetDevice();\n\n  private native void BufferListReset();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Configuration.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport java.util.ArrayList;\n\n/**\n * modelbox Configuration\n */\npublic class Configuration extends NativeObject {\n  public Configuration() {\n    setNativeHandle(ConfigurationNew());\n  }\n\n  /**\n   * Parser configuration from file\n   * @param file toml or json file\n   * @throws ModelBoxException\n   */\n  public void Parser(String file) throws ModelBoxException {\n    ConfigurationParser(file);\n  }\n\n  /**\n   * Get boolean key\n   * @param key\n   * @return\n   */\n  public boolean getBoolean(String key, boolean defaultValue) {\n    return ConfigurationGetBoolean(key, defaultValue);\n  }\n\n  /**\n   * Get int key\n   * @param key\n   * @return\n   */\n  public int getInt(String key, int defaultValue) {\n    return ConfigurationGetInt(key, defaultValue);\n  }\n\n  /**\n   * Get long key\n   * @param key\n   * @return\n   */\n  public long getLong(String key, long defaultValue) {\n    return ConfigurationGetLong(key, defaultValue);\n  }\n\n  /**\n   * Get String key\n   * @param key\n   * @return\n   */\n  public String getString(String key, String defaultValue) {\n    return ConfigurationGetString(key, defaultValue);\n  }\n\n  /**\n   * Get float key\n   * @param key\n   * @return\n   */\n  public float getFloat(String key, float defaultValue) {\n    return 
ConfigurationGetFloat(key, defaultValue);\n  }\n\n  /**\n   * Get double key\n   * @param key\n   * @return\n   */\n  public double getDouble(String key, double defaultValue) {\n    return ConfigurationGetDouble(key, defaultValue);\n  }\n\n  /**\n   * Set boolean key\n   * @param key\n   * @param value\n   */\n  public void set(String key, boolean value) {\n    ConfigurationSet(key, value);\n  }\n\n  /**\n   * Set int key\n   */\n  public void set(String key, int value) {\n    ConfigurationSet(key, value);\n  }\n\n  /**\n   * Set long key\n   */\n  public void set(String key, long value) {\n    ConfigurationSet(key, value);\n  }\n\n\n  /**\n   * Set float key\n   * @param key\n   * @param value\n   */\n  public void set(String key, float value) {\n    ConfigurationSet(key, value);\n  }\n\n  /**\n   * Set double key\n   * @param key\n   * @param value\n   */\n  public void set(String key, double value) {\n    ConfigurationSet(key, value);\n  }\n\n  /**\n   * Set string key\n   * @param key\n   * @param value\n   */\n  public void set(String key, String value) {\n    ConfigurationSet(key, value);\n  }\n\n  /**\n   * Get string array by key\n   * @param key\n   * @return\n   */\n  public ArrayList<String> getStrings(String key, ArrayList<String> defaultValues) {\n    return ConfigurationGetStrings(key, defaultValues);\n  }\n\n  /**\n   * set string array\n   * @param key\n   * @param values\n   */\n  public void set(String key, ArrayList<String> values) {\n    ConfigurationSet(key, values);\n  }\n\n  private native boolean ConfigurationGetBoolean(String key, boolean defaultValue);\n\n  private native int ConfigurationGetInt(String key, int defaultValue);\n\n  private native long ConfigurationGetLong(String key, long defaultValue);\n\n  private native String ConfigurationGetString(String key, String defaultValue);\n\n  private native float ConfigurationGetFloat(String key, float defaultValue);\n\n  private native double ConfigurationGetDouble(String key, double 
defaultValue);\n\n  private native void ConfigurationSet(String key, boolean value);\n\n  private native void ConfigurationSet(String key, int value);\n\n  private native void ConfigurationSet(String key, long value);\n\n  private native void ConfigurationSet(String key, float value);\n\n  private native void ConfigurationSet(String key, double value);\n\n  private native void ConfigurationSet(String key, String value);\n\n  private native ArrayList<String> ConfigurationGetStrings(String key,\n      ArrayList<String> defaultValue);\n\n  private native void ConfigurationSet(String key, ArrayList<String> values);\n\n  private native void ConfigurationParser(String file);\n\n  private native long ConfigurationNew();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/DataContext.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\npublic class DataContext extends NativeObject {\n  private DataContext() {\n\n  }\n\n  /**\n   * Get input port bufferlist\n   * @param portName port name\n   * @return BufferList\n   */\n  public BufferList input(String portName) {\n    return DataContext_Input(portName);\n  }\n\n  /**\n   * Get output port bufferlist\n   * @param portName port name\n   * @return BufferList\n   */\n  public BufferList output(String portName) {\n    return DataContext_Output(portName);\n  }\n\n  /**\n   * Get external port bufferlist\n   * @return BufferList\n   */\n  public BufferList external() {\n    return DataContext_External();\n  }\n\n  /**\n   * Has error\n   * @return boolean\n   */\n  public boolean hasError() {\n    return DataContext_HasError();\n  }\n\n  /**\n   * Send event to flowunit\n   * @param event\n   */\n  public void sendEvent(FlowUnitEvent event) {\n    DataContext_SendEvent(event);\n  }\n\n  /**\n   * Set private \n   * @param key private key\n   * @param priv private object\n   */\n  public void setPrivate(String key, Object priv) {\n    DataContext_SetPrivate(key, priv);\n  }\n\n  /**\n   * Get private\n   * @param key private key\n   * @return object\n   */\n  public Object getPrivate(String key) {\n    return DataContext_GetPrivate(key);\n  }\n\n  /**\n   * Get input meta\n  
 * @param portName portname\n   * @return data meta\n   */\n  public DataMeta getInputMeta(String portName) {\n    return DataContext_GetInputMeta(portName);\n  }\n\n  /**\n   * Set output meta\n   * @param portName portname\n   * @param dataMeta data meta\n   */\n  public void setOutputMeta(String portName, DataMeta dataMeta) {\n    DataContext_SetOututMeta(portName, dataMeta);\n  }\n\n  /**\n   * Get session context\n   * @return session context\n   */\n  public SessionContext getSessionContext() {\n    return DataContext_GetSessionContext();\n  }\n\n  /**\n   * Get session configuration\n   * @return session configuration\n   */\n  public Configuration getSessionConfig() {\n    return DataContext_GetSessionConfig();\n  }\n\n  /**\n   * get Statistics\n   * @return Statistics\n   */\n  public StatisticsItem getStatistics() {\n    return DataContext_GetStatistics();\n  }\n\n  private native BufferList DataContext_Input(String portName);\n\n  private native BufferList DataContext_Output(String portName);\n\n  private native BufferList DataContext_External();\n\n  private native boolean DataContext_HasError();\n\n  private native void DataContext_SendEvent(FlowUnitEvent event);\n\n  private native void DataContext_SetPrivate(String key, Object priv);\n\n  private native Object DataContext_GetPrivate(String key);\n\n  private native DataMeta DataContext_GetInputMeta(String portName);\n\n  private native void DataContext_SetOututMeta(String portName, DataMeta dataMeta);\n\n  private native SessionContext DataContext_GetSessionContext();\n\n  private native Configuration DataContext_GetSessionConfig();\n\n  private native StatisticsItem DataContext_GetStatistics();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/DataMeta.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * modelbox DataMeta\n */\npublic class DataMeta extends NativeObject {\n  public DataMeta() {\n    setNativeHandle(DataMetaNew());\n  }\n\n  /**\n   * set meta data\n   * @param key meta key\n   * @param value meta value\n   */\n  public void set(String key, String value) {\n    DataMetaSet(key, value);\n  }\n\n  public String getString(String key) {\n    return DataMetaGetString(key);\n  }\n\n  private native long DataMetaNew();\n\n  private native void DataMetaSet(String key, String meta);\n\n  private native String DataMetaGetString(String key);\n\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Device.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * modelbox device\n */\npublic class Device extends NativeObject {\n  private Device() {\n\n  }\n\n  /**\n   * Get device type\n   * @return device type in string\n   */\n  public String getType() {\n    return DeviceGetType();\n  }\n\n  /**\n   * Get device ID\n   * @return device id in string\n   */\n  public String getDeviceID() {\n    return DeviceGetDeviceID();\n  }\n\n  private native String DeviceGetType();\n\n  private native String DeviceGetDeviceID();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/ExternalDataMap.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport java.util.HashMap;\n\n/**\n * modelbox ExternalDataMap\n */\npublic class ExternalDataMap extends NativeObject {\n  private Object priv_data = null;\n\n  private ExternalDataMap() {\n    // Create this object from Flow.\n  }\n\n  /**\n   * Create buffer list \n   * @return bufferlist \n   */\n  public BufferList CreateBufferList() {\n    return ExternalDataMap_CreateBufferList();\n  }\n\n  /**\n   * Set output data meta\n   * @param name meta name\n   * @param meta datameta object\n   */\n  public void setOutputMeta(String name, DataMeta meta) {\n    ExternalDataMap_SetOutputMeta(name, meta);\n  }\n\n  /**\n   * Send bufferlist to port\n   * @param portName portname\n   * @param bufferlist bufferlist \n   * @throws ModelBoxException\n   */\n  public void send(String portName, BufferList bufferlist) throws ModelBoxException {\n    ExternalDataMap_Send(portName, bufferlist);\n  }\n\n  /**\n   * Recv bufferlist map\n   * @param timeout recv timeout in milliseconds,    \n   *   timeout > 0 if no data blocking for timeout(ms) and return null.\n   *   timeout = 0 if no data blocking until data is ready.\n   *   timeout < 0 if no data return immediately. 
and return null.\n   * @return output portname and bufferlist map\n   * @throws ModelBoxException\n   */\n  public HashMap<String, BufferList> recv(long timeout) throws ModelBoxException {\n    return ExternalDataMap_Recv(timeout);\n  }\n\n  /**\n   * Recv bufferlist map, blocking until data is ready.\n   * @return output portname and bufferlist map\n   * @throws ModelBoxException\n   */\n  public HashMap<String, BufferList> recv() throws ModelBoxException {\n    return recv(0);\n  }\n\n  /**\n   * Close datamap, no input data anymore\n   */\n  public void close() {\n    ExternalDataMap_Close();\n  }\n\n  /**\n   * shutdown datamap, and exit.\n   */\n  public void shutdown() {\n    ExternalDataMap_Shutdown();\n  }\n\n  /**\n   * Set user private object\n   * @param o\n   */\n  public void setPrivate(Object o) {\n    priv_data = o;\n  }\n\n  /**\n   * Get user private object\n   * @param <T> user object type\n   * @return user object\n   */\n  @SuppressWarnings(\"unchecked\")\n  public <T> T getPrivate() {\n    try {\n      return (T) priv_data;\n    } catch (ClassCastException e) {\n      return null;\n    }\n  }\n\n  /**\n   * Get session context\n   * @return session context\n   */\n  public SessionContext getSessionContext() {\n    return ExternalDataMap_GetSessionContext();\n  }\n\n  /**\n   * Get sessioncontext configuration\n   * @return sessioncontext configuration\n   */\n  public Configuration getSessionConfig() {\n    return ExternalDataMap_GetSessionConfig();\n  }\n\n  /**\n   * Get last error on datamap\n   * @return flowunit error\n   */\n  public FlowUnitError getLastError() {\n    return ExternalDataMap_GetLastError();\n  }\n\n  private native BufferList ExternalDataMap_CreateBufferList();\n\n  private native void ExternalDataMap_SetOutputMeta(String name, DataMeta meta);\n\n  private native void ExternalDataMap_Send(String portName, BufferList bufferList);\n\n  private native HashMap<String, BufferList> ExternalDataMap_Recv(long timeout);\n\n  
private native void ExternalDataMap_Close();\n\n  private native void ExternalDataMap_Shutdown();\n\n  private native SessionContext ExternalDataMap_GetSessionContext();\n\n  private native Configuration ExternalDataMap_GetSessionConfig();\n\n  private native FlowUnitError ExternalDataMap_GetLastError();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/ExternalDataSelect.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport java.util.ArrayList;\n\n/**\n * modelbox ExternalDataSelect\n */\npublic class ExternalDataSelect extends NativeObject {\n  public ExternalDataSelect() {\n    setNativeHandle(ExternalDataSelect_New());\n  }\n\n  /**\n   * Register ExternalDataMap to ExternalDataSelect \n   * @param dataMap datamap object\n   */\n  public void register(ExternalDataMap dataMap) {\n    ExternalDataSelect_RegisterExternalData(dataMap);\n  }\n\n  /**\n   * From ExternalDataMap from ExternalDataSelect \n   * @param dataMap datamap object\n   */\n  public void remove(ExternalDataMap dataMap) {\n    ExternalDataSelect_RemoveExternalData(dataMap);\n  }\n\n  /**\n   * Wait from datamap ready\n   * @param timeout wait timeout. if timeout < 0, wait until data ready.\n   * @return datamap ready to recv\n   * @throws ModelBoxException\n   */\n  public ArrayList<ExternalDataMap> select(long timeout) throws ModelBoxException {\n    return ExternalDataSelect_SelectExternalData(timeout);\n  }\n\n  /**\n   * Wait from datamap ready\n   * @param timeout wait timeout. 
wait until data ready.\n   * @return datamap ready to recv\n   * @throws ModelBoxException\n   */\n  public ArrayList<ExternalDataMap> select() throws ModelBoxException {\n    return select(-1);\n  }\n\n  private native long ExternalDataSelect_New();\n\n  private native void ExternalDataSelect_RegisterExternalData(ExternalDataMap dataMap);\n\n  private native void ExternalDataSelect_RemoveExternalData(ExternalDataMap dataMap);\n\n  private native ArrayList<ExternalDataMap> ExternalDataSelect_SelectExternalData(long timeout);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Flow.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * modelbox Flow\n */\npublic class Flow extends NativeObject {\n  public Flow() {\n    setNativeHandle(FlowNew());\n  }\n\n  /**\n   * init flow from inline graph\n   * @param name graph name\n   * @param graph inline graph\n   * @throws ModelBoxException\n   */\n  public void init(String name, String graph) throws ModelBoxException {\n    FlowInit(name, graph);\n  }\n\n  /**\n   * init flow from graph file\n   * @param file path to graph file\n   * @throws ModelBoxException\n   */\n  public void init(String file) throws ModelBoxException {\n    FlowInit(file);\n  }\n\n  /**\n   * init flow by name, args and flow directory\n   * @param name flow name\n   * @param args flow args\n   * @param flowDir scan flow directory\n   * @throws ModelBoxException\n   */\n  public void initByName(String name, Configuration args, String flowDir) throws ModelBoxException {\n    FlowInitByName(name, args, flowDir);\n  }\n\n  /**\n   * init flow by name, args and flow directory\n   * @param name flow name\n   * @param args flow args\n   * @throws ModelBoxException\n   */\n  public void initByName(String name, Configuration args) throws ModelBoxException {\n    FlowInitByName(name, args);\n  }\n\n  /**\n   * init flow by name, args and flow directory\n   * @param name flow name\n   * @param args flow 
args\n   * @throws ModelBoxException\n   */\n  public void initByName(String name) throws ModelBoxException {\n    FlowInitByName(name, null);\n  }\n\n  /**\n   * Register flowunit\n   * @param flowunit_builder flowunit builder\n   * @throws ModelBoxException\n   */\n  public void RegisterFlowUnit(FlowUnitBuilder flowunit_builder) throws ModelBoxException {\n    FlowRegisterFlowUnit(flowunit_builder);\n  }\n\n  /**\n   * Start run flow\n   * @throws ModelBoxException\n   */\n  public void startRun() throws ModelBoxException {\n    FlowStartRun();\n  }\n\n  /**\n   * Wait flow finish\n   * @throws ModelBoxException\n   */\n  public void waitFor() throws ModelBoxException {\n    waitFor(0);\n  }\n\n  /**\n   * Wait flow finish\n   * @param timeout wait timeout, in millisecond \n   * @return whether timeout\n   * @throws ModelBoxException\n   */\n  public boolean waitFor(long timeout) throws ModelBoxException {\n    Status retval = new Status();\n    return waitFor(timeout, retval);\n  }\n\n  /**\n   * Wait flor finish, and get flow result\n   * @param timeout wait timeout, in millisecond \n   * @param retval flow result.\n   * @return whether timeout\n   * @throws ModelBoxException\n   */\n  public boolean waitFor(long timeout, Status retval) throws ModelBoxException {\n    return FlowWait(timeout, retval);\n  }\n\n  /**\n   * Stop flow\n   * @throws ModelBoxException\n   */\n  public void stop() throws ModelBoxException {\n    FlowStop();\n  }\n\n  /**\n   * Create external data for sending data to flow\n   * @return ExternalDataMap object\n   * @throws ModelBoxException\n   */\n  public ExternalDataMap createExternalDataMap() throws ModelBoxException {\n    return FlowCreateExternalDataMap();\n  }\n\n  /**\n   * Create stream io to send and recv stream data\n   * @return ExternalDataMap object\n   * @throws ModelBoxException\n   */\n  public FlowStreamIO CreateStreamIO() throws ModelBoxException {\n    return FlowCreateStreamIO();\n  }\n\n  private native long 
FlowNew();\n\n  private native boolean FlowWait(long timeout, Status status);\n\n  private native void FlowStartRun();\n\n  private native void FlowInit(String name, String graph);\n\n  private native void FlowInit(String file);\n\n  private native void FlowInitByName(String name, Configuration args, String flowDir);\n\n  private native void FlowInitByName(String name, Configuration args);\n\n  private native void FlowRegisterFlowUnit(FlowUnitBuilder flowunit_builder);\n\n  private native void FlowStop();\n\n  private native ExternalDataMap FlowCreateExternalDataMap();\n\n  private native FlowStreamIO FlowCreateStreamIO();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowStreamIO.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * modelbox FlowStreamIO\n */\npublic class FlowStreamIO extends NativeObject {\n  private FlowStreamIO() {\n\n  }\n\n  /**\n   * Create an empty buffer on cpu device\n   * @return cpu buffer\n   * @throws ModelBoxException\n   */\n  public Buffer createBuffer() throws ModelBoxException {\n    return FlowStreamIO_CreateBuffer();\n  }\n\n  /**\n   * Send buffer of this stream to flow\n   * @param inputName input node name of flow\n   * @param buffer buffer of this stream\n   * @throws ModelBoxException\n   */\n  public void send(String inputName, Buffer buffer) throws ModelBoxException {\n    FlowStreamIO_Send(inputName, buffer);\n  }\n\n  /**\n   * Send buffer of this stream to flow\n   * @param inputName input node name of flow\n   * @param data data of this stream\n   * @throws ModelBoxException\n   */\n  public void send(String inputName, byte[] data) throws ModelBoxException {\n    FlowStreamIO_Send(inputName, data);\n  }\n\n  /**\n   * @brief recv buffer of this stream result from flow\n   * @param output_name output node name of flow\n   * @param buffer result buffer of this stream\n   * @param timeout wait result timeout\n   * @return Status\n   **/\n  /**\n   * Recv buffer of this stream result from flow\n   * @param outputName output node name of flow\n   * @param timeout 
wait result timeout\n   *   timeout > 0 if no data blocking for timeout(ms) and return null.\n   *   timeout = 0 if no data blocking until data is ready.\n   *   timeout < 0 if no data return immediately. and return null.\n   * @return result buffer of this stream\n   * @throws ModelBoxException\n   */\n  public Buffer recv(String outputName, long timeout) throws ModelBoxException {\n    return FlowStreamIO_Recv(outputName, timeout);\n  }\n\n  /**\n   * Close input stream, mark stream end\n   */\n  public void closeInput() {\n    FlowStreamIO_CloseInput();\n  }\n\n  private native Buffer FlowStreamIO_CreateBuffer();\n\n  private native void FlowStreamIO_Send(String inputName, Buffer buffer);\n\n  private native void FlowStreamIO_Send(String inputName, byte[] data);\n\n  private native Buffer FlowStreamIO_Recv(String outputName, long timeout);\n\n  private native void FlowStreamIO_CloseInput();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnit.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nabstract public class FlowUnit extends NativeObject {\n  public FlowUnit() {\n    setNativeHandle(FlowUnit_New());\n  }\n\n  /**\n   * Flowunit Open \n   * @param opts\n   * @return \n   */\n  public void open(Configuration opts) throws ModelBoxException {}\n\n  /**\n   * Flowunit Close\n   * @return\n   */\n  public void close() throws ModelBoxException {}\n\n  /**\n   * FlowUnit data process\n   * @param data_ctx\n   * @return\n   */\n  abstract public Status process(DataContext data_ctx) throws ModelBoxException;\n\n  /**\n   * Flowunit data pre\n   * @param data_ctx\n   * @return\n   */\n  public void dataPre(DataContext data_ctx) throws ModelBoxException {}\n\n  /**\n   * FlowUnit data Post;\n   * @param data_ctx\n   * @return\n   */\n  public void dataPost(DataContext data_ctx) throws ModelBoxException {}\n\n  private native long FlowUnit_New();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitBuilder.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * Flowunit Builder\n */\nabstract public class FlowUnitBuilder extends NativeObject {\n  public FlowUnitBuilder() {\n    setNativeHandle(FlowUnitBuilderNew());\n  }\n\n  /**\n   * Probe flowunit description.\n   * @param desc\n   */\n  abstract public void probe(FlowUnitDesc desc) throws ModelBoxException;\n\n  /**\n   * Build flowunit.\n   * @return FlowUnit\n   */\n  abstract public FlowUnit build() throws ModelBoxException;\n\n  private native long FlowUnitBuilderNew();\n\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitDesc.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\n/**\n * Flowunit description\n */\npublic class FlowUnitDesc extends NativeObject {\n    public enum FlowOutputType {\n        ORIGIN, EXPAND, COLLAPSE,\n    }\n\n    public enum FlowType {\n        STREAM, NORMAL,\n    }\n\n    public enum ConditionType {\n        NONE, IF_ELSE,\n    }\n\n    public enum LoopType {\n        NOT_LOOP, LOOP,\n    }\n\n    public FlowUnitDesc() {\n        setNativeHandle(FlowUnitDescNew());\n    }\n\n    /**\n     * Get flowunit name.\n     * @return String\n     */\n    public String GetFlowUnitName() {\n        return FlowUnitDescGetFlowUnitName();\n    }\n\n    /**\n     * Get flowunit type.\n     * @return flowunit type\n     */\n    public String GetFlowUnitType() {\n        return FlowUnitDescGetFlowUnitType();\n    }\n\n    /**\n     * Get flowunit alias name.\n     * @return flowunit alias name\n     */\n    public String GetFlowUnitAliasName() {\n        return FlowUnitDescGetFlowUnitAliasName();\n    }\n\n    /**\n     * Get flowunit argument.\n     * @return flowunit argument\n     */\n    public String GetFlowUnitArgument() {\n        return FlowUnitDescGetFlowUnitArgument();\n    }\n\n    /**\n     * Set flowunit output name.\n     * @param flowunit output name\n     */\n    public void SetFlowUnitName(String flowunit_name) {\n        
FlowUnitDescSetFlowUnitName(flowunit_name);\n    }\n\n    /**\n     * Set flowunit type.\n     * @param flowunit type\n     */\n    public void SetFlowUnitType(String flowunit_type) {\n        FlowUnitDescSetFlowUnitType(flowunit_type);\n    }\n\n    /**\n     * Add input to flowunit\n     * @param flowunit_input input\n     */\n    public void AddFlowUnitInput(FlowUnitInput flowunit_input) throws ModelBoxException {\n        FlowUnitDescAddFlowUnitInput(flowunit_input);\n    }\n\n    /**\n     * Add output to flowunit\n     * @param flowunit_output output\n     */\n    public void AddFlowUnitOutput(FlowUnitOutput flowunit_output) throws ModelBoxException {\n        FlowUnitDescAddFlowUnitOutput(flowunit_output);\n    }\n\n    /**\n     * Set flowunit condition type\n     * @param condition_type condition type\n     */\n    public void SetConditionType(ConditionType condition_type) {\n        FlowUnitDescSetConditionType(condition_type.ordinal());\n    }\n\n    /**\n     * Set flowunit loop type\n     * @param loop_type loop type\n     */\n    public void SetLoopType(LoopType loop_type) {\n        FlowUnitDescSetLoopType(loop_type.ordinal());\n    }\n\n    /**\n     * Set flowunit output type\n     * @param output_type output type\n     */\n    public void SetOutputType(FlowOutputType output_type) {\n        FlowUnitDescSetOutputType(output_type.ordinal());\n    }\n\n    /**\n     * Set flowunit type\n     * @param flow_type flow type\n     */\n    public void SetFlowType(FlowType flow_type) {\n        FlowUnitDescSetFlowType(flow_type.ordinal());\n    }\n\n    /**\n     * Set flowunit same count\n     * @param is_stream_same_count is same count\n     */\n    public void SetStreamSameCount(boolean is_stream_same_count) {\n        FlowUnitDescSetStreamSameCount(is_stream_same_count);\n    }\n\n    /**\n     * Set flowunit input contiguous\n     * @param is_input_contiguous flowunit input contiguous\n     */\n    public void SetInputContiguous(boolean 
is_input_contiguous) {\n        FlowUnitDescSetInputContiguous(is_input_contiguous);\n    }\n\n    /**\n     * Set flowunit is source nice\n     * @param is_resource_nice flowunit is source nice\n     */\n    public void SetResourceNice(boolean is_resource_nice) {\n        FlowUnitDescSetResourceNice(is_resource_nice);\n    }\n\n    /**\n     * Set flowunit is collapse\n     * @param is_collapse_all flowunit is collapse\n     */\n    public void SetCollapseAll(boolean is_collapse_all) {\n        FlowUnitDescSetCollapseAll(is_collapse_all);\n    }\n\n    /**\n     * Set flowunit is visible exception\n     * @param is_exception_visible flowunit is visible exception\n     */\n    public void SetExceptionVisible(boolean is_exception_visible) {\n        FlowUnitDescSetExceptionVisible(is_exception_visible);\n    }\n\n    /**\n     * Set flowunit description info\n     * @param description flowunit description info\n     */\n    public void SetDescription(String description) {\n        FlowUnitDescSetDescription(description);\n    }\n\n    /**\n     * Set flowunit max batch size\n     * @param max_batch_size flowunit max batch size\n     */\n    public void SetMaxBatchSize(long max_batch_size) {\n        FlowUnitDescSetMaxBatchSize(max_batch_size);\n    }\n\n    /**\n     * Set flowunit default batch size\n     * @param default_batch_size flowunit max batch size\n     */\n    public void SetDefaultBatchSize(long default_batch_size) {\n        FlowUnitDescSetDefaultBatchSize(default_batch_size);\n    }\n\n    private native long FlowUnitDescNew();\n\n    private native String FlowUnitDescGetFlowUnitName();\n\n    private native String FlowUnitDescGetFlowUnitType();\n\n    private native String FlowUnitDescGetFlowUnitAliasName();\n\n    private native String FlowUnitDescGetFlowUnitArgument();\n\n    private native void FlowUnitDescSetFlowUnitName(String flowunit_name);\n\n    private native void FlowUnitDescSetFlowUnitType(String flowunit_type);\n\n    private native void 
FlowUnitDescAddFlowUnitInput(FlowUnitInput flowunit_input);\n\n    private native void FlowUnitDescAddFlowUnitOutput(FlowUnitOutput flowunit_output);\n\n    private native void FlowUnitDescSetConditionType(long condition_type);\n\n    private native void FlowUnitDescSetLoopType(long loop_type);\n\n    private native void FlowUnitDescSetOutputType(long output_type);\n\n    private native void FlowUnitDescSetFlowType(long flow_type);\n\n    private native void FlowUnitDescSetStreamSameCount(boolean is_stream_same_count);\n\n    private native void FlowUnitDescSetInputContiguous(boolean is_input_contiguous);\n\n    private native void FlowUnitDescSetResourceNice(boolean is_resource_nice);\n\n    private native void FlowUnitDescSetCollapseAll(boolean is_collapse_all);\n\n    private native void FlowUnitDescSetExceptionVisible(boolean is_exception_visible);\n\n    private native void FlowUnitDescSetDescription(String description);\n\n    private native void FlowUnitDescSetMaxBatchSize(long max_batch_size);\n\n    private native void FlowUnitDescSetDefaultBatchSize(long default_batch_size);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitError.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\npublic class FlowUnitError extends NativeObject {\n  public FlowUnitError(String desc) {\n    setNativeHandle(FlowUnitError_New(desc));\n  }\n\n  public FlowUnitError(String node, String error_pos, Status error_status) {\n    setNativeHandle(FlowUnitError_New(node, error_pos, error_status));\n  }\n\n  /**\n   * Get flowunit error description\n   * @return error description\n   */\n  public String getDesc() {\n    return FlowUnitError_GetDesc();\n  }\n\n  /**\n   * Get flowunit error status\n   * @return\n   */\n  public Status GetStatus() {\n    return FlowUnitError_GetStatus();\n  }\n\n  private native long FlowUnitError_New(String desc);\n\n  private native long FlowUnitError_New(String node, String error_pos, Status error_status);\n\n  private native String FlowUnitError_GetDesc();\n\n  private native Status FlowUnitError_GetStatus();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitEvent.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\npublic class FlowUnitEvent extends NativeObject {\n\n  FlowUnitEvent() {\n    setNativeHandle(FlowUnitEventNew());\n  }\n\n  /**\n   * set data\n   * @param key data key\n   * @param value data value\n   */\n  public void set(String key, String value) {\n    FlowUnitEventSet(key, value);\n  }\n\n  /**\n   * set data\n   * @param key data key\n   * @param value data value\n   */\n  public void set(String key, Object object) {\n    FlowUnitEventSet(key, object);\n  }\n\n\n  public Object get(String key) {\n    return FlowUnitEventGet(key);\n  }\n\n  private native long FlowUnitEventNew();\n\n  private native void FlowUnitEventSet(String key, Object object);\n\n  private native Object FlowUnitEventGet(String key);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitInput.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\") {\n * };\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\npublic class FlowUnitInput extends NativeObject {\n\n    /**\n     * constructor of flowunit input\n     * @param name input name\n     */\n    public FlowUnitInput(String name) {\n        setNativeHandle(FlowUnitInput_New(name));\n    }\n\n    /**\n     * constructor of flowunit input\n     * @param name input name\n     * @param device_type device type\n     */\n    public FlowUnitInput(String name, String device_type) {\n        setNativeHandle(FlowUnitInput_New(name, device_type));\n    }\n\n    /**\n     * constructor of flowunit input\n     * @param name input name\n     * @param device_mem_flags device memory flags\n     */\n    public FlowUnitInput(String name, long device_mem_flags) {\n        setNativeHandle(FlowUnitInput_New(name, device_mem_flags));\n    }\n\n    /**\n     * constructor of flowunit input\n     * @param name input name\n     * @param device_type device type\n     * @param device_mem_flags device memory flags\n     */\n    public FlowUnitInput(String name, String device_type, long device_mem_flags) {\n        setNativeHandle(FlowUnitInput_New(name, device_type, device_mem_flags));\n    }\n\n    private native long FlowUnitInput_New(String name);\n\n    private native long FlowUnitInput_New(String name, String device_type);\n\n    private native long 
FlowUnitInput_New(String name, long device_mem_flags);\n\n    private native long FlowUnitInput_New(String name, String device_type, long device_mem_flags);\n\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/FlowUnitOutput.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\npublic class FlowUnitOutput extends NativeObject {\n  /**\n   * constructor of flowunit output\n   * @param name output name\n   */\n  public FlowUnitOutput(String name) {\n    setNativeHandle(FlowUnitOutput_New(name));\n  }\n\n  /**\n   * constructor of flowunit output\n   * @param name output name\n   * @param device_mem_flags device memory flag\n   */\n  public FlowUnitOutput(String name, long device_mem_flags) {\n    setNativeHandle(FlowUnitOutput_New(name, device_mem_flags));\n  }\n\n  private native long FlowUnitOutput_New(String name);\n\n  private native long FlowUnitOutput_New(String name, long device_mem_flags);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Log.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\n\npublic class Log extends NativeObject {\n  public enum LogLevel {\n    LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARN, LOG_ERROR, LOG_FATAL, LOG_OFF\n  }\n\n  public Log() {\n    setNativeHandle(LogNew());\n  }\n\n  /**\n   * modelbox default log append function, output log to console\n   * @param level log level\n   * @param file log file\n   * @param lineno log file lineno\n   * @param func log function\n   * @param msg log message\n   */\n  public void print(LogLevel level, String file, int lineno, String func, String msg) {\n    String timeStamp = new SimpleDateFormat(\"yyyy-MM-dd HH.mm.ss.SSS\").format(new Date());\n    System.out.printf(\"[%s][%s][%17s:%-4d] %s\\n\", timeStamp, level, file, lineno, msg);\n  }\n\n  public final void jniPrintCallback(long level, String file, int lineno, String func, String msg) {\n    print(LogLevel.LOG_INFO, file, lineno, func, msg);\n  }\n\n  /**\n   * Set log level\n   * @param level log level\n   */\n  public void setLogLevel(LogLevel level) {\n    LogSetLogLevel(level.ordinal());\n  }\n\n  /**\n   * Get log level\n   * @return level log level\n   */\n  public LogLevel getLogLevel() {\n    return LogLevel.values()[(int)LogGetLogLevel()];\n  }\n\n  /**\n   * Get current log appender\n   * 
@return\n   */\n  public static Log getLogger() {\n    return LogGetLogger();\n  }\n\n  /**\n   * Register log appender to modelbox\n   * @param log\n   */\n  public static void regLog(Log log) {\n    LogReg(log);\n  }\n\n  /**\n   * Unregister log appender, reset to default\n   */\n  public static void unRegLog() {\n    LogUnReg();\n  }\n\n  /**\n   * log debug\n   * @param msg message\n   */\n  public static void debug(String format, Object... params) {\n    printLog(LogLevel.LOG_DEBUG, format, params);\n  }\n\n  /**\n   * log debug\n   * @param msg message\n   */\n  public static void debug(String message) {\n    printLog(LogLevel.LOG_DEBUG, message);\n  }\n\n  /**\n   * log info\n   * @param msg message\n   */\n  public static void info(String format, Object... params) {\n    printLog(LogLevel.LOG_INFO, format, params);\n  }\n\n  /**\n   * log info\n   * @param msg message\n   */\n  public static void info(String message) {\n    printLog(LogLevel.LOG_INFO, message);\n  }\n\n  /**\n   * log notice\n   * @param msg message\n   */\n  public static void notice(String format, Object... params) {\n    printLog(LogLevel.LOG_NOTICE, format, params);\n  }\n\n  /**\n   * log notice\n   * @param msg message\n   */\n  public static void notice(String message) {\n    printLog(LogLevel.LOG_NOTICE, message);\n  }\n\n  /**\n   * log warn\n   * @param msg message\n   */\n  public static void warn(String format, Object... params) {\n    printLog(LogLevel.LOG_WARN, format, params);\n  }\n\n  /**\n   * log notice\n   * @param msg message\n   */\n  public static void warn(String message) {\n    printLog(LogLevel.LOG_WARN, message);\n  }\n\n  /**\n   * log error\n   * @param msg message\n   */\n  public static void error(String format, Object... 
params) {\n    printLog(LogLevel.LOG_ERROR, format, params);\n  }\n\n  /**\n   * log error\n   * @param msg message\n   */\n  public static void error(String message) {\n    printLog(LogLevel.LOG_ERROR, message);\n  }\n\n  /**\n   * log fatal\n   * @param msg message\n   */\n  public static void fatal(String format, Object... params) {\n    printLog(LogLevel.LOG_FATAL, format, params);\n  }\n\n  /**\n   * log fatal\n   * @param msg message\n   */\n  public static void fatal(String message) {\n    printLog(LogLevel.LOG_FATAL, message);\n  }\n\n  private static void printLog(LogLevel level, String format, Object... params) {\n    StackTraceElement stack = Thread.currentThread().getStackTrace()[3];\n    String file = stack.getFileName();\n    int lineno = stack.getLineNumber();\n    String func = stack.getMethodName();\n    LogPrint(level.ordinal(), file, lineno, func, String.format(format, params));\n  }\n\n  public native long LogNew();\n\n  public native void LogSetLogLevel(long level);\n\n  public native long LogGetLogLevel();\n\n  public static native Log LogGetLogger();\n\n  public static native void LogReg(Log log);\n\n  public static native void LogUnReg();\n\n  public static native void LogPrint(long level, String file, int lineno, String func, String msg);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/ModelBox.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n\npackage com.modelbox;\n\n/**\n * modelbox defulat JNI\n */\npublic class ModelBox {\n  static {\n    System.loadLibrary(\"modelbox-jni\");\n  }\n\n  // CHECKSTYLE:OFF\n  public static native void SetDefaultScanPath(String path);\n\n  public static native void SetDefaultInfoPath(String path);\n  // CHECKSTYLE:ON\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/ModelBoxException.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport java.io.IOException;\n\n/**\n * modelbox exception type\n */\npublic class ModelBoxException extends IOException {\n  public ModelBoxException() {}\n\n  public ModelBoxException(String message) {\n    super(message);\n  }\n\n  static public class Success extends ModelBoxException {\n    public Success() {\n      super();\n    }\n\n    public Success(String message) {\n      super(message);\n    }\n  }\n  static public class Fault extends ModelBoxException {\n    public Fault() {\n      super();\n    }\n\n    public Fault(String message) {\n      super(message);\n    }\n  }\n  static public class Notfound extends ModelBoxException {\n    public Notfound() {\n      super();\n    }\n\n    public Notfound(String message) {\n      super(message);\n    }\n  }\n  static public class Invalid extends ModelBoxException {\n    public Invalid() {\n      super();\n    }\n\n    public Invalid(String message) {\n      super(message);\n    }\n  }\n  static public class Again extends ModelBoxException {\n    public Again() {\n      super();\n    }\n\n    public Again(String message) {\n      super(message);\n    }\n  }\n  static public class Badconf extends ModelBoxException {\n    public Badconf() {\n      super();\n    }\n\n    public Badconf(String message) {\n      super(message);\n    }\n  
}\n  static public class Nomem extends ModelBoxException {\n    public Nomem() {\n      super();\n    }\n\n    public Nomem(String message) {\n      super(message);\n    }\n  }\n  static public class Range extends ModelBoxException {\n    public Range() {\n      super();\n    }\n\n    public Range(String message) {\n      super(message);\n    }\n  }\n  static public class Exist extends ModelBoxException {\n    public Exist() {\n      super();\n    }\n\n    public Exist(String message) {\n      super(message);\n    }\n  }\n  static public class Internal extends ModelBoxException {\n    public Internal() {\n      super();\n    }\n\n    public Internal(String message) {\n      super(message);\n    }\n  }\n  static public class Busy extends ModelBoxException {\n    public Busy() {\n      super();\n    }\n\n    public Busy(String message) {\n      super(message);\n    }\n  }\n  static public class Permit extends ModelBoxException {\n    public Permit() {\n      super();\n    }\n\n    public Permit(String message) {\n      super(message);\n    }\n  }\n  static public class Notsupport extends ModelBoxException {\n    public Notsupport() {\n      super();\n    }\n\n    public Notsupport(String message) {\n      super(message);\n    }\n  }\n  static public class Nodata extends ModelBoxException {\n    public Nodata() {\n      super();\n    }\n\n    public Nodata(String message) {\n      super(message);\n    }\n  }\n  static public class Nospace extends ModelBoxException {\n    public Nospace() {\n      super();\n    }\n\n    public Nospace(String message) {\n      super(message);\n    }\n  }\n  static public class Nobufs extends ModelBoxException {\n    public Nobufs() {\n      super();\n    }\n\n    public Nobufs(String message) {\n      super(message);\n    }\n  }\n  static public class Overflow extends ModelBoxException {\n    public Overflow() {\n      super();\n    }\n\n    public Overflow(String message) {\n      super(message);\n    }\n  }\n  static public class 
Inprogress extends ModelBoxException {\n    public Inprogress() {\n      super();\n    }\n\n    public Inprogress(String message) {\n      super(message);\n    }\n  }\n  static public class Already extends ModelBoxException {\n    public Already() {\n      super();\n    }\n\n    public Already(String message) {\n      super(message);\n    }\n  }\n  static public class Timedout extends ModelBoxException {\n    public Timedout() {\n      super();\n    }\n\n    public Timedout(String message) {\n      super(message);\n    }\n  }\n  static public class Nostream extends ModelBoxException {\n    public Nostream() {\n      super();\n    }\n\n    public Nostream(String message) {\n      super(message);\n    }\n  }\n  static public class Reset extends ModelBoxException {\n    public Reset() {\n      super();\n    }\n\n    public Reset(String message) {\n      super(message);\n    }\n  }\n  static public class Continue extends ModelBoxException {\n    public Continue() {\n      super();\n    }\n\n    public Continue(String message) {\n      super(message);\n    }\n  }\n  static public class Edquot extends ModelBoxException {\n    public Edquot() {\n      super();\n    }\n\n    public Edquot(String message) {\n      super(message);\n    }\n  }\n  static public class Stop extends ModelBoxException {\n    public Stop() {\n      super();\n    }\n\n    public Stop(String message) {\n      super(message);\n    }\n  }\n  static public class Shutdown extends ModelBoxException {\n    public Shutdown() {\n      super();\n    }\n\n    public Shutdown(String message) {\n      super(message);\n    }\n  }\n  static public class Eof extends ModelBoxException {\n    public Eof() {\n      super();\n    }\n\n    public Eof(String message) {\n      super(message);\n    }\n  }\n  static public class Noent extends ModelBoxException {\n    public Noent() {\n      super();\n    }\n\n    public Noent(String message) {\n      super(message);\n    }\n  }\n  static public class Deadlock extends 
ModelBoxException {\n    public Deadlock() {\n      super();\n    }\n\n    public Deadlock(String message) {\n      super(message);\n    }\n  }\n  static public class Noresponse extends ModelBoxException {\n    public Noresponse() {\n      super();\n    }\n\n    public Noresponse(String message) {\n      super(message);\n    }\n  }\n  static public class Io extends ModelBoxException {\n    public Io() {\n      super();\n    }\n\n    public Io(String message) {\n      super(message);\n    }\n  }\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/NativeObject.java",
    "content": "package com.modelbox;\n\n/**\n * modelbox JNI backend\n */\npublic class NativeObject {\n  private long native_handle = 0;\n\n  static {\n    System.loadLibrary(\"modelbox-jni\");\n  }\n\n  protected NativeObject() {\n    native_handle = 0;\n  }\n\n  /**\n   * Get native jni handle\n   * @return native jni handle\n   */\n  public long getNativeHandle() {\n    return native_handle;\n  }\n\n  /**\n   * Set native jni handle\n   * @param handle native jni handle\n   */\n  protected void setNativeHandle(long handle) {\n    if (native_handle != 0) {\n      delete_handle(native_handle);\n      native_handle = 0;\n    }\n    native_handle = handle;\n  }\n\n  /**\n   * Free native jni handle\n   */\n  @Override\n  @SuppressWarnings(\"deprecation\")\n  protected void finalize() {\n    try {\n      delete_handle(native_handle);\n      native_handle = 0;\n    } catch (Exception e) {\n      //pass\n    }\n  }\n\n  private native void delete_handle(long handle);\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/SessionContext.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\n/**\n * modelbox SessionContext\n */\npublic class SessionContext extends NativeObject {\n\n  private SessionContext() {\n\n  }\n\n  /**\n   * Set private object\n   * @param key private key\n   * @param object private object\n   */\n  public void setPrivate(String key, Object object) {\n    SessionContext_SetPrivate(key, object);\n  }\n\n  /**\n   * Get private object\n   * @param key private key\n   * @return object private object\n   */\n  public Object getPrivate(String key) {\n    return SessionContext_GetPrivate(key);\n  }\n\n  /**\n   * Set session ID\n   * @param sessionId session id\n   */\n  public void setSessionId(String sessionId) {\n    SessionContext_SetSessionId(sessionId);\n  }\n\n  /**\n   * Get session ID\n   * @return session id\n   */\n  public String getSessionId() {\n    return SessionContext_GetSessionId();\n  }\n\n  /**\n   * Get session configuration object\n   * @return session configuration object\n   */\n  public Configuration getConfig() {\n    return SessionContext_GetConfiguration();\n  }\n\n  /**\n   * Set error to session\n   * @param error flowunit error\n   */\n  public void setError(FlowUnitError error) {\n    SessionContext_SetError(error);\n  }\n\n  /**\n   * Get error from session\n   * @return error flowunit error\n   */\n  public FlowUnitError 
getError() {\n    return SessionContext_GetError();\n  }\n\n  private native void SessionContext_SetPrivate(String key, Object object);\n\n  private native Object SessionContext_GetPrivate(String key);\n\n  private native void SessionContext_SetSessionId(String sessionId);\n\n  private native String SessionContext_GetSessionId();\n\n  private native Configuration SessionContext_GetConfiguration();\n\n  private native void SessionContext_SetError(FlowUnitError error);\n\n  private native FlowUnitError SessionContext_GetError();\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/StatisticsItem.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\npublic class StatisticsItem {\n\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/Status.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\n/**\n * modelbox Status\n */\npublic class Status extends NativeObject {\n  public Status() {\n    setNativeHandle(StatusNew());\n  }\n\n  /**\n   * constructor of status\n   * @param code status code\n   */\n  public Status(StatusCode code) {\n    setNativeHandle(StatusNew());\n    StatusSetCode(code.ordinal());\n  }\n\n  /**\n   * constructor of status\n   * @param code status code\n   * @param msg  status message\n   */\n  public Status(StatusCode code, String msg) {\n    setNativeHandle(StatusNew());\n    StatusSetCode(code.ordinal());\n    StatusSetErrorMsg(msg);\n  }\n\n  /**\n   * constructor of status\n   * @param other another status\n   * @param msg  status message\n   */\n  public Status(Status other, String msg) {\n    setNativeHandle(StatusNew());\n    StatusWrap(other, other.Code().ordinal(), msg);\n  }\n\n  /**\n   * make status to string\n   * @return status in string\n   */\n  public String ToSting() {\n    return StatusToSting();\n  }\n\n  /**\n   * Get status code\n   * @return status code\n   */\n  public StatusCode Code() {\n    return StatusCode();\n  }\n\n  /**\n   * Get status code in string\n   * @return status code in string\n   */\n  public String StrCode() {\n    return StatusStrCode();\n  }\n\n  /**\n   * Set error message to status\n   * @param errorMsg 
error message\n   */\n  public void SetErrorMsg(String errorMsg) {\n    StatusSetErrorMsg(errorMsg);\n  }\n\n  /**\n   * Get error message\n   * @return error messsage\n   */\n  public String ErrorMsg() {\n    return StatusErrorMsg();\n  }\n\n  /**\n   * Get wrap error message\n   * @return wrap error message\n   */\n  public String WrapErrormsgs() {\n    return StatusWrapErrormsgs();\n  }\n\n  /**\n   * return success.\n   * @return return success\n   */\n  public static Status OK() {\n    return new Status(StatusCode.STATUS_SUCCESS);\n  }\n\n  /**\n   * return error with message.\n   * @return return error\n   */\n  public static Status Fail(String msg) {\n    return new Status(StatusCode.STATUS_FAULT, msg);\n  }\n\n  /**\n   * return error with message and code.\n   * @return return error\n   */\n  public static Status Fail(StatusCode code, String msg) {\n    return new Status(code, msg);\n  }\n\n  private native long StatusNew();\n\n  private native void StatusSetCode(long code);\n\n  private native void StatusWrap(Status status, long code, String msg);\n\n  private native String StatusToSting();\n\n  private native StatusCode StatusCode();\n\n  private native String StatusStrCode();\n\n  private native void StatusSetErrorMsg(String errorMsg);\n\n  private native String StatusErrorMsg();\n\n  private native String StatusWrapErrormsgs();\n\n  @Override\n  public boolean equals(Object o) {\n    if (o == null) {\n      return false;\n    }\n\n    if (o instanceof Status == false) {\n      return false;\n    }\n\n    if (Code() == ((Status) o).Code()) {\n      return true;\n    }\n\n    return false;\n  }\n}\n"
  },
  {
    "path": "src/java/src/main/java/com/modelbox/StatusCode.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\npublic enum StatusCode {\n  STATUS_SUCCESS,     /* Success, Avoid using this, use STATUS_OK instead.*/\n  STATUS_FAULT,       /* Fault */\n  STATUS_NOTFOUND,    /* Not Found */\n  STATUS_INVALID,     /* Invalid argument */\n  STATUS_AGAIN,       /* Try again */\n  STATUS_BADCONF,     /* Bad Config */\n  STATUS_NOMEM,       /* Out of memory */\n  STATUS_RANGE,       /* Out of range */\n  STATUS_EXIST,       /* Already exists */\n  STATUS_INTERNAL,    /* Internal error */\n  STATUS_BUSY,        /* Device or resource busy */\n  STATUS_PERMIT,      /* Operation not permitted */\n  STATUS_NOTSUPPORT,  /* Not supported */\n  STATUS_NODATA,      /* No data available */\n  STATUS_NOSPACE,     /* No space left */\n  STATUS_NOBUFS,      /* No buffer space available  */\n  STATUS_OVERFLOW,    /* Value too large for defined data type */\n  STATUS_INPROGRESS,  /* Operation now in progress */\n  STATUS_ALREADY,     /* Operation already in progress */\n  STATUS_TIMEDOUT,    /* Operation timed out */\n  STATUS_NOSTREAM,    /* Out of streams resources */\n  STATUS_RESET,       /* Request Reset by peer */\n  STATUS_CONTINUE,    /* Continue operation */\n  STATUS_EDQUOT,      /* Quota exceeded */\n  STATUS_STOP,        /* Stop operation */\n  STATUS_SHUTDOWN,    /* Shutdown operation */\n  STATUS_EOF,   
      /* End of file */\n  STATUS_NOENT,       /* No such file or directory */\n  STATUS_DEADLOCK,    /* Resource deadlock */\n  STATUS_NORESPONSE,  /* No response*/\n  STATUS_IO           /* Input/output error */\n}\n"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelBoxConfigurationTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport java.util.ArrayList;\nimport org.junit.Test;\n\npublic class ModelBoxConfigurationTest {\n\n  @Test\n  public void testKeys() throws Exception {\n    Configuration conf = new Configuration();\n    conf.set(\"bool\", true);\n    conf.set(\"int\", 1);\n    conf.set(\"long\", 1);\n    conf.set(\"float\", 1.1F);\n    conf.set(\"double\", 1.2);\n    conf.set(\"string\", \"string\");\n    ArrayList<String> lists = new ArrayList<String>();\n    lists.add(\"a\");\n    lists.add(\"b\");\n    lists.add(\"c\");\n    conf.set(\"strings\", lists);\n\n    assertEquals(conf.getBoolean(\"bool\", false), true);\n    assertEquals(conf.getInt(\"int\", 0), 1);\n    assertEquals(conf.getLong(\"long\", 0), 1);\n    assertEquals(conf.getFloat(\"float\", 0.0F), 1.1F, 0.1);\n    assertEquals(conf.getDouble(\"double\", 0.0F), 1.2, 0.1);\n    assertEquals(conf.getString(\"string\", \"\"), \"string\");\n\n    ArrayList<String> get_lists = conf.getStrings(\"strings\", null);\n    assertEquals(lists.size(), get_lists.size());\n\n    for (int i = 0; i < lists.size(); i++) {\n      assertEquals(lists.get(i), get_lists.get(i));\n    }\n  }\n\n  @Test\n  public void testKeysDefault() throws Exception {\n    Configuration conf = new Configuration();\n\n    
ArrayList<String> lists = new ArrayList<String>();\n    lists.add(\"a\");\n    lists.add(\"b\");\n    lists.add(\"c\");\n    conf.set(\"strings\", lists);\n\n    assertEquals(conf.getBoolean(\"bool\", true), true);\n    assertEquals(conf.getInt(\"int\", 1), 1);\n    assertEquals(conf.getLong(\"long\", 1), 1);\n    assertEquals(conf.getFloat(\"float\", 1.1F), 1.1F, 0.1);\n    assertEquals(conf.getDouble(\"double\", 1.1F), 1.2, 0.1);\n    assertEquals(conf.getString(\"string\", \"string\"), \"string\");\n\n    ArrayList<String> get_lists = conf.getStrings(\"strings\", lists);\n    assertEquals(lists, get_lists);\n  }\n}"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelBoxFlowTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertFalse;\nimport static org.junit.Assert.assertTrue;\nimport java.nio.ByteBuffer;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.Map;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\n\npublic class ModelBoxFlowTest {\n\n  public static class FlowUnitPassThrough extends FlowUnit {\n    public static class Builder extends FlowUnitBuilder {\n      static boolean is_direct = false;\n      @Override\n      public void probe(FlowUnitDesc desc) throws ModelBoxException {\n        desc.SetFlowUnitType(\"cpu\");\n        desc.SetFlowUnitName(\"javapassthrouth\");\n        desc.SetInputContiguous(false);\n        desc.SetFlowType(FlowUnitDesc.FlowType.NORMAL);\n        desc.AddFlowUnitInput(new FlowUnitInput(\"in\"));\n        desc.AddFlowUnitOutput(new FlowUnitOutput(\"out\"));\n      }\n\n      @Override\n      public FlowUnit build() throws ModelBoxException {\n        return new FlowUnitPassThrough(is_direct);\n      }\n\n      public void setDirect(boolean b) {\n        is_direct = b;\n      }\n    }\n\n    public FlowUnitPassThrough(boolean is_direct) {\n      this.is_direct = is_direct;\n    }\n\n    private boolean is_direct = false;\n\n    @Override\n    public void 
open(Configuration opts) throws ModelBoxException {\n      assertEquals(opts.getString(\"opt\", \"\"), \"value\");\n    }\n\n    @Override\n    public Status process(DataContext data_ctx) throws ModelBoxException {\n      BufferList in = data_ctx.input(\"in\");\n      BufferList out = data_ctx.output(\"out\");\n\n      if (is_direct) {\n        return process_directbuffer(data_ctx);\n      }\n\n      for (int i = 0; i < in.size(); i++) {\n        out.pushBack(in.at(i));\n      }\n\n      return Status.OK();\n    }\n\n    public Status process_directbuffer(DataContext data_ctx) throws ModelBoxException {\n      BufferList in = data_ctx.input(\"in\");\n      BufferList out = data_ctx.output(\"out\");\n\n      int sizes[] = new int[(int)in.size()];\n      for (int i = 0; i < in.size(); i++) {\n        sizes[i] = (int)in.at(i).getBytes();\n      }\n\n      out.build(sizes);\n      ByteBuffer outbuff = out.getDirectData();\n\n      for (int i = 0; i < in.size(); i++) {\n        outbuff.put(in.at(i).getDirectData());\n      }\n\n      return Status.OK();\n    }\n  }\n\n  @BeforeClass\n  public static void setUpTest() {\n    Log.unRegLog();\n    ModelBox.SetDefaultScanPath(TestConfig.TEST_DRIVER_DIR);\n  }\n\n  @Test(expected = ModelBoxException.Badconf.class)\n  public void testFlowNotExist() throws Exception {\n    String txt = \"[log]\\n\";\n    txt += \"level=\\\"INFO\\\"\\n\";\n    txt += \"[graph]\\n\";\n    txt += \"graphconf = '''digraph demo {{ \\n\";\n    txt += \"  notexist[type=flowunit, flowunit=notexist, device=cpu]\\n\";\n    txt += \"}}'''\\n\";\n    txt += \"format = \\\"graphviz\\\"\\n\";\n\n    System.out.println(txt);\n    Flow flow = new Flow();\n    flow.init(\"NOT-EXIST\", txt);\n    flow.startRun();\n  }\n\n  @Test\n  public void testFlowProcessData() throws Exception {\n    boolean get_result = false;\n    String txt = \"[log]\\n\";\n    txt += \"level=\\\"INFO\\\"\\n\";\n    txt += \"[graph]\\n\";\n    txt += \"graphconf = '''digraph demo {{ 
\\n\";\n    txt += \"  input[type=input] \\n\";\n    txt += \"  process[flowunit=passthrouth, device=cpu]\\n\";\n    txt += \"  output[type=output]\\n\";\n    txt += \"\\n\";\n    txt += \" input->process:in\";\n    txt += \" process:out -> output\\n\";\n    txt += \"}}'''\\n\";\n    txt += \"format = \\\"graphviz\\\"\\n\";\n\n    System.out.println(txt);\n    Flow flow = new Flow();\n    flow.init(\"Process\", txt);\n    flow.startRun();\n    ExternalDataMap datamap = flow.createExternalDataMap();\n    BufferList data = datamap.CreateBufferList();\n    assertEquals(data.getDevice().getType(), \"cpu\");\n\n    data.build(new int[] {0});\n    String msg = \"Hello world\";\n    data.at(0).build(msg.getBytes());\n    data.pushBack(msg.getBytes());\n    datamap.send(\"input\", data);\n    datamap.close();\n    datamap.setPrivate(\"this is a test\");\n    Log.info(\"session id is \" + datamap.getSessionContext().getSessionId());\n\n    ExternalDataSelect data_select = new ExternalDataSelect();\n    data_select.register(datamap);\n\n    while (true) {\n      try {\n        ArrayList<ExternalDataMap> datamaplist = data_select.select(1000 * 10);\n        if (datamaplist == null) {\n          assertFalse(true);\n          break;\n        }\n\n        for (ExternalDataMap outdatamap : datamaplist) {\n          System.out.println(\"Get: \" + outdatamap.getPrivate());\n          HashMap<String, BufferList> outdata = outdatamap.recv();\n          if (outdata == null) {\n            data_select.remove(outdatamap);\n            throw new ModelBoxException.Eof(\"exit\");\n          }\n\n          assertEquals(outdata.size(), 1);\n          assertEquals(datamap, outdatamap);\n          for (Map.Entry<String, BufferList> entry : outdata.entrySet()) {\n            String key = entry.getKey();\n            BufferList value = entry.getValue();\n            assertEquals(key, \"output\");\n            assertEquals(value.size(), 2);\n            String str = new 
String(value.at(0).getData());\n            assertEquals(msg, str);\n            Log.info(\"Message is: \" + str);\n            get_result = true;\n          }\n        }\n      } catch (ModelBoxException.Eof e) {\n        break;\n      } catch (ModelBoxException e) {\n        System.out.println(\"select failed, \" + e.getMessage());\n        assertFalse(true);\n        break;\n      }\n    }\n\n    assertTrue(get_result);\n    flow = null;\n    System.gc();\n  }\n\n  @Test\n  public void testFlowStreamIO() throws Exception {\n    boolean get_result = false;\n    String txt = \"[log]\\n\";\n    txt += \"level=\\\"INFO\\\"\\n\";\n    txt += \"[graph]\\n\";\n    txt += \"graphconf = '''digraph demo {{ \\n\";\n    txt += \"  input[type=input] \\n\";\n    txt += \"  process[flowunit=passthrouth, device=cpu]\\n\";\n    txt += \"  output[type=output]\\n\";\n    txt += \"\\n\";\n    txt += \" input->process:in\";\n    txt += \" process:out -> output\\n\";\n    txt += \"}}'''\\n\";\n    txt += \"format = \\\"graphviz\\\"\\n\";\n    \n\n    System.out.println(txt);\n    Flow flow = new Flow();\n    flow.init(\"Process\", txt);\n    flow.startRun();\n    FlowStreamIO streamio = flow.CreateStreamIO();\n    Buffer data = streamio.createBuffer();\n    assertEquals(data.getDevice().getType(), \"cpu\");\n    String msg = \"Hello world\";\n    data.build(msg.getBytes());\n    streamio.send(\"input\", data);\n    streamio.send(\"input\", msg.getBytes());\n    streamio.closeInput();\n    int count = 0;\n    \n    while (true) {\n      Buffer outdata = streamio.recv(\"output\", 1000 * 10);\n      if (outdata == null) {\n        break;\n      }\n      \n      String str = new String(outdata.getData());\n      assertEquals(msg, str);\n      Log.info(\"Message is: \" + str);\n      get_result = true;\n      count++;\n    }\n    \n    assertEquals(count, 2);\n    assertTrue(get_result);\n    flow = null;\n    System.gc();\n  }\n\n  @Test\n  public void testFlowRegister() throws Exception 
{\n    boolean get_result = false;\n    String txt = \"[log]\\n\";\n    txt += \"level=\\\"INFO\\\"\\n\";\n    txt += \"[graph]\\n\";\n    txt += \"graphconf = '''digraph demo {{ \\n\";\n    txt += \"  input[type=input] \\n\";\n    txt += \"  process[flowunit=javapassthrouth, device=cpu, opt=value]\\n\";\n    txt += \"  output[type=output]\\n\";\n    txt += \"\\n\";\n    txt += \" input->process:in\";\n    txt += \" process:out -> output\\n\";\n    txt += \"}}'''\\n\";\n    txt += \"format = \\\"graphviz\\\"\\n\";\n    \n\n    System.out.println(txt);\n    Flow flow = new Flow();\n    FlowUnitPassThrough.Builder builder = new FlowUnitPassThrough.Builder();\n    builder.setDirect(true);\n    flow.RegisterFlowUnit(builder);\n    flow.init(\"Process\", txt);\n    flow.startRun();\n    FlowStreamIO streamio = flow.CreateStreamIO();\n    Buffer data = streamio.createBuffer();\n    assertEquals(data.getDevice().getType(), \"cpu\");\n    String msg = \"Hello world\";\n    data.build(msg.getBytes());\n    streamio.send(\"input\", data);\n    streamio.send(\"input\", msg.getBytes());\n    streamio.closeInput();\n    int count = 0;\n    \n    while (true) {\n      Buffer outdata = streamio.recv(\"output\", 1000 * 10);\n      if (outdata == null) {\n        break;\n      }\n\n      ByteBuffer b = outdata.getDirectData();\n      if (b != null) {\n        byte[] bytes = new byte[b.remaining()];\n        b.get(bytes);\n        String str = new String(bytes);\n        assertEquals(msg, str);\n        Log.info(\"Direct message is: \" + str);\n      }\n      \n      String str = new String(outdata.getData());\n      assertEquals(msg, str);\n      Log.info(\"Message is: \" + str);\n      get_result = true;\n      count++;\n    }\n    \n    assertEquals(count, 2);\n    assertTrue(get_result);\n    flow = null;\n    System.gc();\n  }\n}\n"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelBoxLogTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\nimport org.junit.Test;\n\npublic class ModelBoxLogTest {\n\n  class TestLog extends Log {\n    public void print(LogLevel level, String file, int lineno, String func, String msg) {\n      String timeStamp = new SimpleDateFormat(\"yyyy-MM-dd HH.mm.ss.SSS\").format(new Date());\n      System.out.printf(\"[%s][%s][%17s:%-4d] %s\\n\", timeStamp, level, file, lineno, msg);\n      lastMsg = msg;\n    }\n\n    public String lastMsg;\n  }\n\n  @Test\n  public void testLogReg() throws Exception {\n    String mesg = \"This is hello msg\";\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_DEBUG);\n    Log.debug(mesg);\n    TestLog log = new TestLog();\n    Log.regLog(log);\n    log.setLogLevel(Log.LogLevel.LOG_DEBUG);\n    Log.info(mesg);\n    assertEquals(log.lastMsg, mesg);\n    assertEquals(log, Log.LogGetLogger());\n    Log.unRegLog();\n  }\n\n  @Test\n  public void testLogFormat() throws Exception {\n    String mesg = \"This is hello msg\";\n    String mesg1 = \"This is message 2\";\n    String expect_msg = \"Msg: \" + mesg + \" \" + mesg1;\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_DEBUG);\n    Log.debug(mesg);\n    TestLog log = new TestLog();\n    Log.regLog(log);\n    
log.setLogLevel(Log.LogLevel.LOG_DEBUG);\n    Log.info(\"Msg: %s %s\", mesg, mesg1);\n    assertEquals(log.lastMsg, expect_msg);\n    assertEquals(log, Log.LogGetLogger());\n    Log.unRegLog();\n  }\n\n  @Test\n  public void testLogLevel() throws Exception {\n    Log.LogLevel oldLevel = Log.getLogger().getLogLevel();\n\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_DEBUG);\n    assertEquals(Log.getLogger().getLogLevel(), Log.LogLevel.LOG_DEBUG);\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_INFO);\n    assertEquals(Log.getLogger().getLogLevel(), Log.LogLevel.LOG_INFO);\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_NOTICE);\n    assertEquals(Log.getLogger().getLogLevel(), Log.LogLevel.LOG_NOTICE);\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_WARN);\n    assertEquals(Log.getLogger().getLogLevel(), Log.LogLevel.LOG_WARN);\n    Log.getLogger().setLogLevel(Log.LogLevel.LOG_ERROR);\n    assertEquals(Log.getLogger().getLogLevel(), Log.LogLevel.LOG_ERROR);\n    \n    Log.getLogger().setLogLevel(oldLevel);\n  }\n}"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelBoxMiscTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport org.junit.Test;\n\npublic class ModelBoxMiscTest {\n  @Test\n  public void testFlowUnitError() throws Exception {\n    FlowUnitError err = new FlowUnitError(\"desc\");\n    assertEquals(err.getDesc(), \"desc\");\n\n    Status status = new Status(StatusCode.STATUS_ALREADY, \"status\");\n    err = new FlowUnitError(\"a\", \"b\", status);\n    assertEquals(err.getDesc(), \"node:a error pos:b status:Operation already in progress error:status\");\n  }\n\n  @Test\n  public void testFlowUnitDataMeta() throws Exception {\n    DataMeta data = new DataMeta();\n    data.set(\"a\", \"a\");\n    data.set(\"b\", \"b\");\n\n    assertEquals(data.getString(\"a\"), \"a\");\n    assertEquals(data.getString(\"b\"), \"b\");\n    assertEquals(data.getString(\"c\"), null);\n  }\n\n  @Test\n  public void testFlowUnitEvent() throws Exception {\n    FlowUnitEvent event = new FlowUnitEvent();\n    event.set(\"a\", \"a\");\n    event.set(\"b\", \"b\");\n\n\n    assertEquals(event.get(\"a\"), \"a\");\n    assertEquals(event.get(\"b\"), \"b\");\n    assertEquals(event.get(\"c\"), null);\n  }\n}"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelBoxStatusTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport org.junit.Test;\n\npublic class ModelBoxStatusTest {\n  @Test\n  public void testStatusMessage() throws Exception {\n    String msg = \"this is a message\";\n    Status s = new Status(StatusCode.STATUS_FAULT, msg);\n    String expect_msg  = \"code: \" + s.StrCode() + \", errmsg: \" + msg;\n    assertEquals(expect_msg, s.ToSting());\n    System.out.println(s.ToSting());\n  }\n}"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/ModelboxBufferTest.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport static org.junit.Assert.assertEquals;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\n\npublic class ModelboxBufferTest {\n  @BeforeClass\n  public static void setUpTest() {\n    Log.unRegLog();\n    ModelBox.SetDefaultScanPath(TestConfig.TEST_DRIVER_DIR);\n  }\n\n  @Test\n  public void testBufferMeta() throws Exception {\n    String txt = \"[log]\\n\";\n    txt += \"level=\\\"INFO\\\"\\n\";\n    txt += \"[graph]\\n\";\n    txt += \"graphconf = '''digraph demo {{ \\n\";\n    txt += \" input[type=input]\\n\";\n    txt += \" output[type=output]\\n\";\n    txt += \"input -> output\\n\";\n    txt += \"}}'''\\n\";\n    txt += \"format = \\\"graphviz\\\"\\n\";\n\n    System.out.println(txt);\n    Flow flow = new Flow();\n    flow.init(\"NOT-EXIST\", txt);\n    flow.startRun();\n    FlowStreamIO streamio = flow.CreateStreamIO();\n    Buffer data = streamio.createBuffer();\n    data.setMetaInt(\"int\", 1);\n    data.setMetaFloat(\"float\", 1.0f);\n    data.setMetaString(\"string\", \"1\");\n    data.setMetaLong(\"long\", 2);\n    data.setMetaDouble(\"double\", 2.0);\n    assertEquals(data.getMetaInt(\"int\"), 1);\n    assertEquals(data.getMetaFloat(\"float\"), 1.0f, 0.1);\n    assertEquals(data.getMetaString(\"string\"), \"1\");\n    assertEquals(data.getMetaLong(\"long\"), 
2);\n    assertEquals(data.getMetaDouble(\"double\"), 2.0, 0.1);\n  }\n}\n"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/TestConfig.java",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\npackage com.modelbox;\n\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport org.json.JSONObject;\n\npublic class TestConfig {\n\n  static {\n    /**\n     for vscode junit:\n     add the following settings in settings.json\n          \n     \"java.test.config\": {\n        \"vmArgs\": [\n            \"-Djava.library.path=${workspaceFolder}/build/src/java/jni\"\n        ],\n        \"env\" : {\n            \"TEST_CONFIG_JSON_FILE\" : \"${workspaceFolder}/build/src/java/src/test/java/com/modelbox/TestConfig.json\"\n        }\n      },\n     */\n    String jsonfile = System.getenv(\"TEST_CONFIG_JSON_FILE\");\n    if (jsonfile != null) {\n      try {\n        JSONObject jsonobject = new JSONObject(\n            new String(Files.readAllBytes(Paths.get(jsonfile)), StandardCharsets.UTF_8));\n        TestConfig.TEST_WORKING_DIR = jsonobject.getString(\"TEST_WORKING_DIR\");\n        TestConfig.TEST_LIB_DIR = jsonobject.getString(\"TEST_LIB_DIR\");\n        TestConfig.TEST_BIN_DIR = jsonobject.getString(\"TEST_BIN_DIR\");\n        TestConfig.TEST_DATA_DIR = jsonobject.getString(\"TEST_DATA_DIR\");\n        TestConfig.TEST_SOURCE_DIR = jsonobject.getString(\"TEST_SOURCE_DIR\");\n        TestConfig.TEST_DRIVER_DIR = jsonobject.getString(\"TEST_DRIVER_DIR\");\n      
  TestConfig.TEST_ASSETS = jsonobject.getString(\"TEST_ASSETS\");\n      } catch (Exception e) {\n        System.err.println(\"Load json file \" + jsonfile + \" failed\");\n      }\n    }\n  }\n\n  static String TEST_WORKING_DIR;\n\n  static String TEST_LIB_DIR;\n\n  static String TEST_BIN_DIR;\n\n  static String TEST_DATA_DIR;\n\n  static String TEST_SOURCE_DIR;\n\n  static String TEST_DRIVER_DIR;\n\n  static String TEST_ASSETS;\n}\n"
  },
  {
    "path": "src/java/src/test/java/com/modelbox/TestConfig.json.in",
    "content": "{\n  \"TEST_WORKING_DIR\": \"@TEST_WORKING_DIR@\",\n  \"TEST_LIB_DIR\": \"@TEST_WORKING_LIB_DIR@\",\n  \"TEST_BIN_DIR\": \"@TEST_WORKING_BIN_DIR@\",\n  \"TEST_DATA_DIR\": \"@TEST_WORKING_DATA_DIR@\",\n  \"TEST_SOURCE_DIR\": \"@TEST_SOURCE_DIR@\",\n  \"TEST_DRIVER_DIR\": \"@TEST_WORKING_DRIVERS_DIR@\",\n  \"TEST_ASSETS\": \"@TEST_ASSETS@\"\n}"
  },
  {
    "path": "src/libmodelbox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE LIBMODELBOX_SOURCES *.cpp *.cc *.c)\nexclude_files_from_dir_in_list(LIBMODELBOX_SOURCES \"${LIBMODELBOX_SOURCES}\" \"${CMAKE_CURRENT_LIST_DIR}/base/\")\nset(LIBMODELBOX_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\nset(LIBMODELBOX_ENGINE_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/engine)\nset(LIBMODELBOX_CONFIG_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/base/include)\n\ninclude_directories(${LIBMODELBOX_ENGINE_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\n\nadd_subdirectory(base)\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\n\nset(HEADER \n    ${LIBMODELBOX_INCLUDE}/modelbox\n    ${LIBMODELBOX_BASE_INCLUDE}/modelbox\n    ${LIBMODELBOX_CONFIG_INCLUDE}/modelbox\n    )\n\nadd_library(LIBMODELBOX_OBJECTS OBJECT ${LIBMODELBOX_SOURCES})\nset_property(TARGET LIBMODELBOX_OBJECTS PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nadd_library(libmodelbox-static STATIC $<TARGET_OBJECTS:LIBMODELBOX_OBJECTS> $<TARGET_OBJECTS:LIBMODELBOX_BASE_OBJECTS> $<TARGET_OBJECTS:LIBMODELBOX_ARCH_CPU_OBJECTS>)\nset(LIBMODELBOX_STATIC 
libmodelbox-static)\n\nadd_library(libmodelbox-shared SHARED $<TARGET_OBJECTS:LIBMODELBOX_OBJECTS> $<TARGET_OBJECTS:LIBMODELBOX_BASE_OBJECTS> $<TARGET_OBJECTS:LIBMODELBOX_ARCH_CPU_OBJECTS>)\nset(LIBMODELBOX_SHARED libmodelbox-shared)\n\nset_target_properties(libmodelbox-shared PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(libmodelbox-static ssl)\ntarget_link_libraries(libmodelbox-static crypto)\ntarget_link_libraries(libmodelbox-static pthread)\ntarget_link_libraries(libmodelbox-static rt)\ntarget_link_libraries(libmodelbox-static dl)\ntarget_link_libraries(libmodelbox-static atomic)\ntarget_link_libraries(libmodelbox-static ${HUAWEI_SECURE_C_LIBRARIES})\n\ntarget_link_libraries(libmodelbox-shared ssl)\ntarget_link_libraries(libmodelbox-shared crypto)\ntarget_link_libraries(libmodelbox-shared pthread)\ntarget_link_libraries(libmodelbox-shared rt)\ntarget_link_libraries(libmodelbox-shared dl)\ntarget_link_libraries(libmodelbox-shared atomic)\ntarget_link_libraries(libmodelbox-shared ${HUAWEI_SECURE_C_LIBRARIES})\n\nif (CMAKE_SYSTEM_PROCESSOR MATCHES \"arm\")\n    set(ATOMIC_LINK_LIBRARIES \"atomic\")\n    target_link_libraries(libmodelbox-static ${ATOMIC_LINK_LIBRARIES})\n    target_link_libraries(libmodelbox-shared ${ATOMIC_LINK_LIBRARIES})\nendif()\n\nset_target_properties(libmodelbox-static PROPERTIES OUTPUT_NAME \"modelbox\")\nset_target_properties(libmodelbox-shared PROPERTIES OUTPUT_NAME \"modelbox\")\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox.pc @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/config.h.in ${LIBMODELBOX_CONFIG_INCLUDE}/modelbox/base/config.h @ONLY)\n\nif (STANDALONE)\n    set(CMAKE_INSTALL_RPATH $ORIGIN)\nendif()\n\ninstall(TARGETS libmodelbox-shared \n    COMPONENT libmodelbox\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION 
${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(TARGETS libmodelbox-static \n    COMPONENT libmodelbox-devel\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    OPTIONAL\n    )\n\ninstall(DIRECTORY \n    ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT libmodelbox-devel\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig\n    COMPONENT libmodelbox-devel\n    )\n\nset(LIBMODELBOX_STATIC ${LIBMODELBOX_STATIC} CACHE INTERNAL \"\")\nset(LIBMODELBOX_SHARED ${LIBMODELBOX_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_INCLUDE \n    ${LIBMODELBOX_INCLUDE} \n    ${LIBMODELBOX_CONFIG_INCLUDE}\n    CACHE INTERNAL \"\")\nset(LIBMODELBOX_SOURCES ${LIBMODELBOX_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE INTERNAL \"\")\nset(LIBMODELBOX_SOURCES_DIR ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/libmodelbox/base/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE LIBMODELBOX_BASE_SOURCES *.cpp *.cc *.c)\nexclude_files_from_dir_in_list(LIBMODELBOX_BASE_SOURCES \"${LIBMODELBOX_BASE_SOURCES}\" \"${CMAKE_CURRENT_LIST_DIR}/arch/\")\n\nadd_subdirectory(arch)\n\nset(LIBMODELBOX_BASE_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_CONFIG_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${TOML_INCLUDE_DIR})\nset(LIBMODELBOX_BASE_INCLUDE ${LIBMODELBOX_BASE_INCLUDE})\n\nadd_library(LIBMODELBOX_BASE_OBJECTS OBJECT ${LIBMODELBOX_BASE_SOURCES})\nset_property(TARGET LIBMODELBOX_BASE_OBJECTS PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(LIBMODELBOX_BASE_LIBRARY modelbox-base)\nadd_library(${LIBMODELBOX_BASE_LIBRARY} STATIC $<TARGET_OBJECTS:LIBMODELBOX_BASE_OBJECTS>)\nset_property(TARGET ${LIBMODELBOX_BASE_LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\ntarget_link_libraries(${LIBMODELBOX_BASE_LIBRARY} ${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_BASE_LIBRARY} pthread)\ntarget_link_libraries(${LIBMODELBOX_BASE_LIBRARY} rt)\ntarget_link_libraries(${LIBMODELBOX_BASE_LIBRARY} 
dl)\n\nset_target_properties(${LIBMODELBOX_BASE_LIBRARY} PROPERTIES OUTPUT_NAME \"modelbox-base\")\n\nset(LIBMODELBOX_BASE_LIBRARY ${LIBMODELBOX_BASE_LIBRARY} CACHE INTERNAL \"\")\nset(LIBMODELBOX_BASE_INCLUDE ${LIBMODELBOX_BASE_INCLUDE} CACHE INTERNAL \"\" )\nset(LIBMODELBOX_BASE_SOURCES ${LIBMODELBOX_BASE_SOURCES} CACHE INTERNAL \"\" )\nset(LIBMODELBOX_BASE_OBJECTS LIBMODELBOX_BASE_OBJECTS CACHE INTERNAL \"\" )\n"
  },
  {
    "path": "src/libmodelbox/base/arch/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nif(UNIX AND NOT APPLE)\n    file(GLOB LIBMODELBOX_LINUX_OS_SOURCES \"./linux/*.cc\" \"*.cc\")\n    if (CMAKE_SYSTEM_PROCESSOR MATCHES \"x86_64\")\n        file(GLOB LIBMODELBOX_CPU_SOURCES \"./linux/x86_64/*.cc\" \"*.cc\")\n        set_property(SOURCE ${CMAKE_CURRENT_LIST_DIR}/linux/x86_64/base64_simd.cc APPEND PROPERTY COMPILE_FLAGS \"-mavx512f -mavx512bw -fPIC\")\n    elseif (CMAKE_SYSTEM_PROCESSOR MATCHES \"aarch64\")\n        file(GLOB LIBMODELBOX_CPU_SOURCES \"./linux/aarch64/*.cc\" \"*.cc\")\n    else()\n         file(GLOB LIBMODELBOX_CPU_SOURCES \"./linux/default/*.cc\" \"*.cc\")\n    endif()\nelseif(ANDROID)\n    file(GLOB LIBMODELBOX_LINUX_OS_SOURCES \"./android/*.cc\" \"*.cc\")\nendif()\n\nlist(APPEND LIBMODELBOX_BASE_SOURCES ${LIBMODELBOX_LINUX_OS_SOURCES})\nset(LIBMODELBOX_BASE_SOURCES ${LIBMODELBOX_BASE_SOURCES} PARENT_SCOPE)\n\nset(LIBMODELBOX_ARCH_CPU_LIBRARY modelbox-arch-cpu)\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/../include)\nadd_library(LIBMODELBOX_ARCH_CPU_OBJECTS OBJECT ${LIBMODELBOX_CPU_SOURCES})\nset_property(TARGET LIBMODELBOX_ARCH_CPU_OBJECTS PROPERTY POSITION_INDEPENDENT_CODE ON)\nset(LIBMODELBOX_ARCH_CPU_OBJECTS LIBMODELBOX_ARCH_CPU_OBJECTS CACHE INTERNAL \"\")"
  },
  {
    "path": "src/libmodelbox/base/arch/android/base64_simd.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/base64_simd.h>\n\nnamespace modelbox {\n\nStatus Base64EncodeSIMD(const uint8_t *input, size_t input_len,\n                        std::string *output) {\n  return {STATUS_NOTFOUND, \"To be implemented android simd.\"};\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/android/stats.cc",
    "content": "#include \"stats.h\"\n\n#include <memory>\n\n#include \"modelbox/base/os.h\"\n\nnamespace modelbox {\n\nAndroidOSProcess::AndroidOSProcess() {}\n\nAndroidOSProcess::~AndroidOSProcess() {}\n\nint32_t AndroidOSProcess::GetThreadsNumber(uint32_t pid) { return 0; }\nuint32_t AndroidOSProcess::GetMemorySize(uint32_t pid) { return 0; }\nuint32_t AndroidOSProcess::GetMemoryRSS(uint32_t pid) { return 0; }\nuint32_t AndroidOSProcess::GetMemorySHR(uint32_t pid) { return 0; }\nuint32_t AndroidOSProcess::GetPid() { return 0; }\n\nstd::vector<uint32_t> AndroidOSProcess::GetProcessTime(uint32_t pid) {\n  return {};\n};\n\nstd::vector<uint32_t> AndroidOSProcess::GetTotalTime(uint32_t pid) {\n  return {};\n};\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/android/stats.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <iostream>\n#include \"modelbox/base/os.h\"\n\nnamespace modelbox {\n\nclass AndroidOSProcess : public OSProcess {\n public:\n  AndroidOSProcess();\n  virtual ~AndroidOSProcess();\n\n  virtual int32_t GetThreadsNumber(uint32_t pid);\n  virtual uint32_t GetMemorySize(uint32_t pid);\n  virtual uint32_t GetMemoryRSS(uint32_t pid);\n  virtual uint32_t GetMemorySHR(uint32_t pid);\n  virtual uint32_t GetPid();\n\n  virtual std::vector<uint32_t> GetProcessTime(uint32_t pid);\n  virtual std::vector<uint32_t> GetTotalTime(uint32_t pid);\n};\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/linux/aarch64/base64_simd.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/base64_simd.h>\n\n#include <arm_neon.h>\n\nnamespace modelbox {\n\nStatus Base64EncodeSIMD(const uint8_t *input, size_t input_len,\n                        std::string *output) {\n  return {STATUS_NOTFOUND, \"To be implemented linux aarch64 simd.\"};\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/linux/default/default.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/base64_simd.h>\n\nnamespace modelbox {\n\nStatus Base64EncodeSIMD(const uint8_t *input, size_t input_len,\n                        std::string *output) {\n  return {STATUS_NOTSUPPORT, \"To be implemented linux default simd.\"};\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/linux/stats.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"stats.h\"\n\n#include <net/if.h>\n#include <netinet/in.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/ioctl.h>\n#include <sys/prctl.h>\n#include <sys/sysinfo.h>\n#include <sys/utsname.h>\n#include <unistd.h>\n\n#include <fstream>\n#include <memory>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"modelbox/base/status.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nOSInfo *os = &LinuxOSInfo::GetInstance();\n\nOSProcess::OSProcess() = default;\nOSProcess::~OSProcess() = default;\n\nOSThread::OSThread() = default;\nOSThread::~OSThread() = default;\n\nOSInfo::OSInfo() = default;\nOSInfo::~OSInfo() = default;\n\n// OSProcess\nLinuxOSProcess::LinuxOSProcess() = default;\n\nLinuxOSProcess::~LinuxOSProcess() = default;\n\nuint32_t LinuxOSProcess::GetPid() { return getpid(); }\n\nint32_t LinuxOSProcess::GetThreadsNumber(uint32_t pid) { return 0; }\n\nuint32_t LinuxOSProcess::GetMemorySize(uint32_t pid) { return 0; }\n\nuint32_t LinuxOSProcess::GetMemorySHR(uint32_t pid) { return 0; }\n\nuint32_t LinuxOSProcess::GetMemoryRSS(uint32_t pid) { return 0; }\n\nstd::vector<uint32_t> LinuxOSProcess::GetProcessTime(uint32_t pid) {\n  std::vector<uint32_t> ss{0, 0};\n  return ss;\n}\n\nstd::vector<uint32_t> LinuxOSProcess::GetTotalTime(uint32_t pid) 
{\n  std::vector<uint32_t> ss{0, 0};\n  return ss;\n}\n\n// OSThread\nLinuxOSThread::LinuxOSThread() = default;\nLinuxOSThread::~LinuxOSThread() = default;\n\nuint32_t LinuxOSProcess::GetPPid() { return 0; };\n\nstd::thread::id LinuxOSThread::GetTid() { return std::this_thread::get_id(); };\n\nStatus LinuxOSThread::SetName(const std::string &name) {\n  prctl(PR_SET_NAME, name.c_str(), 0, 0, 0);\n  return STATUS_OK;\n}\n\nStatus LinuxOSThread::SetThreadPriority(const std::thread::id &thread,\n                                        int32_t priority) {\n  return STATUS_OK;\n}\n\nStatus LinuxOSThread::SetThreadLogicalCPUAffinity(\n    const std::thread::id &thread, const std::vector<int16_t> &l_cpus) {\n  return STATUS_OK;\n}\n\nStatus LinuxOSThread::SetThreadPhysicalCPUAffinity(\n    const std::thread::id &thread, const std::vector<int16_t> &p_cpus) {\n  return STATUS_OK;\n}\n\nint32_t LinuxOSThread::GetThreadPriority(const std::thread::id &thread) {\n  return 0;\n}\n\n// OSInfo\nLinuxOSInfo::LinuxOSInfo() {\n  Process = std::make_shared<LinuxOSProcess>();\n  Thread = std::make_shared<LinuxOSThread>();\n}\n\nLinuxOSInfo::~LinuxOSInfo() = default;\n\nLinuxOSInfo &LinuxOSInfo::GetInstance() {\n  static LinuxOSInfo os;\n  return os;\n}\n\nStatus LinuxOSInfo::GetMemoryUsage(size_t *free, size_t *total) {\n  struct sysinfo si;\n  auto ret = sysinfo(&si);\n  if (ret != 0) {\n    MBLOG_ERROR << \"Get sys mem info failed, ret \" << ret;\n    return STATUS_FAULT;\n  }\n\n  if (free != nullptr) {\n    *free = si.freeram;\n  }\n\n  if (total != nullptr) {\n    *total = si.totalram;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::vector<uint32_t> LinuxOSInfo::GetCpuRunTime() {\n  std::vector<uint32_t> ss{0, 0};\n  return ss;\n}\n\nint32_t LinuxOSInfo::GetPhysicalCpuNumbers() { return 1; };\n\nint32_t LinuxOSInfo::GetLogicalCpuNumbers() {\n  return sysconf(_SC_NPROCESSORS_ONLN);\n}\n\nstd::string LinuxOSInfo::GetSystemID() {\n  std::string result;\n  std::ifstream 
file(\"/etc/machine-id\");\n  if (!file.fail()) {\n    getline(file, result);\n    if (result.length() > 0) {\n      return result;\n    }\n    file.close();\n  }\n\n  struct utsname buf;\n  if (uname(&buf) != 0) {\n    StatusError = {STATUS_FAULT, StrError(errno)};\n    return \"\";\n  }\n\n  result = buf.machine;\n  result += buf.nodename;\n  return result;\n}\n\nstd::string LinuxOSInfo::GetMacAddress(const std::string &nic) {\n  std::string mac;\n  struct ifreq ifr;\n  struct ifconf ifc;\n  char buf[1024];\n\n  int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);\n  if (sock == -1) {\n    StatusError = {STATUS_FAULT, \"create socket failed.\"};\n    return mac;\n  }\n  Defer { close(sock); };\n\n  ifc.ifc_len = sizeof(buf);\n  ifc.ifc_buf = buf;\n  if (ioctl(sock, SIOCGIFCONF, &ifc) == -1) {\n    StatusError = {STATUS_FAULT, \"get socket info failed.\"};\n    return mac;\n  }\n\n  struct ifreq *it = ifc.ifc_req;\n  const struct ifreq *const end = it + (ifc.ifc_len / sizeof(struct ifreq));\n\n  StatusError = {STATUS_NOTFOUND, \"not found nic\"};\n  for (; it != end; ++it) {\n    strncpy_s(ifr.ifr_name, IFNAMSIZ, it->ifr_name, IFNAMSIZ);\n    if (ioctl(sock, SIOCGIFFLAGS, &ifr) != 0) {\n      continue;\n    }\n\n    if (ifr.ifr_flags & IFF_LOOPBACK) {\n      continue;\n    }\n\n    if (nic.length() > 0 && nic != it->ifr_name) {\n      continue;\n    }\n\n    if (ioctl(sock, SIOCGIFHWADDR, &ifr) != 0) {\n      continue;\n    }\n\n    char tmp[64];\n    int len = snprintf_s(\n        tmp, sizeof(tmp), sizeof(tmp), \"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\",\n        (uint8_t)ifr.ifr_hwaddr.sa_data[0], (uint8_t)ifr.ifr_hwaddr.sa_data[1],\n        (uint8_t)ifr.ifr_hwaddr.sa_data[2], (uint8_t)ifr.ifr_hwaddr.sa_data[3],\n        (uint8_t)ifr.ifr_hwaddr.sa_data[4], (uint8_t)ifr.ifr_hwaddr.sa_data[5]);\n    if (len < 0) {\n      continue;\n    }\n    mac = tmp;\n    break;\n  }\n\n  return mac;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/linux/stats.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <iostream>\n\n#include \"modelbox/base/os.h\"\n\nnamespace modelbox {\n\nclass LinuxOSProcess : public OSProcess {\n public:\n  LinuxOSProcess();\n  ~LinuxOSProcess() override;\n  int32_t GetThreadsNumber(uint32_t pid) override;\n  uint32_t GetMemorySize(uint32_t pid) override;\n  uint32_t GetMemoryRSS(uint32_t pid) override;\n  uint32_t GetMemorySHR(uint32_t pid) override;\n  uint32_t GetPid() override;\n  uint32_t GetPPid() override;\n\n  std::vector<uint32_t> GetProcessTime(uint32_t pid) override;\n  std::vector<uint32_t> GetTotalTime(uint32_t pid) override;\n};\n\nclass LinuxOSThread : public OSThread {\n public:\n  LinuxOSThread();\n  ~LinuxOSThread() override;\n\n  std::thread::id GetTid() override;\n  Status SetName(const std::string &name) override;\n  Status SetThreadPriority(const std::thread::id &thread,\n                           int32_t priority) override;\n  Status SetThreadLogicalCPUAffinity(\n      const std::thread::id &thread,\n      const std::vector<int16_t> &l_cpus) override;\n  Status SetThreadPhysicalCPUAffinity(\n      const std::thread::id &thread,\n      const std::vector<int16_t> &p_cpus) override;\n  int32_t GetThreadPriority(const std::thread::id &thread) override;\n};\n\nclass LinuxOSInfo : public OSInfo {\n public:\n  LinuxOSInfo();\n  ~LinuxOSInfo() override;\n\n  
Status GetMemoryUsage(size_t *free, size_t *total) override;\n\n  std::vector<uint32_t> GetCpuRunTime() override;\n\n  int32_t GetPhysicalCpuNumbers() override;\n  int32_t GetLogicalCpuNumbers() override;\n\n  std::string GetSystemID() override;\n\n  std::string GetMacAddress(const std::string &nic = \"\") override;\n\n  static LinuxOSInfo &GetInstance();\n};\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/arch/linux/x86_64/base64_simd.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/base64_simd.h>\n\n#include <cpuid.h>\n#include <immintrin.h>\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nstruct base64_encode_simd_param {\n  const int group_size{3};\n  const int result_group_size{4};\n  const int batch_size{12 * 4};\n  const __m512i idx0_1 =\n      _mm512_setr_epi32(0x0, 0x1, 0x2, 0xC, 0x3, 0x4, 0x5, 0xD, 0x6, 0x7, 0x8,\n                        0xE, 0x9, 0xA, 0xB, 0xF);\n  const __m512i idx1_2 = _mm512_setr_epi32(\n      0x01020001, 0x04050304, 0x07080607, 0x0A0B090A, 0x01020001, 0x04050304,\n      0x07080607, 0x0A0B090A, 0x01020001, 0x04050304, 0x07080607, 0x0A0B090A,\n      0x01020001, 0x04050304, 0x07080607, 0x0A0B090A);\n  const char *encode_table =\n      \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\";\n  int batch_count{0};\n  int group_count{0};\n  int input_len{0};\n  int output_len{0};\n};\n\n// NOLINTNEXTLINE\nvoid cpuid(int info[4], int function_id_) {\n  __cpuid_count(function_id_, 0, info[0], info[1], info[2], info[3]);\n}\n\n__m512i ConvertAscii(__m512i &index) {\n  // index:0 ~ 25 -> 'A'(65) - 'X',  offset: +65\n  auto mask_upper_letter = _mm512_cmple_epi8_mask(index, _mm512_set1_epi8(25));\n  auto result =\n      _mm512_maskz_add_epi8(mask_upper_letter, index, _mm512_set1_epi8(65));\n\n  // index:26 ~ 51 -> 
'a'(97) - 'z',  offset: +71\n  auto mask_lower_letter_0 =\n      _mm512_cmpge_epi8_mask(index, _mm512_set1_epi8(26));\n  auto mask_lower_letter_1 =\n      _mm512_cmple_epi8_mask(index, _mm512_set1_epi8(51));\n  result =\n      _mm512_mask_add_epi8(result, mask_lower_letter_0 & mask_lower_letter_1,\n                           index, _mm512_set1_epi8(71));\n\n  // index:52 ~ 61 -> '0'(48) ~ '9' , offset: -4\n  auto mask_num_0 = _mm512_cmpge_epi8_mask(index, _mm512_set1_epi8(52));\n  auto mask_num_1 = _mm512_cmple_epi8_mask(index, _mm512_set1_epi8(61));\n  result = _mm512_mask_sub_epi8(result, mask_num_0 & mask_num_1, index,\n                                _mm512_set1_epi8(4));\n\n  // index:62 -> '+'(43), offset: -19\n  auto mask_plus = _mm512_cmpeq_epi8_mask(index, _mm512_set1_epi8(62));\n  result = _mm512_mask_sub_epi8(result, mask_plus, index, _mm512_set1_epi8(19));\n\n  // index:63 -> '/'(47), offset: -16\n  auto mask_slash = _mm512_cmpeq_epi8_mask(index, _mm512_set1_epi8(63));\n  result =\n      _mm512_mask_sub_epi8(result, mask_slash, index, _mm512_set1_epi8(16));\n\n  return result;\n}\n\nvoid Base64EncodeLessOneBatch(struct base64_encode_simd_param param,\n                              const uint8_t *src, uint8_t *output_buffer) {\n  // less than one batch\n  int i = param.batch_size * param.batch_count;\n  while (i + param.group_size <= param.input_len) {\n    output_buffer[0] = param.encode_table[src[i] >> 2];\n    output_buffer[1] =\n        param.encode_table[((src[i] << 4) | (src[i + 1] >> 4)) & 0x3F];\n    output_buffer[2] =\n        param.encode_table[((src[i + 1] << 2) | (src[i + 2] >> 6)) & 0x3F];\n    output_buffer[3] = param.encode_table[src[i + 2] & 0x3F];\n    output_buffer += param.result_group_size;\n    i += param.group_size;\n  }\n\n  // Less than one group\n  int remind_byte = param.input_len % 3;\n  if (remind_byte == 1) {\n    output_buffer[0] = param.encode_table[(src[i] & 0xFC) >> 2];\n    output_buffer[1] = param.encode_table[((src[i] 
& 0x03) << 4)];\n    output_buffer[2] = '=';\n    output_buffer[3] = '=';\n  } else if (remind_byte == 2) {\n    output_buffer[0] = param.encode_table[(src[i] & 0xFC) >> 2];\n    output_buffer[1] =\n        param.encode_table[((src[i] & 0x03) << 4) | ((src[i + 1] & 0xF0) >> 4)];\n    output_buffer[2] = param.encode_table[((src[i + 1] & 0x0F) << 2)];\n    output_buffer[3] = '=';\n  }\n}\n\nbool CheckSupportBase64SIMD() {\n  // check cpu whether support avx512f and avx512bw\n  int info[4] = {0};\n  const int function_id = 0x00000007;\n  cpuid(info, function_id);\n  bool HAS_AVX512F = (info[1] & ((int)1 << 16)) != 0;\n  bool HAS_AVX512BW = (info[1] & ((int)1 << 30)) != 0;\n\n  return HAS_AVX512F && HAS_AVX512BW;\n}\n\nStatus Base64EncodeSIMD(const uint8_t *input, size_t input_len,\n                        std::string *output) {\n  if (input == nullptr || input_len == 0) {\n    const auto *err_msg = \"base64 encode input data is null or size is zero\";\n    MBLOG_ERROR << err_msg;\n    return {STATUS_INVALID, err_msg};\n  }\n\n  if (!CheckSupportBase64SIMD()) {\n    return {STATUS_NOTFOUND, \"not support simd.\"};\n  }\n\n  struct base64_encode_simd_param param;\n  param.input_len = input_len;\n  param.group_count = (input_len + 2) / 3;\n  param.output_len = param.group_count * 4;\n  param.batch_count = input_len / param.batch_size;\n\n  output->resize(param.output_len);\n  auto *output_buffer = (uint8_t *)output->data();\n\n  for (int i = 0; i <= param.input_len - param.batch_size;\n       i += param.batch_size) {\n    // get index\n    // in0 =\n    // [XXXX|XXXX|XXXX|XXXX|PPPO|OONN|NMMM|LLLK|KKJJ|JIII|HHHG|GGFF|FEEE|DDDC|CCBB|BAAA]\n    __m512i in0 =\n        _mm512_loadu_si512(reinterpret_cast<const __m512i *>(input + i));\n\n    // in1 =\n    // [XXXX|PPPO|OONN|NMMM|XXXX|LLLK|KKJJ|JIII|XXXX|HHHG|GGFF|FEEE|XXXX|DDDC|CCBB|BAAA]\n    __m512i in1 = _mm512_permutexvar_epi32(param.idx0_1, in0);\n\n    // [XAAA] -> [xxxxxxxx|ccdddddd|bbbbcccc|aaaaaabb] ([3|2|1|0])\n    
//        -> [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc] ([1|2|0|1])\n    // in2 = [...|D1D2D0D1|C1C2C0C1|B1B2B0B1|A1A2A0A1]\n    __m512i in2 = _mm512_shuffle_epi8(in1, param.idx1_2);\n\n    //    [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc]\n    // -> [00dddddd|00cccccc|00bbbbbb|00aaaaaa]\n    //      byte3    byte2    byte1    byte0\n\n    // byte0 & byte2\n    //    [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc]\n    // -> [0000cccc|cc000000|aaaaaa00|00000000]\n    // -> [00000000|00cccccc|00000000|00aaaaaa]\n    __m512i byte_0_2 = _mm512_and_si512(in2, _mm512_set1_epi32(0x0fc0fc00));\n    byte_0_2 = _mm512_srlv_epi16(byte_0_2, _mm512_set1_epi32(0x0006000a));\n\n    // byte1 & byte3\n    //    [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc]\n    // -> [00000000|00dddddd|000000bb|bbbb0000]\n    // -> [00dddddd|00000000|00bbbbbb|00000000]\n    __m512i byte_1_3 = _mm512_and_si512(in2, _mm512_set1_epi32(0x003f03f0));\n    byte_1_3 = _mm512_sllv_epi16(byte_1_3, _mm512_set1_epi32(0x00080004));\n    __m512i index = _mm512_or_epi32(byte_1_3, byte_0_2);\n\n    // convert to ascii\n    auto result = ConvertAscii(index);\n\n    // save result\n    _mm512_storeu_si512(reinterpret_cast<__m512i *>(output_buffer), result);\n    output_buffer += 64;\n  }\n\n  // less than one batch\n  Base64EncodeLessOneBatch(param, input, output_buffer);\n  return STATUS_OK;\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/config/configuration.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/configuration.h\"\n\n#include <toml.hpp>\n\nnamespace modelbox {\n\nclass TomlConfigParser : public ConfigParser {\n public:\n  virtual ~TomlConfigParser() = default;\n  Status Visit(\n      const std::string &key, const toml::value &value,\n      const std::function<void(const std::string key,\n                               const std::string &basic_value)> &collector);\n\n  Status Parse(const std::shared_ptr<Configuration> &config, std::istream &is,\n               const std::string &fname = \"unknown file\") override;\n  Status Parse(const std::shared_ptr<Configuration> &config,\n               const std::string &file) override;\n};\n\nConfigStore::~ConfigStore() = default;\n\nvoid ConfigStore::WriteProperty(const std::string &key,\n                                const std::string &property) {\n  properties_[key] =\n      expand_env_ ? 
ExpandEnvironmentVariables(property) : property;\n\n  auto prefix_key = key;\n  auto period_pos = prefix_key.find_last_of('.');\n  while (period_pos != std::string::npos) {\n    auto sub_key = prefix_key.substr(period_pos + 1);\n    prefix_key = prefix_key.substr(0, period_pos);\n\n    if (sub_key_index_.find(prefix_key) != sub_key_index_.end()) {\n      period_pos = std::string::npos;\n    } else {\n      period_pos = prefix_key.find_last_of('.', period_pos);\n    }\n\n    sub_key_index_[prefix_key].insert(sub_key);\n  }\n}\n\nStatus ConfigStore::ReadProperty(const std::string &key,\n                                 std::string *property) const {\n  if (property == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  auto item = properties_.find(key);\n  if (item == properties_.end()) {\n    return STATUS_RANGE;\n  }\n\n  *property = item->second;\n  return STATUS_SUCCESS;\n}\n\nstd::set<std::string> ConfigStore::GetKeys() const {\n  std::set<std::string> keys;\n  for (const auto &propertie : properties_) {\n    keys.insert(propertie.first);\n  }\n\n  return keys;\n}\n\nstd::set<std::string> ConfigStore::GetSubKeys(\n    const std::string &prefix_key) const {\n  auto iter = sub_key_index_.find(prefix_key);\n  if (iter == sub_key_index_.end()) {\n    return {};\n  }\n\n  return iter->second;\n}\n\nstd::unique_ptr<ConfigStore> ConfigStore::GetSubConfigStore(\n    const std::string &prefix_key) const {\n  std::unique_ptr<ConfigStore> sub_store(new ConfigStore());\n  AddSubConfig(prefix_key, sub_store.get(), prefix_key.size() + 1);\n  return sub_store;\n}\n\nvoid ConfigStore::SetExpandEnv(bool expand_env) { expand_env_ = expand_env; }\n\nsize_t ConfigStore::Size() const { return properties_.size(); }\n\nbool ConfigStore::Contain(const std::string &key) const {\n  auto item = properties_.find(key);\n  return item != properties_.end();\n}\n\nvoid ConfigStore::Add(const ConfigStore &store) {\n  for (const auto &iter : store.properties_) {\n    properties_[iter.first] = 
iter.second;\n  }\n\n  for (const auto &iter : store.sub_key_index_) {\n    sub_key_index_[iter.first] = iter.second;\n  }\n}\n\nvoid ConfigStore::Copy(const ConfigStore &store, const std::string &key) {\n  if (store.Contain(key) == false) {\n    return;\n  }\n  std::string property;\n  store.ReadProperty(key, &property);\n  WriteProperty(key, property);\n}\n\nvoid ConfigStore::AddSubConfig(const std::string &prefix_key,\n                               ConfigStore *store, size_t key_offset) const {\n  auto sub_keys = GetSubKeys(prefix_key);\n  if (sub_keys.size() == 0) {\n    StatusError = {STATUS_NOTFOUND, \"sub config not found\"};\n    return;\n  }\n\n  for (const auto &sub_key : sub_keys) {\n    auto new_prefix = prefix_key + \".\";\n    new_prefix += sub_key;\n    auto item = properties_.find(new_prefix);\n    if (item != properties_.end()) {\n      store->WriteProperty(item->first.substr(key_offset), item->second);\n    }\n    AddSubConfig(new_prefix, store, key_offset);\n  }\n\n  if (sub_keys.size() > 0) {\n    StatusError = STATUS_OK;\n  }\n}\n\nConfiguration::Configuration() {\n  store_ = std::unique_ptr<ConfigStore>(new ConfigStore());\n}\n\nConfiguration::Configuration(std::unique_ptr<ConfigStore> &store) {\n  store_ = std::move(store);\n}\n\nConfiguration::~Configuration() = default;\n\nvoid Configuration::Trim(std::string *value) {\n  if (value == nullptr) {\n    return;\n  }\n\n  value->erase(0, value->find_first_not_of(' '));\n  value->erase(value->find_last_not_of(' ') + 1);\n}\n\nsize_t Configuration::Size() const { return store_->Size(); }\n\nvoid Configuration::Add(const Configuration &config) { store_->Add(*(config.store_)); }\n\nvoid Configuration::Copy(const Configuration &config, const std::string &key) {\n    store_->Copy(*(config.store_), key);\n  }\n\nstd::set<std::string> Configuration::GetKeys() const {\n  return store_->GetKeys();\n}\n\nbool Configuration::Contain(const std::string &key) const {\n  return 
store_->Contain(key);\n}\n\nstd::set<std::string> Configuration::GetSubKeys(\n    const std::string &prefix_key) const {\n  return store_->GetSubKeys(prefix_key);\n}\n\nstd::shared_ptr<Configuration> Configuration::GetSubConfig(\n    const std::string &prefix_key) const {\n  auto sub_config_store = store_->GetSubConfigStore(prefix_key);\n  std::shared_ptr<Configuration> sub_config(\n      new Configuration(sub_config_store));\n  return sub_config;\n}\n\nstd::string Configuration::GetString(const std::string &key,\n                                     const std::string &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nbool Configuration::GetBool(const std::string &key, bool default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nint8_t Configuration::GetInt8(const std::string &key,\n                              int8_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nuint8_t Configuration::GetUint8(const std::string &key,\n                                uint8_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nint16_t Configuration::GetInt16(const std::string &key,\n                                int16_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nuint16_t Configuration::GetUint16(const std::string &key,\n                                  uint16_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nint32_t Configuration::GetInt32(const std::string &key,\n                                int32_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nuint32_t Configuration::GetUint32(const std::string &key,\n                                  uint32_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nint64_t Configuration::GetInt64(const std::string &key,\n                                int64_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nuint64_t Configuration::GetUint64(const std::string 
&key,\n                                  uint64_t default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nfloat Configuration::GetFloat(const std::string &key,\n                              float default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\ndouble Configuration::GetDouble(const std::string &key,\n                                double default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<std::string> Configuration::GetStrings(\n    const std::string &key,\n    const std::vector<std::string> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<bool> Configuration::GetBools(\n    const std::string &key, const std::vector<bool> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<int8_t> Configuration::GetInt8s(\n    const std::string &key, const std::vector<int8_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<uint8_t> Configuration::GetUint8s(\n    const std::string &key, const std::vector<uint8_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<int16_t> Configuration::GetInt16s(\n    const std::string &key, const std::vector<int16_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<uint16_t> Configuration::GetUint16s(\n    const std::string &key, const std::vector<uint16_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<int32_t> Configuration::GetInt32s(\n    const std::string &key, const std::vector<int32_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<uint32_t> Configuration::GetUint32s(\n    const std::string &key, const std::vector<uint32_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<int64_t> Configuration::GetInt64s(\n    const std::string &key, const std::vector<int64_t> &default_prop) const {\n  return 
GetProperty(key, default_prop);\n}\n\nstd::vector<uint64_t> Configuration::GetUint64s(\n    const std::string &key, const std::vector<uint64_t> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<float> Configuration::GetFloats(\n    const std::string &key, const std::vector<float> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\nstd::vector<double> Configuration::GetDoubles(\n    const std::string &key, const std::vector<double> &default_prop) const {\n  return GetProperty(key, default_prop);\n}\n\ntemplate <>\nStatus Configuration::Convert<std::string>(const std::string &property,\n                                           std::string &convert_prop) const {\n  convert_prop = property;\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<bool>(const std::string &property,\n                                    bool &convert_prop) const {\n  if (property == \"true\" || property == \"1\") {\n    convert_prop = true;\n  } else if (property == \"false\" || property == \"0\") {\n    convert_prop = false;\n  } else {\n    return {STATUS_FAULT, \"bool failed, invalid\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<int8_t>(const std::string &property,\n                                      int8_t &convert_prop) const {\n  int value;\n  size_t idx = 0;\n  try {\n    value = std::stoi(property, &idx);\n    if (value > INT8_MAX || value < INT8_MIN) {\n      return {STATUS_FAULT, \"int8 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"int8 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<int8_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"int8 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"int8 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<uint8_t>(const std::string 
&property,\n                                       uint8_t &convert_prop) const {\n  int value;\n  size_t idx = 0;\n  try {\n    value = std::stoi(property, &idx);\n    if (value > UINT8_MAX || value < 0) {\n      return {STATUS_FAULT, \"uint8 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"uint8 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<uint8_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"uint8 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"uint8 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<int16_t>(const std::string &property,\n                                       int16_t &convert_prop) const {\n  int value;\n  size_t idx = 0;\n  try {\n    value = std::stoi(property, &idx);\n    if (value > INT16_MAX || value < INT16_MIN) {\n      return {STATUS_FAULT, \"int16 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"int16 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<int16_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"int16 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"int16 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<uint16_t>(const std::string &property,\n                                        uint16_t &convert_prop) const {\n  int value;\n  size_t idx = 0;\n  try {\n    value = std::stoi(property, &idx);\n    if (value > UINT16_MAX || value < 0) {\n      return {STATUS_FAULT, \"uint16 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"uint16 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<uint16_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"uint16 failed, invalid\"};\n  } 
catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"uint16 failed, out of range\"};\n  }\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<int32_t>(const std::string &property,\n                                       int32_t &convert_prop) const {\n  long long value;\n  size_t idx = 0;\n  try {\n    value = std::stoi(property, &idx);\n    if (value > INT32_MAX || value < INT32_MIN) {\n      return {STATUS_FAULT, \"int32 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"int32 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<int32_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"int32 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"int32 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<uint32_t>(const std::string &property,\n                                        uint32_t &convert_prop) const {\n  long long value;\n  size_t idx = 0;\n  try {\n    value = std::stoll(property, &idx);\n    if (value > UINT32_MAX || value < 0) {\n      return {STATUS_FAULT, \"uint32 failed, out of range\"};\n    }\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"uint32 failed, invalid\"};\n    }\n\n    convert_prop = static_cast<uint32_t>(value);\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"uint32 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"uint32 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<int64_t>(const std::string &property,\n                                       int64_t &convert_prop) const {\n  size_t idx = 0;\n  try {\n    convert_prop = std::stoll(property, &idx);\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"int64 failed, invalid\"};\n    }\n\n  } catch (std::invalid_argument &e) {\n    return 
{STATUS_FAULT, \"int64 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"int64 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<uint64_t>(const std::string &property,\n                                        uint64_t &convert_prop) const {\n  size_t idx = 0;\n  try {\n    convert_prop = std::stoull(property, &idx);\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"uint64 failed, invalid\"};\n    }\n\n    if (property[0] == '-') {\n      return {STATUS_FAULT, \"uint64 failed, out of range\"};\n    }\n\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"uint64 failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"uint64 failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<float>(const std::string &property,\n                                     float &convert_prop) const {\n  size_t idx = 0;\n  try {\n    convert_prop = std::stof(property, &idx);\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"float failed, invalid\"};\n    }\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"float failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"float failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\ntemplate <>\nStatus Configuration::Convert<double>(const std::string &property,\n                                      double &convert_prop) const {\n  size_t idx = 0;\n  try {\n    convert_prop = std::stod(property, &idx);\n\n    if (idx != property.size()) {\n      return {STATUS_FAULT, \"double failed, invalid\"};\n    }\n  } catch (std::invalid_argument &e) {\n    return {STATUS_FAULT, \"double failed, invalid\"};\n  } catch (std::out_of_range &e) {\n    return {STATUS_FAULT, \"double failed, out of range\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid Configuration::StringSplit(const 
std::string &str,\n                                const std::string &delimiter,\n                                std::vector<std::string> &sub_str_list) {\n  if (str.empty() || delimiter.empty()) {\n    return;\n  }\n\n  auto str_with_delimiter = str + delimiter;\n  auto begin = 0;\n  auto end = str_with_delimiter.find(delimiter, begin);\n  while (end != std::string::npos) {\n    auto sub_str = str_with_delimiter.substr(begin, end - begin);\n    sub_str_list.push_back(sub_str);\n    begin = end + 1;\n    end = str_with_delimiter.find(delimiter, begin);\n  }\n}\n\nStatus TomlConfigParser::Visit(\n    const std::string &key, const toml::value &value,\n    const std::function<void(const std::string key,\n                             const std::string &basic_value)> &collector) {\n  if (value.is_array()) {\n    std::stringstream ss;\n    auto array = value.as_array();\n    int index = 0;\n    for (const auto &v : array) {\n      if (index > 0) {\n        ss << LIST_DELIMITER;\n      }\n      ss << v.as_string().str;\n      index++;\n    }\n    collector(key, ss.str());\n    return STATUS_SUCCESS;\n  }\n\n  if (value.is_table()) {\n    auto value_table = value.as_table();\n    for (const auto &pair : value_table) {\n      std::string sub_key;\n      if (key.empty()) {\n        sub_key = pair.first;\n      } else {\n        sub_key = key + \".\" + pair.first;\n      }\n      auto ret = Visit(sub_key, pair.second, collector);\n      if (ret != STATUS_SUCCESS) {\n        return ret;\n      }\n    }\n    return STATUS_SUCCESS;\n  }\n\n  if (value.is_string()) {\n    collector(key, value.as_string().str);\n  } else {\n    std::stringstream ss;\n    ss << value;\n    collector(key, ss.str());\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus TomlConfigParser::Parse(const std::shared_ptr<Configuration> &config,\n                               std::istream &is, const std::string &fname) {\n  toml::value data;\n  try {\n    data = toml::parse(is, fname);\n  } catch (std::exception 
&e) {\n    return {STATUS_FAULT, e.what()};\n  }\n\n  return Visit(\n      \"\", data,\n      [&config](const std::string &key, const std::string &basic_value) {\n        config->SetProperty(key, basic_value);\n      });\n}\n\nStatus TomlConfigParser::Parse(const std::shared_ptr<Configuration> &config,\n                               const std::string &file) {\n  std::ifstream ifs(file.c_str(), std::ios_base::binary);\n  return Parse(config, ifs, file);\n}\n\nConfigurationBuilder::ConfigurationBuilder() = default;\nConfigurationBuilder::~ConfigurationBuilder() = default;\n\nvoid ConfigurationBuilder::AddProperty(const std::string &key,\n                                       const std::string &property) {\n  if (store_ == nullptr) {\n    store_.reset(new ConfigStore());\n  }\n\n  store_->WriteProperty(key, property);\n}\n\nvoid ConfigurationBuilder::AddProperty(\n    const std::string &key, const std::vector<std::string> &properties) {\n  bool is_first = true;\n  if (store_ == nullptr) {\n    store_.reset(new ConfigStore());\n  }\n\n  std::string value;\n  for (const auto &str : properties) {\n    if (is_first == true) {\n      is_first = false;\n    } else {\n      value += LIST_DELIMITER;\n    }\n    value += str;\n  }\n\n  store_->WriteProperty(key, value);\n}\n\nvoid ConfigurationBuilder::AddProperties(\n    const std::map<std::string, std::string> &properties) {\n  for (const auto &pair : properties) {\n    AddProperty(pair.first, pair.second);\n  }\n}\n\nstd::shared_ptr<Configuration> ConfigurationBuilder::Build() {\n  if (store_ == nullptr) {\n    store_.reset(new ConfigStore());\n  }\n\n  StatusError = STATUS_SUCCESS;\n  return std::shared_ptr<Configuration>(new Configuration(store_));\n}\n\nstd::shared_ptr<Configuration> ConfigurationBuilder::Build(\n    const std::string &file, const ConfigType &type, bool expand_env) {\n  std::ifstream ifs(file.c_str());\n  if (!ifs.good()) {\n    auto msg = \"file open \" + file + \" error: \" + StrError(errno);\n    
MBLOG_ERROR << msg;\n    StatusError = {STATUS_INVALID, msg};\n    return nullptr;\n  }\n\n  return Build(ifs, file, type, expand_env);\n}\n\nstd::shared_ptr<Configuration> ConfigurationBuilder::Build(\n    std::istream &is, const std::string &fname, const ConfigType &type,\n    bool expand_env) {\n  auto parser = CreateParser(type);\n  if (parser == nullptr) {\n    return nullptr;\n  }\n\n  store_.reset(new ConfigStore());\n  store_->SetExpandEnv(expand_env);\n  std::shared_ptr<Configuration> config(new Configuration(store_));\n  auto ret = parser->Parse(config, is, fname);\n  if (ret != STATUS_SUCCESS) {\n    StatusError = ret;\n    return nullptr;\n  }\n\n  StatusError = STATUS_SUCCESS;\n  return config;\n}\n\nstd::shared_ptr<ConfigParser> ConfigurationBuilder::CreateParser(\n    const ConfigType &type) {\n  switch (type) {\n    case ConfigType::TOML:\n      return std::make_shared<TomlConfigParser>();\n      break;\n\n    default:\n      StatusError = {STATUS_FAULT,\n                     \"Create unknow parser \" + std::to_string((int32_t)type)};\n      return nullptr;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/device/device.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/device.h\"\n\n#include <stdio.h>\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\nDevice::Device(std::shared_ptr<DeviceMemoryManager> mem_mgr)\n    : memory_trace_(std::make_shared<DeviceMemoryTrace>()),\n      memory_manager_(std::move(mem_mgr)) {\n  executor_ = std::make_shared<Executor>();\n}\n\nDevice::Device(size_t thread_count,\n               std::shared_ptr<DeviceMemoryManager> mem_mgr)\n    : memory_trace_(std::make_shared<DeviceMemoryTrace>()),\n      memory_manager_(std::move(mem_mgr)) {\n  if (0 == thread_count) {\n    executor_ = nullptr;\n  } else {\n    executor_ = std::make_shared<Executor>(thread_count);\n  }\n}\n\nDevice::Device() = default;\n\nDevice::~Device() = default;\n\nstd::string Device::GetDeviceID() const {\n  if (device_desc_ != nullptr) {\n    return device_desc_->GetDeviceId();\n  }\n\n  return \"\";\n}\n\nstd::shared_ptr<Executor> Device::GetDeviceExecutor() { return executor_; }\n\nvoid Device::SetMemQuota(size_t mem_quota) {\n  memory_manager_->SetMemQuota(mem_quota);\n}\n\nsize_t Device::GetMemQuota() const { return memory_manager_->GetMemQuota(); }\n\nsize_t Device::GetAllocatedMemSize() const {\n  return memory_manager_->GetAllocatedMemSize();\n}\n\nStatus Device::DeviceExecute(const DevExecuteCallBack &fun, int32_t 
priority,\n                             size_t count) {\n  return STATUS_NOTSUPPORT;\n}\n\nstd::string Device::GetType() const { return \"\"; }\n\nbool Device::SupportMemContiguous() const { return true; }\n\nbool Device::NeedResourceNice() { return false; }\n\nstd::list<std::future<Status>> Device::DeviceExecuteAsync(\n    const DevExecuteCallBack &fun, int32_t priority, size_t count,\n    bool resource_nice) {\n  if (0 == count) {\n    return {};\n  }\n\n  if (NeedResourceNice() && resource_nice) {\n    return DeviceExecuteAsyncNice(fun, priority, count);\n  }\n\n  return DeviceExecuteAsyncRude(fun, priority, count);\n}\n\nstd::list<std::future<Status>> Device::DeviceExecuteAsyncRude(\n    const DevExecuteCallBack &fun, int32_t priority, size_t count) {\n  std::list<std::future<Status>> future_status_list;\n  for (size_t i = 0; i < count; ++i) {\n    auto future_status = executor_->Run(fun, priority, i);\n    future_status_list.push_back(std::move(future_status));\n  }\n\n  return future_status_list;\n}\n\nstd::list<std::future<Status>> Device::DeviceExecuteAsyncNice(\n    const DevExecuteCallBack &fun, int32_t priority, size_t count) {\n  auto serial_process = [fun, count]() {\n    Status final_result;\n    for (size_t i = 0; i < count; ++i) {\n      auto ret = fun(i);\n      if (final_result == STATUS_OK || final_result == STATUS_CONTINUE) {\n        final_result = ret;\n      }\n    }\n\n    return final_result;\n  };\n\n  auto future_status = executor_->Run(serial_process, priority);\n  std::list<std::future<Status>> status_list;\n  status_list.push_back(std::move(future_status));\n  return status_list;\n}\n\nStatus Device::Init() {\n  size_t total;\n  auto ret = GetMemInfo(nullptr, &total);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Get device \" << GetDeviceID() << \" mem info failed\";\n    return {ret, \"device init failed.\"};\n  }\n\n  SetMemQuota(total);\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemAlloc(size_t 
size, uint32_t mem_flags,\n                                               const std::string &user_id) {\n  return MemAlloc(size, size, mem_flags, user_id);\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemAlloc(size_t size, size_t capacity,\n                                               uint32_t mem_flags,\n                                               const std::string &user_id) {\n  // TODO: Get user_id from thread module\n  if (size > capacity) {\n    StatusError = {STATUS_RANGE, \"Mem capacity must >= size\"};\n    MBLOG_ERROR << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  if (size == 0) {\n    auto mem =\n        memory_manager_->MakeDeviceMemory(shared_from_this(), nullptr, 0);\n    mem->SetMemFlags(mem_flags);\n    return mem;\n  }\n\n  if (!memory_manager_->PreserveMem(capacity)) {\n    return nullptr;\n  }\n\n  auto device_mem_shared_ptr =\n      memory_manager_->AllocSharedPtr(capacity, mem_flags);\n  if (device_mem_shared_ptr == nullptr) {\n    return nullptr;\n  }\n\n  auto device_mem = memory_manager_->MakeDeviceMemory(\n      shared_from_this(), device_mem_shared_ptr, capacity);\n  if (device_mem == nullptr) {\n    return nullptr;\n  }\n\n  device_mem->SetMemFlags(mem_flags);\n  device_mem->Resize(size);\n  memory_trace_->TraceMemoryAlloc(device_mem->GetMemoryID(), user_id,\n                                  GetDeviceID(), capacity);\n\n  return device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemAcquire(void *mem_ptr, size_t size,\n                                                 const DeleteFunction &deleter,\n                                                 uint32_t mem_flags) {\n  std::shared_ptr<void> mem_shared_ptr(mem_ptr, deleter);\n  return MemAcquire(mem_shared_ptr, size, mem_flags);\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemAcquire(void *mem_ptr, size_t size,\n                                                 uint32_t mem_flags) {\n  auto dev_mem = MemAlloc(size, mem_flags);\n  
memory_manager_->Copy(dev_mem->GetPtr<uint8_t>().get(), size, mem_ptr, size,\n                        DeviceMemoryCopyKind::SameDeviceType);\n  return dev_mem;\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemAcquire(\n    const std::shared_ptr<void> &mem_ptr, size_t size, uint32_t mem_flags) {\n  auto dev_mem = MemAlloc(0, mem_flags);\n  auto ret = dev_mem->MemAcquire(mem_ptr, size);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Mem acquire (\" << mem_ptr << \" , \" << size << \") failed\";\n    return nullptr;\n  }\n\n  return dev_mem;\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemWrite(const void *host_data,\n                                               size_t host_size,\n                                               const std::string &user_id) {\n  if (0 == host_size || nullptr == host_data) {\n    MBLOG_ERROR << \"Mem write failed, src size is zero or host data null\";\n    StatusError = {STATUS_INVALID, \"invalid argument\"};\n    return nullptr;\n  }\n\n  auto device_mem = MemAlloc(host_size, 0, user_id);\n  if (nullptr == device_mem) {\n    MBLOG_ERROR << \"Malloc failed, size \" << host_size;\n    return nullptr;\n  }\n\n  auto device_buffer = device_mem->GetPtr<void>();\n  auto ret = memory_manager_->Write(host_data, host_size, device_buffer.get(),\n                                    device_mem->GetSize());\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Write host data to device memory failed\";\n    return nullptr;\n  }\n\n  return device_mem;\n}\n\nStatus Device::GetMemInfo(size_t *free, size_t *total) const {\n  return memory_manager_->GetDeviceMemUsage(free, total);\n}\n\nstd::shared_ptr<DeviceMemoryTrace> Device::GetMemoryTrace() const {\n  return memory_trace_;\n}\n\nstd::shared_ptr<DeviceMemory> Device::MemClone(\n    std::shared_ptr<DeviceMemory> src_memory, const std::string &user_id) {\n  if (!src_memory->IsContentMutable() &&\n      src_memory->GetDevice().get() == this) {\n    return src_memory;\n  }\n\n  auto dest_memory =\n 
     MemAlloc(src_memory->GetSize(), src_memory->mem_flags_, user_id);\n  if (dest_memory == nullptr) {\n    MBLOG_ERROR << \"MemAlloc failed in clone\";\n    return nullptr;\n  }\n\n  auto ret = dest_memory->ReadFrom(src_memory, 0, src_memory->GetSize());\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read data from source failed\";\n    return nullptr;\n  }\n\n  return dest_memory;\n}\n\nvoid Device::SetDeviceDesc(std::shared_ptr<DeviceDesc> device_desc) {\n  device_desc_ = std::move(device_desc);\n}\n\nstd::shared_ptr<DeviceDesc> Device::GetDeviceDesc() { return device_desc_; }\n\nstd::shared_ptr<DeviceManager> Device::GetDeviceManager() {\n  return device_mgr_.lock();\n}\n\nvoid Device::SetDeviceManager(\n    const std::shared_ptr<DeviceManager> &device_mgr) {\n  device_mgr_ = device_mgr;\n}\n\nvoid DeviceDesc::SetDeviceId(const std::string &device_id) {\n  device_id_ = device_id;\n}\n\nvoid DeviceDesc::SetDeviceType(const std::string &device_type) {\n  device_type_ = device_type;\n}\n\nvoid DeviceDesc::SetDeviceMemory(const std::string &device_memory) {\n  device_memory_ = device_memory;\n}\n\nvoid DeviceDesc::SetDeviceVersion(const std::string &device_version) {\n  device_version_ = device_version;\n}\n\nvoid DeviceDesc::SetDeviceDesc(const std::string &device_desc) {\n  device_description_ = device_desc;\n}\n\nDeviceDesc::DeviceDesc() = default;\nDeviceDesc::~DeviceDesc() = default;\n\nstd::string DeviceDesc::GetDeviceId() { return device_id_; }\n\nstd::string DeviceDesc::GetDeviceType() { return device_type_; }\n\nstd::string DeviceDesc::GetDeviceMemory() { return device_memory_; }\n\nstd::string DeviceDesc::GetDeviceVersion() { return device_version_; }\n\nstd::string DeviceDesc::GetDeviceDesc() { return device_description_; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/device/device_factory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <stdio.h>\n#include \"modelbox/base/device.h\"\n\nnamespace modelbox {\n    \n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/device/device_manager.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"modelbox/base/device.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nDeviceFactory::DeviceFactory() = default;\nDeviceFactory::~DeviceFactory() = default;\n\nstd::map<std::string, std::shared_ptr<DeviceDesc>>\nDeviceFactory::DeviceProbe() {\n  return std::map<std::string, std::shared_ptr<DeviceDesc>>();\n}\n\nstd::shared_ptr<Device> DeviceFactory::CreateDevice(\n    const std::string &device_id) {\n  return nullptr;\n}\n\nstd::string DeviceFactory::GetDeviceFactoryType() { return \"\"; }\n\nstd::vector<std::string> DeviceFactory::GetDeviceList() {\n  return std::vector<std::string>();\n}\n\nDeviceManager::DeviceManager() = default;\nDeviceManager::~DeviceManager() = default;\n\nStatus DeviceManager::Register(const std::shared_ptr<DeviceFactory> &factory) {\n  std::string factory_type = factory->GetDeviceFactoryType();\n  if (device_factory_.count(factory_type)) {\n    MBLOG_WARN << \"The type \" << factory_type << \" has already existed.\";\n    return STATUS_EXIST;\n  }\n\n  device_factory_.insert(std::make_pair(factory_type, factory));\n  // TODO: register device id\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DeviceManager> 
DeviceManager::GetInstance() {\n  static std::shared_ptr<DeviceManager> device_mgr =\n      std::make_shared<DeviceManager>();\n  return device_mgr;\n}\n\nstd::shared_ptr<Device> DeviceManager::GetHostDevice() { return nullptr; }\n\nvoid DeviceManager::Clear() {\n  device_list_.clear();\n  device_desc_list_.clear();\n  device_factory_.clear();\n}\n\nStatus DeviceManager::Initialize(const std::shared_ptr<Drivers> &driver,\n                                 const std::shared_ptr<Configuration> &config) {\n  if (driver == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  SetDrivers(driver);\n\n  InitDeviceFactory(driver);\n  Status status = DeviceProbe();\n\n  return status;\n}\n\nStatus DeviceManager::CheckDeviceManagerInit() {\n  if (device_factory_.empty() || device_desc_list_.empty()) {\n    MBLOG_ERROR << \"Please init devicemanager first.\";\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nStatus DeviceManager::InitDeviceFactory(\n    const std::shared_ptr<Drivers> &driver) {\n  std::vector<std::shared_ptr<Driver>> driver_list =\n      driver->GetDriverListByClass(\"DRIVER-DEVICE\");\n  std::shared_ptr<DriverDesc> desc;\n  for (auto &device_driver : driver_list) {\n    auto temp_factory = device_driver->CreateFactory();\n    if (nullptr == temp_factory) {\n      continue;\n    }\n    desc = device_driver->GetDriverDesc();\n    std::shared_ptr<DeviceFactory> device_factory =\n        std::dynamic_pointer_cast<DeviceFactory>(temp_factory);\n\n    device_factory_.insert(std::make_pair(desc->GetType(), device_factory));\n  }\n  return STATUS_OK;\n}\n\nstd::vector<std::string> DeviceManager::GetDevicesTypes() {\n  std::vector<std::string> device_type;\n\n  for (auto &iter : device_factory_) {\n    device_type.push_back(iter.first);\n  }\n\n  return device_type;\n}\n\nstd::vector<std::string> DeviceManager::GetDevicesIdList(\n    const std::string &device_type) {\n  std::vector<std::string> device_id;\n  auto iter = device_desc_list_.find(device_type);\n  if 
(iter == device_desc_list_.end()) {\n    return device_id;\n  }\n\n  for (auto &id : device_desc_list_[device_type]) {\n    device_id.push_back(id.first);\n  }\n\n  return device_id;\n}\n\nstd::shared_ptr<Device> DeviceManager::CreateDevice(\n    const std::string &device_type, const std::string &device_id) {\n  if (CheckDeviceManagerInit() != STATUS_OK) {\n    StatusError = {STATUS_FAULT, \"check device failed.\"};\n    return nullptr;\n  }\n\n  if (device_type.empty() || device_id.empty()) {\n    StatusError = {STATUS_INVALID, \"device type or id is invalid.\"};\n    MBLOG_ERROR << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  std::shared_ptr<Device> device;\n  device = GetDevice(device_type, device_id);\n  if (device != nullptr) {\n    return device;\n  }\n\n  auto type_desc = device_desc_list_.find(device_type);\n  if (type_desc == device_desc_list_.end()) {\n    StatusError = {STATUS_NOTFOUND, \"Can't support type:\" + device_type};\n    MBLOG_ERROR << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  auto id_desc = device_desc_list_[device_type].find(device_id);\n  if (id_desc == device_desc_list_[device_type].end()) {\n    StatusError = {STATUS_NOTFOUND, \"Can't find device, type \" + device_type +\n                                        \" id: \" + device_id};\n    MBLOG_ERROR << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  auto iter = device_factory_.find(device_type);\n  if (iter == device_factory_.end()) {\n    StatusError = {STATUS_NOTFOUND, \"device type not found: \" + device_type};\n    return nullptr;\n  }\n\n  device = device_factory_[device_type]->CreateDevice(device_id);\n  if (device == nullptr) {\n    return device;\n  }\n\n  device->SetDeviceManager(shared_from_this());\n  auto ret = device->Init();\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Device init failed\";\n    return nullptr;\n  }\n\n  device->SetDeviceDesc(device_desc_list_[device_type][device_id]);\n  auto &id_map = device_list_[device_type];\n  
id_map[device_id] = device;\n  return device;\n}\n\nstd::shared_ptr<Device> DeviceManager::GetDevice(const std::string &device_type,\n                                                 const std::string &device_id) {\n  auto iter = device_list_.find(device_type);\n  if (iter == device_list_.end()) {\n    StatusError = {STATUS_NOTFOUND, \"cannot found device: \" + device_type +\n                                        \", id: \" + device_id};\n    return nullptr;\n  }\n\n  auto id = device_list_[device_type].find(device_id);\n  if (id == device_list_[device_type].end()) {\n    StatusError = {STATUS_NOTFOUND, \"cannot found device: \" + device_type +\n                                        \", id: \" + device_id};\n    return nullptr;\n  }\n\n  return device_list_[device_type][device_id];\n}\n\nStatus DeviceManager::DeviceProbe() {\n  for (auto &iter : device_factory_) {\n    auto tmp = iter.second->DeviceProbe();\n    device_desc_list_.insert(std::make_pair(iter.first, tmp));\n    for (auto const &itdev : tmp) {\n      auto dev_desc = itdev.second;\n      MBLOG_DEBUG << \"add device:\";\n      MBLOG_DEBUG << \"  type: \" << dev_desc->GetDeviceType();\n      MBLOG_DEBUG << \"  id: \" << dev_desc->GetDeviceId();\n      MBLOG_DEBUG << \"  memory: \" << dev_desc->GetDeviceMemory();\n      MBLOG_DEBUG << \"  version: \" << dev_desc->GetDeviceVersion();\n      MBLOG_DEBUG << \"  description: \" << dev_desc->GetDeviceDesc();\n    }\n  }\n\n  return STATUS_OK;\n}\n\nconst std::map<std::string, std::shared_ptr<DeviceFactory>>\n    &DeviceManager::GetDeviceFactoryList() {\n  return device_factory_;\n}\n\nconst std::map<std::string, std::map<std::string, std::shared_ptr<DeviceDesc>>>\n    &DeviceManager::GetDeviceDescList() {\n  return device_desc_list_;\n}\n\nconst std::map<std::string, std::map<std::string, std::shared_ptr<Device>>>\n    &DeviceManager::GetDeviceList() {\n  return device_list_;\n}\n\nstd::shared_ptr<Drivers> DeviceManager::GetDrivers() { return drivers_; 
}\n\nvoid DeviceManager::SetDrivers(std::shared_ptr<Drivers> drivers) {\n  drivers_ = std::move(drivers);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/device/device_memory.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/device_memory.h\"\n\n#include <utility>\n\n#include \"modelbox/base/device.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/slab.h\"\n\nnamespace modelbox {\n\nconst uint64_t DeviceMemory::MEM_MAGIC_CODE = 0x446d4d654d6f5279;\n\nDeviceMemory::DeviceMemory(const std::shared_ptr<Device> &device,\n                           const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                           const std::shared_ptr<void> &device_mem_ptr,\n                           size_t size, bool is_host_mem)\n    : is_host_mem_(is_host_mem),\n      device_(device),\n      mem_mgr_(mem_mgr),\n\n      size_(size),\n      capacity_(size) {\n  UpdateMemID(device_mem_ptr.get());\n  if (device_mem_ptr != nullptr) {\n    auto id = memory_id_;\n    device_mem_ptr_.reset(device_mem_ptr.get(), [device, mem_mgr, id, size,\n                                                 device_mem_ptr](void *ptr) {\n      auto trace = device->GetMemoryTrace();\n      if (trace != nullptr) {\n        trace->TraceMemoryFree(id);\n        mem_mgr->RestoreMem(size);\n      }\n    });\n  }\n}\n\nDeviceMemory::~DeviceMemory() = default;\n\nvoid DeviceMemory::UpdateMemID(void *device_mem_ptr) {\n  memory_id_ = std::to_string((uintptr_t)device_mem_ptr);\n}\n\nbool DeviceMemory::IsContentMutable() const { return 
is_content_mutable_; };\n\nStatus DeviceMemory::SetContentMutable(bool content_mutable) {\n  is_content_mutable_ = content_mutable;\n  // TODO: Protect memory in device\n  return STATUS_SUCCESS;\n}\n\nsize_t DeviceMemory::GetSize() const { return size_; };\n\nsize_t DeviceMemory::GetCapacity() const { return capacity_; };\n\nstd::string DeviceMemory::GetMemoryID() const { return memory_id_; }\n\nstd::shared_ptr<Device> DeviceMemory::GetDevice() const { return device_; }\n\nuint32_t DeviceMemory::GetMemFlags() const { return mem_flags_; }\n\nbool DeviceMemory::IsHost() const { return is_host_mem_; }\n\nbool DeviceMemory::IsSameDevice(const std::shared_ptr<DeviceMemory> &dev_mem) {\n  return dev_mem ? (device_ == dev_mem->device_ &&\n                    mem_flags_ == dev_mem->mem_flags_)\n                 : false;\n}\n\nbool DeviceMemory::IsContiguous(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    bool with_order) {\n  if (mem_list.size() <= 1) {\n    return true;\n  }\n\n  std::vector<std::shared_ptr<DeviceMemory>> order_mem_list(mem_list);\n  if (!with_order) {\n    std::sort(order_mem_list.begin(), order_mem_list.end(),\n              [](const std::shared_ptr<DeviceMemory> &mem1,\n                 const std::shared_ptr<DeviceMemory> &mem2) {\n                return mem1->GetConstPtr<void>() < mem2->GetConstPtr<void>();\n              });\n  }\n\n  auto device_mem_ptr = order_mem_list[0]->device_mem_ptr_;\n  size_t offset = order_mem_list[0]->offset_;\n  for (auto &mem : order_mem_list) {\n    if (device_mem_ptr != mem->device_mem_ptr_) {\n      return false;\n    }\n\n    if (offset != mem->offset_) {\n      return false;\n    }\n\n    offset += mem->size_;\n  }\n\n  return true;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Combine(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    const std::shared_ptr<Device> &target_device, uint32_t target_mem_flags) {\n  if (mem_list.empty()) {\n    MBLOG_ERROR << \"Combine mem 
list is empty\";\n    return nullptr;\n  }\n\n  size_t total_size = 0;\n  auto ret = CountMemSize(mem_list, total_size);\n  if (ret != STATUS_SUCCESS) {\n    return nullptr;\n  }\n\n  auto contiguous =\n      ((target_device && target_device != mem_list[0]->GetDevice()) ||\n       (target_mem_flags != mem_list[0]->mem_flags_))\n          ? false\n          : IsContiguous(mem_list, true);\n  if (contiguous) {\n    return CombineContinuous(mem_list, total_size, target_device);\n  }\n\n  return CombineFragment(mem_list, total_size, target_device, target_mem_flags);\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::CombineContinuous(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    size_t total_size, const std::shared_ptr<Device> &target_device) {\n  auto first_mem_ptr = std::min_element(\n      mem_list.begin(), mem_list.end(),\n      [](const std::shared_ptr<DeviceMemory> &mem1,\n         const std::shared_ptr<DeviceMemory> &mem2) {\n        return mem1->GetConstPtr<void>() < mem2->GetConstPtr<void>();\n      });\n  const auto &mem = *first_mem_ptr;\n  auto device = mem->GetDevice();\n  auto continuous_mem = device->MemAlloc(0);\n  continuous_mem->offset_ = mem->offset_;\n  continuous_mem->size_ = total_size;\n  continuous_mem->capacity_ = mem->capacity_;\n  continuous_mem->device_mem_ptr_ = mem->device_mem_ptr_;\n  continuous_mem->memory_id_ = mem->memory_id_;\n  continuous_mem->mem_flags_ = mem->mem_flags_;\n  auto ret = continuous_mem->CombineExtraMeta(mem_list);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Combine extra meta failed\";\n    return nullptr;\n  }\n\n  if (target_device == nullptr || target_device == mem->device_) {\n    return continuous_mem;\n  }\n\n  auto new_device_mem = target_device->MemAlloc(total_size);\n  if (new_device_mem == nullptr) {\n    MBLOG_ERROR << \"Mem alloc failed, size \" << total_size;\n    return nullptr;\n  }\n\n  new_device_mem->ReadFrom(continuous_mem, 0, total_size);\n  return 
new_device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::CombineFragment(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    size_t total_size, std::shared_ptr<Device> target_device,\n    uint32_t target_mem_flags) {\n  if (target_device == nullptr) {\n    target_device = mem_list[0]->GetDevice();\n  }\n\n  auto new_mem = target_device->MemAlloc(total_size, target_mem_flags);\n  if (new_mem == nullptr) {\n    MBLOG_ERROR << \"Mem alloc failed, size \" << total_size;\n    return nullptr;\n  }\n\n  size_t dest_offset = 0;\n  for (const auto &mem : mem_list) {\n    if (mem->GetSize() == 0) {\n      continue;\n    }\n\n    auto ret = new_mem->ReadFrom(mem, 0, mem->GetSize(), dest_offset);\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Combine read data failed, \" << ret;\n      return nullptr;\n    }\n\n    dest_offset += mem->GetSize();\n  }\n\n  return new_mem;\n}\n\nStatus DeviceMemory::CountMemSize(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    size_t &total_size) {\n  total_size = 0;\n  for (const auto &mem : mem_list) {\n    if (SIZE_MAX - total_size < mem->GetSize()) {\n      MBLOG_ERROR << \"Mem size > SIZE_MAX\";\n      return STATUS_FAULT;\n    }\n\n    total_size += mem->GetSize();\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus DeviceMemory::Verify() const { return STATUS_SUCCESS; }\n\nStatus DeviceMemory::Resize(size_t new_size) {\n  if (new_size > capacity_) {\n    MBLOG_ERROR << \"New size \" << new_size << \" > capacity \" << capacity_;\n    return STATUS_RANGE;\n  }\n\n  size_ = new_size;\n  return STATUS_SUCCESS;\n}\n\nStatus DeviceMemory::Realloc(size_t new_capacity) {\n  if (new_capacity < capacity_) {\n    return STATUS_SUCCESS;\n  }\n\n  if (!CheckReallocParam(new_capacity)) {\n    MBLOG_ERROR << \"Check realloc param failed\";\n    return STATUS_INVALID;\n  }\n\n  auto new_device_memory = device_->MemAlloc(size_, new_capacity, mem_flags_);\n  if (nullptr == new_device_memory) {\n    
MBLOG_ERROR << \"Device malloc failed\";\n    return STATUS_FAULT;\n  }\n\n  if (size_ > 0) {\n    auto ret = new_device_memory->ReadFrom(shared_from_this(), 0, size_);\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Copy old data to new buffer failed, copy size \" << size_;\n      return ret;\n    }\n  }\n\n  device_mem_ptr_ = new_device_memory->device_mem_ptr_;\n  offset_ = 0;\n  size_ = new_device_memory->size_;\n  capacity_ = new_device_memory->capacity_;\n  memory_id_ = new_device_memory->memory_id_;\n  auto this_mem = shared_from_this();\n  new_device_memory->CopyExtraMetaTo(this_mem);\n  return STATUS_SUCCESS;\n}\n\nbool DeviceMemory::CheckReallocParam(size_t new_capacity) {\n  if (0 == new_capacity) {\n    MBLOG_ERROR << \"Realloc mem to zero failed\";\n    return false;\n  }\n\n  return true;\n}\n\nStatus DeviceMemory::ReadFrom(\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, size_t dest_offset) {\n  if (src_memory->device_mem_ptr_ == device_mem_ptr_) {\n    MBLOG_ERROR << \"Memory read from same mem block is not supported\";\n    return STATUS_INVALID;\n  }\n\n  if (!CheckReadFromParam(src_memory, src_offset, src_size, dest_offset)) {\n    MBLOG_ERROR << \"Check read param failed\";\n    return STATUS_INVALID;\n  }\n\n  auto ret = TransferInDevice(src_memory, src_offset, src_size, dest_offset);\n  if (ret == STATUS_NOTSUPPORT) {\n    // Try to transfer data in host\n    ret = TransferInHost(src_memory, src_offset, src_size, dest_offset);\n  }\n\n  return ret;\n}\n\nStatus DeviceMemory::WriteTo(const std::shared_ptr<DeviceMemory> &dest_memory,\n                             size_t src_offset, size_t src_size,\n                             size_t dest_offset) const {\n  return dest_memory->ReadFrom(shared_from_this(), src_offset, src_size,\n                               dest_offset);\n}\n\nStatus DeviceMemory::CopyExtraMetaTo(\n    std::shared_ptr<DeviceMemory> &device_mem) {\n  return 
STATUS_SUCCESS;\n}\n\nStatus DeviceMemory::CombineExtraMeta(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) {\n  return STATUS_SUCCESS;\n}\n\nvoid DeviceMemory::SetMemFlags(uint32_t mem_flags) { mem_flags_ = mem_flags; }\n\nbool DeviceMemory::CheckReadFromParam(\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, size_t dest_offset) {\n  if (!IsContentMutable()) {\n    MBLOG_ERROR << \"Target memory content is not mutable\";\n    return false;\n  }\n\n  auto src_max_size = src_memory->GetSize();\n  if (src_offset >= src_max_size) {\n    MBLOG_ERROR << \"src_offset \" << src_offset << \" >= src_memory size\"\n                << src_max_size;\n    return false;\n  }\n\n  if (0 == src_size) {\n    MBLOG_ERROR << \"src_size is zero\";\n    return false;\n  }\n\n  auto src_data_size = src_max_size - src_offset;\n  if (src_data_size < src_size) {\n    MBLOG_ERROR << \"src_size \" << src_size << \" + src_offset \" << src_offset\n                << \" > src_memory size\" << src_max_size;\n    return false;\n  }\n\n  if (dest_offset >= this->size_) {\n    MBLOG_ERROR << \"dest_offset \" << dest_offset << \" >= dest_memory size\"\n                << this->size_;\n    return false;\n  }\n\n  auto dest_data_size = this->size_ - dest_offset;\n  if (dest_data_size < src_size) {\n    MBLOG_ERROR << \"src_size \" << src_size << \" + dest_offset \" << dest_offset\n                << \" > dest_memory size \" << this->size_;\n    return false;\n  }\n\n  return true;\n}\n\nStatus DeviceMemory::TransferInHost(\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, size_t dest_offset) {\n  // TODO: consider 4k cache\n  std::shared_ptr<uint8_t> host_cache(new (std::nothrow) uint8_t[src_size],\n                                      [](const uint8_t *ptr) { delete[] ptr; });\n  if (host_cache == nullptr) {\n    MBLOG_ERROR << \"No memory for host cache\";\n    return 
STATUS_NOMEM;\n  }\n\n  auto src_mem_mgr = src_memory->mem_mgr_;\n  auto src_dev = src_memory->GetDevice();\n  auto ret =\n      src_mem_mgr->Read(src_memory->GetConstPtr<uint8_t>().get() + src_offset,\n                        src_size, host_cache.get(), src_size);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Read data from device \" << src_dev->GetType() << \":\"\n                << src_dev->GetDeviceID() << \"to host failed, size \"\n                << src_size;\n    return STATUS_FAULT;\n  }\n\n  ret = mem_mgr_->Write(host_cache.get(), src_size,\n                        GetPtr<uint8_t>().get() + dest_offset, src_size);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Write data to device \" << device_->GetType() << \":\"\n                << device_->GetDeviceID() << \"from host failed, size \"\n                << src_size;\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus DeviceMemory::TransferInDevice(\n    const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n    size_t src_size, size_t dest_offset) {\n  DeviceMemoryCopyKind copy_kind;\n  if (src_memory->device_->GetType() == device_->GetType()) {\n    copy_kind = DeviceMemoryCopyKind::SameDeviceType;\n  } else if (src_memory->IsHost()) {\n    copy_kind = DeviceMemoryCopyKind::FromHost;\n  } else {\n    // Different type device transfer is not support by device\n    return STATUS_NOTSUPPORT;\n  }\n\n  return mem_mgr_->DeviceMemoryCopy(shared_from_this(), dest_offset, src_memory,\n                                    src_offset, src_size, copy_kind);\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Append(\n    const std::shared_ptr<DeviceMemory> &dev_mem) {\n  return Append(std::vector<std::shared_ptr<DeviceMemory>>{dev_mem});\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Append(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list) {\n  if (mem_list.empty()) {\n    MBLOG_ERROR << \"Append mem list is empty\";\n    return nullptr;\n  }\n\n 
 size_t append_size;\n  auto ret = CountMemSize(mem_list, append_size);\n  if (ret != STATUS_SUCCESS) {\n    return nullptr;\n  }\n\n  if (SIZE_MAX - append_size < size_) {\n    MBLOG_ERROR << \"Total mem size > SIZE_MAX\";\n    return nullptr;\n  }\n\n  auto new_device_mem = PrepareAppendMem(append_size);\n  if (new_device_mem == nullptr) {\n    return nullptr;\n  }\n\n  ret = AppendData(mem_list, new_device_mem);\n  if (ret != STATUS_SUCCESS) {\n    return nullptr;\n  }\n\n  return new_device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::PrepareAppendMem(\n    size_t append_size) {\n  std::shared_ptr<DeviceMemory> new_device_mem;\n  if (capacity_ - size_ < append_size) {\n    new_device_mem = device_->MemAlloc(size_ + append_size, mem_flags_);\n    if (new_device_mem == nullptr) {\n      MBLOG_ERROR << \"Alloc mem failed, size \" << size_ + append_size;\n      return nullptr;\n    }\n\n    if (size_ > 0) {\n      auto ret = new_device_mem->ReadFrom(shared_from_this(), 0, size_);\n      if (ret != STATUS_SUCCESS) {\n        MBLOG_ERROR << \"Append read data failed\";\n        return nullptr;\n      }\n    }\n  } else {\n    new_device_mem = device_->MemAlloc(0);\n    new_device_mem->device_mem_ptr_ = device_mem_ptr_;\n    new_device_mem->offset_ = offset_;\n    new_device_mem->size_ = size_ + append_size;\n    new_device_mem->capacity_ = capacity_;\n    new_device_mem->memory_id_ = memory_id_;\n    new_device_mem->mem_flags_ = mem_flags_;\n  }\n\n  return new_device_mem;\n}\n\nStatus DeviceMemory::AppendData(\n    const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n    std::shared_ptr<DeviceMemory> &target_device_mem) {\n  size_t offset = size_;\n  for (const auto &mem : mem_list) {\n    if (mem->GetSize() == 0) {\n      continue;\n    }\n\n    auto ret = target_device_mem->ReadFrom(mem, 0, mem->GetSize(), offset);\n    offset += mem->GetSize();\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Append mem data failed\";\n      return 
STATUS_FAULT;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Cut(size_t offset, size_t size) {\n  if (offset + size > capacity_) {\n    MBLOG_ERROR << \"cut failed, offset[\" << offset << \"] + size[\" << size\n                << \"] > capacity[\" << size << \"]\";\n    return nullptr;\n  }\n\n  auto new_device_mem = device_->MemAlloc(0);\n  new_device_mem->device_mem_ptr_ = device_mem_ptr_;\n  new_device_mem->offset_ = offset_ + offset;\n  new_device_mem->size_ = size;\n  new_device_mem->capacity_ = capacity_ - offset;\n  new_device_mem->memory_id_ = memory_id_;\n  new_device_mem->mem_flags_ = mem_flags_;\n  CopyExtraMetaTo(new_device_mem);\n  return new_device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Delete(size_t offset, size_t size) {\n  if (size >= capacity_) {\n    MBLOG_ERROR << \"Delete size \" << size << \" >= capacity \" << capacity_;\n    return nullptr;\n  }\n\n  return Delete(offset, size, capacity_ - size);\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Delete(size_t offset, size_t size,\n                                                   size_t capacity) {\n  if (offset >= capacity_) {\n    MBLOG_ERROR << \"Delete offset \" << offset << \" >= capacity \" << capacity_;\n    return nullptr;\n  }\n\n  if (size == 0) {\n    MBLOG_ERROR << \"Delete size is zero\";\n    return nullptr;\n  }\n\n  auto data_offset = offset + size;\n  if (data_offset > capacity_) {\n    MBLOG_ERROR << \"Delete offset \" << offset << \" + size \" << size\n                << \" > capacity \" << capacity_;\n    return nullptr;\n  }\n\n  auto content_size = capacity_ - size;\n  if (content_size == 0) {\n    MBLOG_ERROR << \"Delete size \" << size << \" == capacity \" << capacity_;\n    return nullptr;\n  }\n\n  auto new_device_mem = device_->MemAlloc(capacity, mem_flags_);\n  if (offset > 0) {\n    auto ret = new_device_mem->ReadFrom(shared_from_this(), 0, offset);\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << 
\"Delete, read from first part [0,\" << offset\n                  << \",0] failed\";\n      return nullptr;\n    }\n  }\n\n  if (data_offset < capacity_) {\n    auto ret = new_device_mem->ReadFrom(shared_from_this(), offset + size,\n                                        capacity_ - data_offset, offset);\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Delete, read from second part [\" << offset + size << \",\"\n                  << capacity_ - data_offset << \",\" << offset << \"] failed\";\n      return nullptr;\n    }\n  }\n\n  new_device_mem->size_ = capacity_ - size;\n  return new_device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Copy(size_t offset, size_t size) {\n  return Copy(offset, size, size);\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Copy(size_t offset, size_t size,\n                                                 size_t capacity) {\n  if (offset >= capacity_) {\n    MBLOG_ERROR << \"Copy offset \" << offset << \" >= capacity \" << capacity_;\n    return nullptr;\n  }\n\n  if (size == 0) {\n    MBLOG_ERROR << \"Copy size is zero\";\n    return nullptr;\n  }\n\n  if (offset + size > capacity_) {\n    MBLOG_ERROR << \"Copy offset \" << offset << \" + size \" << size\n                << \" > capacity \" << capacity_;\n    return nullptr;\n  }\n\n  auto new_device_mem = device_->MemAlloc(capacity, mem_flags_);\n  if (new_device_mem == nullptr) {\n    MBLOG_ERROR << \"Mem alloc failed, size \" << capacity;\n    return nullptr;\n  }\n\n  auto ret = new_device_mem->ReadFrom(shared_from_this(), offset, size);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Copy data failed\";\n    return nullptr;\n  }\n\n  new_device_mem->size_ = size;\n  return new_device_mem;\n}\n\nstd::shared_ptr<DeviceMemory> DeviceMemory::Clone(bool is_copy) {\n  std::shared_ptr<DeviceMemory> new_device_memory;\n  if (is_copy) {\n    new_device_memory = device_->MemAlloc(size_, capacity_, mem_flags_);\n    auto ret = 
new_device_memory->ReadFrom(shared_from_this(), 0, size_);\n    if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Clone data failed\";\n      return nullptr;\n    }\n  } else {\n    new_device_memory = device_->MemAlloc(0);\n    new_device_memory->device_mem_ptr_ = device_mem_ptr_;\n    new_device_memory->offset_ = offset_;\n    new_device_memory->size_ = size_;\n    new_device_memory->capacity_ = capacity_;\n    new_device_memory->memory_id_ = memory_id_;\n    new_device_memory->is_content_mutable_ = is_content_mutable_;\n    new_device_memory->mem_flags_ = mem_flags_;\n    CopyExtraMetaTo(new_device_memory);\n  }\n\n  return new_device_memory;\n}\n\nStatus DeviceMemory::MemAcquire(const std::shared_ptr<void> &mem_ptr,\n                                size_t size) {\n  if (mem_ptr == nullptr) {\n    MBLOG_ERROR << \"Mem acquire mem_ptr is nullptr\";\n    return STATUS_INVALID;\n  }\n\n  if (size == 0) {\n    MBLOG_ERROR << \"Mem acquire size is 0\";\n    return STATUS_INVALID;\n  }\n\n  device_mem_ptr_ = mem_ptr;\n  offset_ = 0;\n  size_ = size;\n  capacity_ = size;\n  UpdateMemID(device_mem_ptr_.get());\n  return STATUS_SUCCESS;\n}\n\nDeviceMemoryManager::DeviceMemoryManager(std::string device_id)\n    : device_id_(std::move(device_id)) {}\n\nDeviceMemoryManager::~DeviceMemoryManager() = default;\n\nvoid DeviceMemoryManager::SetMemQuota(size_t mem_quota) {\n  mem_quota_ = mem_quota;\n};\n\nsize_t DeviceMemoryManager::GetMemQuota() const { return mem_quota_; };\n\nsize_t DeviceMemoryManager::GetAllocatedMemSize() const {\n  return mem_allocated_;\n};\n\nbool DeviceMemoryManager::PreserveMem(size_t size) {\n  std::lock_guard<std::mutex> lock_guard(allocated_size_lock_);\n  auto mem_available = mem_quota_ - mem_allocated_;\n  if (size > mem_available) {\n    MBLOG_ERROR << \"Alloc size \" << size << \" > available mem \" << mem_available;\n    return false;\n  }\n\n  mem_allocated_ += size;\n  return true;\n}\n\nvoid DeviceMemoryManager::RestoreMem(size_t size) 
{\n  std::lock_guard<std::mutex> lock_guard(allocated_size_lock_);\n  mem_allocated_ -= size;\n}\n\nstd::shared_ptr<void> DeviceMemoryManager::AllocSharedPtr(size_t size,\n                                                          uint32_t mem_flags) {\n  void *ptr = Malloc(size, mem_flags);\n  if (ptr == nullptr) {\n    return nullptr;\n  }\n\n  std::shared_ptr<void> ret(\n      ptr, [this, mem_flags](void *ptr) { this->Free(ptr, mem_flags); });\n  return ret;\n}\n\nStatus DeviceMemoryManager::Write(const void *host_data, size_t host_size,\n                                  void *device_buffer, size_t device_size) {\n  return Copy(device_buffer, device_size, host_data, host_size,\n              DeviceMemoryCopyKind::FromHost);\n}\n\nStatus DeviceMemoryManager::Read(const void *device_data, size_t device_size,\n                                 void *host_buffer, size_t host_size) {\n  return Copy(host_buffer, host_size, device_data, device_size,\n              DeviceMemoryCopyKind::ToHost);\n}\n\nDeviceMemoryLog::DeviceMemoryLog(std::string memory_id, std::string user_id,\n                                 std::string device_id, size_t size)\n    : memory_id_(std::move(memory_id)),\n      user_id_(std::move(user_id)),\n      device_id_(std::move(device_id)),\n      size_(size) {}\n\nDeviceMemoryLog::~DeviceMemoryLog() = default;\n\nDeviceMemoryTrace::~DeviceMemoryTrace() = default;\n\nvoid DeviceMemoryTrace::TraceMemoryAlloc(const std::string &memory_id,\n                                         const std::string &user_id,\n                                         const std::string &device_id,\n                                         size_t size) {\n  std::lock_guard<std::mutex> lock(memory_logs_lock_);\n  auto mem_log =\n      std::make_shared<DeviceMemoryLog>(memory_id, user_id, device_id, size);\n  memory_logs_[memory_id] = mem_log;\n}\n\nvoid DeviceMemoryTrace::TraceMemoryFree(const std::string &memory_id) {\n  std::lock_guard<std::mutex> 
lock(memory_logs_lock_);\n  auto item = memory_logs_.find(memory_id);\n  if (item != memory_logs_.end()) {\n    memory_logs_.erase(item);\n  }\n}\n\nstd::shared_ptr<DeviceMemoryLog> DeviceMemoryTrace::GetMemoryLog(\n    const std::string &memory_id) {\n  std::lock_guard<std::mutex> lock(memory_logs_lock_);\n  auto item = memory_logs_.find(memory_id);\n  if (item == memory_logs_.end()) {\n    return nullptr;\n  }\n  return item->second;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/drivers/driver.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/driver.h\"\n\n#include <dlfcn.h>\n#include <fcntl.h>\n#include <poll.h>\n#include <stdio.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n\n#include <algorithm>\n#include <atomic>\n#include <fstream>\n#include <functional>\n#include <mutex>\n#include <nlohmann/json.hpp>\n#include <regex>\n#include <sstream>\n#include <thread>\n#include <utility>\n#include <vector>\n\n#include \"modelbox/base/config.h\"\n#include \"modelbox/base/driver_utils.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nconstexpr const char *DEFAULT_LD_CACHE = \"/etc/ld.so.cache\";\nconstexpr const int DRIVER_SCAN_TIMEOUT = 60 * 3;\n\nint SubProcessWaitAndLog(int fd) {\n  struct pollfd fdset;\n  std::string log;\n  char tmp[4096];\n  fdset.fd = fd;\n  fdset.events = POLLIN | POLLHUP;\n  time_t begin = 0;\n  time_t now;\n  if (fd <= 0) {\n    return 0;\n  }\n\n  Defer {\n    if (log.length() > 0) {\n      MBLOG_INFO << \"scan process log: \\n\" << log;\n    }\n  };\n\n  fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);\n\n  time(&begin);\n  while (true) {\n    int count = poll(&fdset, 1, 10000);\n    time(&now);\n\n    if (count < 0) {\n      if (errno == EINTR) {\n        continue;\n      }\n\n      return -1;\n    }\n\n    if (count == 0) {\n      if (now - begin >= DRIVER_SCAN_TIMEOUT) {\n       
 return 1;\n      }\n\n      continue;\n    }\n\n    int len = read(fd, tmp, sizeof(tmp));\n    if (len < 0) {\n      return -1;\n    }\n\n    if (len == 0) {\n      break;\n    }\n\n    log.append(tmp, len);\n    if (log.length() > 4096) {\n      MBLOG_INFO << \"scan process log: \\n\" << log;\n      log.clear();\n    }\n  }\n\n  return 0;\n}\n\n/**\n * @brief fork a process to Run func\n * @return func result\n */\ntemplate <typename func, typename... ts>\nStatus SubProcessRun(func &&fun, ts &&...params) {\n  const char *enable_debug = getenv(\"MODELBOX_DEBUG_DRIVER_SCAN\");\n  if (enable_debug == nullptr) {\n    int unused __attribute__((unused));\n    int fd[2] = {-1, -1};\n    unused = pipe(fd);\n    auto pid = fork();\n    if (pid == 0) {\n      signal(SIGSEGV, SIG_DFL);\n      signal(SIGTERM, SIG_DFL);\n      signal(SIGABRT, SIG_DFL);\n      close(fd[0]);\n      dup2(fd[1], 1);\n      close(fd[1]);\n      setbuf(stdout, nullptr);\n\n      // Keep old log avoid deadlock\n      auto oldlogger_keeper = klogger.GetLogger();\n\n      // output log to console\n      klogger.SetLogger(nullptr);\n      klogger.GetLogger()->SetLogLevel(oldlogger_keeper->GetLogLevel());\n      Status ret = fun(params...);\n      if (ret == STATUS_OK) {\n        _exit(0);\n      }\n\n      MBLOG_WARN << \"run function failed, errmsg: \" << ret.WrapErrormsgs();\n      _exit(1);\n    }\n\n    Defer { close(fd[0]); };\n    close(fd[1]);\n\n    if (pid == -1) {\n      const auto *err_msg = \"fork subprocess failed\";\n      MBLOG_ERROR << err_msg;\n      return {STATUS_NOMEM, err_msg};\n    }\n\n    MBLOG_INFO << \"wait for subprocess \" << pid << \" process finished\";\n    int status = 0;\n\n    auto ret = SubProcessWaitAndLog(fd[0]);\n    if (ret == 1) {\n      MBLOG_WARN << \"scan process timeout, kill scan process.\";\n      kill(pid, 9);\n    }\n\n    ret = waitpid(pid, &status, 0);\n    if (ret < 0) {\n      auto err_msg =\n          \"subprocess run failed, wait error, ret:\" + 
std::to_string(errno) +\n          \", msg: \" + StrError(errno);\n      MBLOG_ERROR << err_msg;\n      return {STATUS_FAULT, err_msg};\n    }\n\n    if (WIFSIGNALED(status)) {\n      const auto *err_msg = \"killed by signal\";\n      MBLOG_ERROR << err_msg;\n      return {STATUS_NORESPONSE, err_msg};\n    }\n\n    if (!WIFEXITED(status)) {\n      std::string err_msg =\n          \"process exit abnormal, ret = \" + std::to_string(status);\n      MBLOG_ERROR << err_msg;\n      return {STATUS_NORESPONSE, err_msg};\n    }\n\n    if (status != 0) {\n      const auto *err_msg = \"scan process exit result is fail.\";\n      MBLOG_ERROR << err_msg;\n      return {STATUS_FAULT, err_msg};\n    }\n  } else {\n    return fun(params...);\n  }\n\n  return STATUS_OK;\n}\n\nDriver::Driver() = default;\n\nDriver::~Driver() {\n  if (factory_count_ != 0) {\n    Abort(\"factory reference count is not zero\");\n  }\n}\n\nstd::string Drivers::default_scan_path_ = MODELBOX_DEFAULT_DRIVER_PATH;\nstd::string Drivers::default_driver_info_path_ = DEFAULT_SCAN_INFO;\n\nstatic std::shared_ptr<DriverHandler> handler =\n    std::make_shared<DriverHandler>();\n// Driver\nstd::string Driver::GetDriverFile() { return GetDriverDesc()->GetFilePath(); }\n\nbool Driver::IsVirtual() { return is_virtual_; }\n\nvoid Driver::SetVirtual(bool is_virtual) { is_virtual_ = is_virtual; }\n\nvoid Driver::CloseFactory() {\n  std::lock_guard<std::mutex> guard(mutex_);\n  CloseFactoryLocked();\n}\n\nvoid Driver::CloseFactoryLocked() {\n  factory_count_--;\n  if (factory_count_ > 0) {\n    return;\n  }\n\n  if (!driver_handler_) {\n    factory_ = nullptr;\n    return;\n  }\n\n  handler->handler_map_lock.lock();\n  auto driver_handler_info = handler->Get(driver_handler_);\n  if (driver_handler_info == nullptr) {\n    MBLOG_ERROR << \"close factory failed, get null driver_handler_info\";\n  }\n  handler->handler_map_lock.unlock();\n\n  auto no_delete = GetDriverDesc()->GetNoDelete();\n  
driver_handler_info->initialize_lock_.lock();\n  if (--driver_handler_info->initialize_count_ == 0) {\n    if (!no_delete) {\n      typedef void (*DriverFini)();\n      DriverFini driver_fini = nullptr;\n      driver_fini = (DriverFini)dlsym(driver_handler_, \"DriverFini\");\n      if (driver_fini) {\n        driver_fini();\n      }\n\n      handler->handler_map_lock.lock();\n      handler->Remove(driver_handler_);\n      handler->handler_map_lock.unlock();\n    } else {\n      driver_handler_info->initialize_count_++;\n    }\n  }\n\n  driver_handler_info->initialize_lock_.unlock();\n  factory_ = nullptr;\n  dlclose(driver_handler_);\n  driver_handler_ = nullptr;\n}\n\nDriverHandlerInfo::DriverHandlerInfo() = default;\n\nDriverHandlerInfo::~DriverHandlerInfo() = default;\n\nint DriverHandlerInfo::IncHanderRefcnt() { return ++handler_count_; }\n\nint DriverHandlerInfo::DecHanderRefcnt() { return --handler_count_; }\n\nstd::shared_ptr<DriverHandlerInfo> DriverHandler::Add(void *driver_handler) {\n  auto driver_handler_info = Get(driver_handler);\n  if (nullptr == driver_handler_info) {\n    std::shared_ptr<DriverHandlerInfo> driver_handler_info =\n        std::make_shared<DriverHandlerInfo>();\n    driver_handler_info->IncHanderRefcnt();\n    handler_map.emplace(driver_handler, driver_handler_info);\n    return driver_handler_info;\n  }\n\n  driver_handler_info->IncHanderRefcnt();\n  return driver_handler_info;\n}\n\nStatus DriverHandler::Remove(void *driver_handler) {\n  auto driver_handler_info = Get(driver_handler);\n  auto cnt = driver_handler_info->DecHanderRefcnt();\n  if (cnt == 0) {\n    handler_map.erase(driver_handler);\n  }\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<DriverHandlerInfo> DriverHandler::Get(void *driver_handler) {\n  auto driver_handler_info = handler_map.find(driver_handler);\n  if (driver_handler_info == handler_map.end()) {\n    return nullptr;\n  }\n\n  return driver_handler_info->second;\n}\n\nint Driver::GetMode(bool no_delete, bool 
global, bool deep_bind) {\n  unsigned int mode = RTLD_NOW;\n  if (no_delete) {\n    mode |= RTLD_NODELETE;\n  }\n\n#ifdef RTLD_DEEPBIND\n  if (deep_bind) {\n    mode |= RTLD_DEEPBIND;\n  }\n#endif\n\n  if (global) {\n    mode |= RTLD_GLOBAL;\n    return mode;\n  }\n\n  mode |= RTLD_LOCAL;\n  return static_cast<int>(mode);\n}\n\nstd::shared_ptr<DriverFactory> Driver::CreateFactory() {\n  std::lock_guard<std::mutex> guard(mutex_);\n  factory_count_++;\n  if (factory_count_ == 1) {\n    auto no_delete = GetDriverDesc()->GetNoDelete();\n    auto global = GetDriverDesc()->GetGlobal();\n    auto deep_bind = GetDriverDesc()->GetDeepBind();\n    typedef std::shared_ptr<DriverFactory> (*CreateDriverFactory)();\n    typedef Status (*DriverInit)();\n\n    CreateDriverFactory driver_func = nullptr;\n    DriverInit driver_init = nullptr;\n\n    int mode = GetMode(no_delete, global, deep_bind);\n\n    driver_handler_ = dlopen(GetDriverFile().c_str(), mode);\n    if (driver_handler_ == nullptr) {\n      const char *dl_errmsg = dlerror();\n      if (dl_errmsg == nullptr) {\n        dl_errmsg = \"no error msg\";\n      }\n      StatusError = {STATUS_INVALID, \"dlopen \" + GetDriverFile() +\n                                         \" failed, error: \" + dl_errmsg};\n      MBLOG_ERROR << StatusError.Errormsg();\n      CloseFactoryLocked();\n      return nullptr;\n    }\n\n    handler->handler_map_lock.lock();\n    auto handler_info = handler->Add(driver_handler_);\n    handler->handler_map_lock.unlock();\n\n    handler_info->initialize_lock_.lock();\n    if (++handler_info->initialize_count_ == 1) {\n      driver_init = (DriverInit)dlsym(driver_handler_, \"DriverInit\");\n      if (driver_init == nullptr) {\n        handler_info->initialize_count_--;\n        handler_info->initialize_lock_.unlock();\n        const char *dl_errmsg = dlerror();\n        if (dl_errmsg == nullptr) {\n          dl_errmsg = \"no error msg\";\n        }\n        StatusError = {STATUS_INVALID,\n             
          \"failed to dlsym function DriverInit in file: \" +\n                           GetDriverFile() + \", error: \" + dl_errmsg};\n        CloseFactoryLocked();\n        return nullptr;\n      }\n\n      Status init = driver_init();\n      if (init != STATUS_OK) {\n        handler_info->initialize_count_--;\n        handler_info->initialize_lock_.unlock();\n        StatusError = {init, \"driver init failed, driver:\" + GetDriverFile()};\n        MBLOG_ERROR << \"driverInit failed in \" << GetDriverFile() << \", \"\n                    << init;\n        CloseFactoryLocked();\n        return nullptr;\n      }\n    }\n\n    handler_info->initialize_lock_.unlock();\n\n    driver_func =\n        (CreateDriverFactory)dlsym(driver_handler_, \"CreateDriverFactory\");\n    if (driver_func == nullptr) {\n      auto *dl_err_msg = dlerror();\n      if (dl_err_msg != nullptr) {\n        StatusError = {STATUS_INVALID,\n                       \"failed to dlsym function DriverDescription in file: \" +\n                           GetDriverFile() + \", error: \" + dl_err_msg};\n      } else {\n        StatusError = {STATUS_INVALID,\n                       \"failed to dlsym function DriverDescription in file: \" +\n                           GetDriverFile() + \", error: no error msg.\"};\n      }\n\n      MBLOG_ERROR << StatusError.Errormsg();\n      CloseFactoryLocked();\n      return nullptr;\n    }\n\n    factory_ = driver_func();\n    if (!factory_) {\n      StatusError = {STATUS_FAULT,\n                     \"create driver failed, driver:\" + GetDriverFile()};\n      MBLOG_ERROR << StatusError.Errormsg();\n      CloseFactoryLocked();\n      return nullptr;\n    }\n  }\n\n  auto holder = shared_from_this();\n  std::shared_ptr<DriverFactory> child_factory(\n      factory_.get(),\n      [&, holder](DriverFactory *child_factory) { holder->CloseFactory(); });\n\n  return child_factory;\n}\n\nstd::shared_ptr<DriverDesc> Driver::GetDriverDesc() { return desc_; }\n\nvoid 
Driver::SetDriverDesc(std::shared_ptr<DriverDesc> desc) {\n  desc_ = std::move(desc);\n}\n\nDriverFactory::DriverFactory() = default;\nDriverFactory::~DriverFactory() = default;\n\nstd::shared_ptr<Driver> DriverFactory::GetDriver() {\n  return std::make_shared<Driver>();\n};\n\nvoid DriverFactory::SetDriver(const std::shared_ptr<Driver> &driver) {}\n\nVirtualDriverDesc::VirtualDriverDesc() = default;\n\nVirtualDriverDesc::~VirtualDriverDesc() = default;\n\nDriverDesc::DriverDesc() = default;\n\nDriverDesc::~DriverDesc() = default;\n\n// DriverDesc\nstd::string DriverDesc::GetClass() { return driver_class_; }\n\nstd::string DriverDesc::GetType() { return driver_type_; }\n\nstd::string DriverDesc::GetName() { return driver_name_; }\n\nstd::string DriverDesc::GetDescription() { return driver_description_; }\n\nstd::string DriverDesc::GetVersion() { return driver_version_; }\n\nstd::string DriverDesc::GetFilePath() { return driver_file_path_; }\n\nbool DriverDesc::GetNoDelete() { return driver_no_delete_; }\n\nbool DriverDesc::GetGlobal() { return global_; }\nbool DriverDesc::GetDeepBind() { return deep_bind_; }\n\nvoid DriverDesc::SetClass(const std::string &classname) {\n  driver_class_ = classname;\n}\n\nvoid DriverDesc::SetType(const std::string &type) { driver_type_ = type; }\n\nvoid DriverDesc::SetName(const std::string &name) { driver_name_ = name; }\n\nvoid DriverDesc::SetDescription(const std::string &description) {\n  driver_description_ = description;\n}\n\nvoid DriverDesc::SetNodelete(const bool &no_delete) {\n  driver_no_delete_ = no_delete;\n}\n\nvoid DriverDesc::SetGlobal(const bool &global) { global_ = global; }\nvoid DriverDesc::SetDeepBind(const bool &deep_bind) { deep_bind_ = deep_bind; }\n\nStatus DriverDesc::SetVersion(const std::string &version) {\n  if (version.empty()) {\n    return STATUS_SUCCESS;\n  }\n\n  Status status = CheckVersion(version);\n  if (status != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"SetVersion failed, the version model is: 
x.y.z (xyz should \"\n                   \"be integer), version is: \"\n                << version;\n    return status;\n  }\n\n  driver_version_ = version;\n  return STATUS_SUCCESS;\n}\n\nStatus DriverDesc::CheckVersion(const std::string &version) {\n  std::vector<std::string> version_;\n  std::istringstream iss(version);\n  std::string temp;\n  char split_char = '.';\n\n  if (version.find(split_char) == std::string::npos) {\n    return {STATUS_BADCONF, \"version is invalid\"};\n  }\n\n  while (std::getline(iss, temp, split_char)) {\n    version_.emplace_back(std::move(temp));\n  }\n\n  if (version_.size() != 3) {\n    // \"x\", \"y\", \"z\"\n    return {STATUS_BADCONF, \"version is invalid\"};\n  }\n\n  for (auto &item_version : version_) {\n    if (std::all_of(item_version.begin(), item_version.end(), ::isdigit)) {\n      continue;\n    }\n    return {STATUS_BADCONF, \"version is invalid\"};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid DriverDesc::SetFilePath(const std::string &file_path) {\n  driver_file_path_ = file_path;\n}\n\nDriversScanResultInfo::DriversScanResultInfo() = default;\nDriversScanResultInfo::~DriversScanResultInfo() {\n  load_success_info_.clear();\n  load_failed_info_.clear();\n}\n\nstd::list<std::string> &DriversScanResultInfo::GetLoadSuccessInfo() {\n  return load_success_info_;\n}\n\nstd::map<std::string, std::string> &DriversScanResultInfo::GetLoadFailedInfo() {\n  return load_failed_info_;\n}\n\n// Drivers\nDrivers::Drivers()\n    : drivers_scan_result_info_(std::make_shared<DriversScanResultInfo>()){};\nDrivers::~Drivers() = default;\n\nstd::shared_ptr<Drivers> Drivers::GetInstance() {\n  static std::shared_ptr<Drivers> drivers = std::make_shared<Drivers>();\n  return drivers;\n}\n\nvoid Drivers::PrintScanResult(\n    const std::list<std::string> &load_success_info,\n    const std::map<std::string, std::string> &load_failed_info) {\n  if (load_success_info.empty()) {\n    MBLOG_WARN << \"no driver load success, please check\";\n  } else 
{\n    MBLOG_INFO << \"load success drivers: count \" << load_success_info.size()\n               << \", show detail in debug level\";\n    for (const auto &info : load_success_info) {\n      MBLOG_DEBUG << info;\n    }\n  }\n\n  if (load_failed_info.empty()) {\n    MBLOG_INFO << \"no drivers load failed\";\n  } else {\n    MBLOG_WARN << \"load failed drivers: count \" << load_failed_info.size()\n               << \", detail:\";\n    for (const auto &info : load_failed_info) {\n      MBLOG_WARN << info.second;\n    }\n  }\n}\n\nStatus Drivers::Scan(const std::string &path, const std::string &filter) {\n  std::vector<std::string> drivers_list;\n  struct stat s;\n  auto ret = lstat(path.c_str(), &s);\n  if (ret) {\n    auto err_msg = \"lstat \" + path + \" failed, \" + StrError(errno);\n    return {STATUS_FAULT, err_msg};\n  }\n\n  if (!S_ISDIR(s.st_mode)) {\n    last_modify_time_sum_ += s.st_mtim.tv_sec;\n    auto status = Add(path);\n    if (status == STATUS_OK) {\n      drivers_scan_result_info_->GetLoadSuccessInfo().push_back(path);\n    } else {\n      drivers_scan_result_info_->GetLoadFailedInfo().emplace(path,\n                                                             status.Errormsg());\n    }\n    return status;\n  }\n\n  Status status = ListFiles(path, filter, &drivers_list);\n  if (status != STATUS_OK) {\n    auto err_msg = \"list directory:  \" + path + \"/\" + filter + \" failed, \";\n    return {status, err_msg};\n  }\n\n  if (drivers_list.size() == 0) {\n    return {STATUS_NOTFOUND, \"directory is empty\"};\n  }\n\n  for (auto &driver_file : drivers_list) {\n    struct stat buf;\n    auto ret = lstat(driver_file.c_str(), &buf);\n    if (ret) {\n      continue;\n    }\n\n    if (S_ISLNK(buf.st_mode)) {\n      continue;\n    }\n    last_modify_time_sum_ += buf.st_mtim.tv_sec;\n\n    auto result = Add(driver_file);\n    if (result == STATUS_OK) {\n      drivers_scan_result_info_->GetLoadSuccessInfo().push_back(driver_file);\n    } else {\n      
drivers_scan_result_info_->GetLoadFailedInfo().emplace(driver_file,\n                                                             result.Errormsg());\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus Drivers::Initialize(const std::shared_ptr<Configuration> &config) {\n  if (config == nullptr) {\n    return {STATUS_INVALID, \"config is empty.\"};\n  }\n  config_ = config;\n\n  driver_dirs_ = config_->GetStrings(DRIVER_DIR);\n  if (config_->GetBool(DRIVER_SKIP_DEFAULT, false) == false) {\n    driver_dirs_.push_back(default_scan_path_);\n  }\n\n  MBLOG_DEBUG << \"search Path:\";\n  for (const auto &dir : driver_dirs_) {\n    MBLOG_DEBUG << \" \" << dir;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Drivers::WriteScanInfo(const std::string &scan_info_path,\n                              const std::string &check_code) {\n  std::ofstream scan_info_file(scan_info_path);\n  if (!scan_info_file.is_open()) {\n    return {STATUS_FAULT, \"Open file \" + scan_info_path + \" failed, \" + StrError(errno)};\n  }\n\n  nlohmann::json dump_json;\n\n  struct stat buffer;\n  if (stat(DEFAULT_LD_CACHE, &buffer) == -1) {\n    dump_json[\"ld_cache_time\"] = 0;\n  } else {\n    dump_json[\"ld_cache_time\"] = buffer.st_mtim.tv_sec;\n  }\n\n  dump_json[\"check_code\"] = check_code;\n  std::time_t tt =\n      std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());\n  dump_json[\"version_record\"] = std::ctime(&tt);\n  nlohmann::json dump_driver_json_arr = nlohmann::json::array();\n\n  MBLOG_DEBUG << \"write info begin\";\n  for (auto &driver : drivers_list_) {\n    nlohmann::json dump_driver_json;\n    auto desc = driver->GetDriverDesc();\n    auto cls = desc->GetClass();\n    auto type = desc->GetType();\n    auto name = desc->GetName();\n    auto description = desc->GetDescription();\n    auto version = desc->GetVersion();\n    auto file_path = desc->GetFilePath();\n    auto no_delete = desc->GetNoDelete();\n    auto global = desc->GetGlobal();\n    auto deep_bind = 
desc->GetDeepBind();\n    dump_driver_json[\"class\"] = cls;\n    dump_driver_json[\"type\"] = type;\n    dump_driver_json[\"name\"] = name;\n    dump_driver_json[\"description\"] = description;\n    dump_driver_json[\"version\"] = version;\n    dump_driver_json[\"file_path\"] = file_path;\n    dump_driver_json[\"no_delete\"] = no_delete;\n    dump_driver_json[\"global\"] = global;\n    dump_driver_json[\"deep_bind\"] = deep_bind;\n    dump_driver_json[\"load_success\"] = true;\n    dump_driver_json_arr.push_back(dump_driver_json);\n  }\n\n  auto load_failed_info = drivers_scan_result_info_->GetLoadFailedInfo();\n  MBLOG_DEBUG << \"load_failed_info size \" << load_failed_info.size();\n  for (auto &fail_info : load_failed_info) {\n    nlohmann::json dump_driver_json;\n    dump_driver_json[\"file_path\"] = fail_info.first;\n    dump_driver_json[\"err_msg\"] = fail_info.second;\n    dump_driver_json[\"load_success\"] = false;\n    dump_driver_json_arr.push_back(dump_driver_json);\n  }\n\n  dump_json[\"scan_drivers\"] = dump_driver_json_arr;\n  scan_info_file << dump_json;\n  MBLOG_DEBUG << \"write info end\";\n\n  scan_info_file.close();\n  return STATUS_OK;\n}\n\nStatus Drivers::GatherScanInfo(const std::string &scan_path) {\n  std::ifstream scan_info_file(scan_path);\n  if (!scan_info_file.is_open()) {\n    MBLOG_ERROR << \"Open file \" << scan_path << \" for read failed\";\n    return STATUS_FAULT;\n  }\n\n  nlohmann::json dump_json;\n  try {\n    std::string ss((std::istreambuf_iterator<char>(scan_info_file)),\n                   std::istreambuf_iterator<char>());\n    dump_json = nlohmann::json::parse(ss);\n\n    auto driver_json_arr = dump_json[\"scan_drivers\"];\n    for (auto &driver_info : driver_json_arr) {\n      if (!driver_info[\"load_success\"]) {\n        continue;\n      }\n\n      auto driver = std::make_shared<Driver>();\n      auto desc = driver->GetDriverDesc();\n      desc->SetClass(driver_info[\"class\"]);\n      
desc->SetType(driver_info[\"type\"]);\n      desc->SetName(driver_info[\"name\"]);\n      desc->SetDescription(driver_info[\"description\"]);\n      desc->SetVersion(driver_info[\"version\"]);\n      desc->SetFilePath(driver_info[\"file_path\"]);\n      desc->SetNodelete(driver_info[\"no_delete\"]);\n      desc->SetGlobal(driver_info[\"global\"]);\n      desc->SetDeepBind(driver_info[\"deep_bind\"]);\n      auto tmp_driver = GetDriver(driver_info[\"class\"], driver_info[\"type\"],\n                                  driver_info[\"name\"], driver_info[\"version\"]);\n      if (tmp_driver == nullptr) {\n        drivers_list_.push_back(driver);\n      }\n    }\n  } catch (const std::exception &e) {\n    auto err_msg = \"gather scan info failed, err: \" + std::string(e.what());\n    MBLOG_ERROR << err_msg;\n    return {STATUS_FAULT, err_msg};\n  }\n\n  MBLOG_INFO << \"Gather scan info success, drivers count \"\n             << drivers_list_.size();\n  return STATUS_OK;\n}\n\nStatus Drivers::FillCheckInfo(std::string &file_check_node,\n                              std::unordered_map<std::string, bool> &file_map,\n                              int64_t &ld_cache_time) {\n  std::ifstream scan_info(default_driver_info_path_);\n  if (!scan_info.is_open()) {\n    MBLOG_ERROR << \"open \" << default_driver_info_path_ << \" failed.\";\n    return {STATUS_FAULT, \"scan info file is not found\"};\n  }\n\n  nlohmann::json dump_json;\n  try {\n    std::string ss((std::istreambuf_iterator<char>(scan_info)),\n                   std::istreambuf_iterator<char>());\n    dump_json = nlohmann::json::parse(ss);\n\n    file_check_node = dump_json[\"check_code\"];\n    ld_cache_time = dump_json[\"ld_cache_time\"];\n    auto driver_json_arr = dump_json[\"scan_drivers\"];\n    for (const auto &driver_info : driver_json_arr) {\n      if (file_map.find(driver_info[\"file_path\"]) != file_map.end()) {\n        continue;\n      }\n      file_map[driver_info[\"file_path\"]] = true;\n    }\n  } 
catch (const std::exception &e) {\n    MBLOG_WARN << \"filee check info parse \" << default_driver_info_path_\n               << \" failed, err: \" << e.what();\n    return {STATUS_INVALID,\n            std::string(\"parser scan info file failed, \") + e.what()};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nbool Drivers::CheckPathAndMagicCode() {\n  struct stat buffer;\n  if (stat(default_driver_info_path_.c_str(), &buffer) == -1) {\n    MBLOG_DEBUG << default_driver_info_path_ << \" does not exist.\";\n    return false;\n  }\n\n  if (stat(DEFAULT_LD_CACHE, &buffer) == -1) {\n    MBLOG_DEBUG << DEFAULT_LD_CACHE << \" does not exit.\";\n    return false;\n  }\n\n  std::string file_check_node;\n  std::unordered_map<std::string, bool> file_map;\n  int64_t ld_cache_time = 0;\n  auto ret = FillCheckInfo(file_check_node, file_map, ld_cache_time);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_DEBUG << \"get check info failed, file: \" << default_driver_info_path_\n                << \" error:\" << ret.Errormsg();\n    return false;\n  }\n\n  if (ld_cache_time != buffer.st_mtim.tv_sec) {\n    return false;\n  }\n\n  int64_t check_sum = 0;\n  for (const auto &dir : driver_dirs_) {\n    std::vector<std::string> drivers_list;\n    std::string filter = \"libmodelbox-*.so*\";\n    struct stat s;\n    auto ret = lstat(dir.c_str(), &s);\n    if (ret) {\n      MBLOG_ERROR << \"lstat \" << dir << \" failed, errno:\" << StrError(errno);\n      return false;\n    }\n\n    if (!S_ISDIR(s.st_mode)) {\n      check_sum += s.st_mtim.tv_sec;\n      continue;\n    }\n\n    Status status = ListFiles(dir, filter, &drivers_list);\n    if (status != STATUS_OK) {\n      auto err_msg = \"list directory:  \" + dir + \"/\";\n      err_msg += filter + \" failed, \";\n      if (status != STATUS_NOTFOUND) {\n        MBLOG_ERROR << err_msg << status.WrapErrormsgs();\n      }\n      return false;\n    }\n\n    if (drivers_list.size() == 0) {\n      continue;\n    }\n\n    for (auto &driver_file : drivers_list) 
{\n      struct stat buf;\n      auto ret = lstat(driver_file.c_str(), &buf);\n      if (ret) {\n        MBLOG_DEBUG << \"lstat \" << dir << \" failed, errno:\" << StrError(errno);\n        continue;\n      }\n\n      if (S_ISLNK(buf.st_mode)) {\n        continue;\n      }\n\n      if (file_map.count(driver_file) == 0) {\n        return false;\n      }\n\n      check_sum += buf.st_mtim.tv_sec;\n    }\n  }\n  auto check_code = GenerateKey(check_sum);\n  if (file_check_node != check_code) {\n    return false;\n  }\n\n  return true;\n}\n\nStatus Drivers::InnerScan() {\n  Status ret = STATUS_NOTFOUND;\n  for (const auto &dir : driver_dirs_) {\n    MBLOG_INFO << \"Scan dir: \" << dir;\n    ret = Scan(dir, \"libmodelbox-*.so*\");\n    if (!ret && ret != STATUS_NOTFOUND) {\n      MBLOG_WARN << \"scan \" << dir << \" failed, \" << ret.WrapErrormsgs();\n    }\n    ret = STATUS_OK;\n  }\n\n  auto check_code = GenerateKey(last_modify_time_sum_);\n\n  ret = WriteScanInfo(default_driver_info_path_, check_code);\n  if (ret != STATUS_OK) {\n    std::string err_msg = \"write scan info failed, \" + ret.WrapErrormsgs();\n    MBLOG_ERROR << err_msg;\n    return {ret, err_msg};\n  }\n\n  return ret;\n}\n\nvoid Drivers::PrintScanResults(const std::string &scan_path) {\n  std::ifstream scan_info_file(scan_path);\n  if (!scan_info_file.is_open()) {\n    MBLOG_ERROR << \"Open file \" << scan_path << \" for read failed\";\n    return;\n  }\n\n  nlohmann::json dump_json;\n  try {\n    std::string ss((std::istreambuf_iterator<char>(scan_info_file)),\n                   std::istreambuf_iterator<char>());\n    dump_json = nlohmann::json::parse(ss);\n\n    nlohmann::json dump_driver_json_arr = nlohmann::json::array();\n    dump_driver_json_arr = dump_json[\"scan_drivers\"];\n\n    std::list<std::string> load_success_info;\n    std::map<std::string, std::string> load_failed_info;\n\n    for (auto &dump_json : dump_driver_json_arr) {\n      if (dump_json[\"load_success\"]) {\n        
load_success_info.push_back(dump_json[\"file_path\"]);\n        continue;\n      }\n\n      load_failed_info.emplace(dump_json[\"file_path\"], dump_json[\"err_msg\"]);\n    }\n\n    PrintScanResult(load_success_info, load_failed_info);\n\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"print scan result failed, err: \" << e.what();\n    return;\n  }\n}\n\nStatus Drivers::ReadExcludeInfo() {\n  std::ifstream infile(scan_info_file_);\n  if (infile.fail()) {\n    std::string msg = \"read scan info file \" + scan_info_file_ + \" failed, \" +\n                      StrError(errno);\n    return {STATUS_IO, msg};\n  }\n\n  Defer { infile.close(); };\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n\n  if (access(data.c_str(), F_OK) != 0) {\n    return {STATUS_NOENT, \"not found info file\" + data};\n  }\n\n  scan_exclude_file_list_[data] = true;\n\n  return STATUS_OK;\n}\n\nStatus Drivers::Scan() {\n  Status status = STATUS_FAULT;\n  int retry_count = 0;\n  if (!CheckPathAndMagicCode()) {\n    while (true) {\n      scan_info_file_ = DRIVER_SCAN_INFO;\n      Defer { unlink(scan_info_file_.c_str()); };\n      auto exec_func = std::bind(&Drivers::InnerScan, this);\n      auto status = SubProcessRun(exec_func);\n\n      if (status == STATUS_NORESPONSE) {\n        MBLOG_WARN << \"Scan process may crash, retry.\";\n        std::this_thread::sleep_for(std::chrono::milliseconds(100));\n        if (ReadExcludeInfo() != STATUS_OK) {\n          retry_count++;\n          if (retry_count < 3) {\n            continue;\n          }\n\n          break;\n        }\n\n        continue;\n      }\n\n      scan_exclude_file_list_.clear();\n      if (status != STATUS_OK) {\n        auto err_msg =\n            \"fork subprocess run scan so failed, \" + status.WrapErrormsgs();\n        MBLOG_ERROR << err_msg;\n        return {STATUS_FAULT, err_msg};\n      }\n\n      break;\n    }\n  }\n\n  status = 
GatherScanInfo(default_driver_info_path_);\n  if (status != STATUS_OK) {\n    const auto *err_msg = \"gather scan info failed\";\n    MBLOG_ERROR << err_msg;\n    return {STATUS_FAULT, err_msg};\n  }\n\n  MBLOG_INFO << \"begin scan virtual drivers\";\n  status = VirtualDriverScan();\n  MBLOG_INFO << \"end scan virtual drivers\";\n\n  return status;\n}\n\nvoid Drivers::Clear() {\n  for (auto iter = drivers_list_.begin(); iter != drivers_list_.end();) {\n    if (iter->get()->IsVirtual() != true) {\n      iter++;\n      continue;\n    }\n\n    drivers_list_.erase(iter);\n  }\n  virtual_driver_manager_list_.clear();\n  drivers_list_.clear();\n  driver_dirs_.clear();\n  config_ = nullptr;\n  last_modify_time_sum_ = 0;\n}\n\nStatus Drivers::VirtualDriverScan() {\n  for (auto &driver : GetDriverListByClass(DRIVER_CLASS_VIRTUAL)) {\n    std::shared_ptr<VirtualDriverManager> factory =\n        std::dynamic_pointer_cast<VirtualDriverManager>(\n            driver->CreateFactory());\n\n    if (factory == nullptr) {\n      continue;\n    }\n\n    auto result = factory->Init(*this);\n    if (result != STATUS_SUCCESS) {\n      MBLOG_WARN << \"virtual driver init failed, \" << result;\n    }\n\n    result = factory->Scan(driver_dirs_);\n    if (result != STATUS_SUCCESS) {\n      MBLOG_WARN << \"scan failed, \" << result;\n    }\n\n    for (const auto &virtualDriver : factory->GetAllDriverList()) {\n      drivers_list_.push_back(virtualDriver);\n    }\n\n    virtual_driver_manager_list_.push_back(factory);\n  }\n\n  return STATUS_OK;\n}\n\nStatus Drivers::Add(const std::string &file) {\n  typedef void (*DriverDescription)(DriverDesc *);\n  DriverDescription driver_func = nullptr;\n\n  if (scan_exclude_file_list_.find(file) != scan_exclude_file_list_.end()) {\n    MBLOG_WARN << \"Skip scan file: \" << file;\n    return STATUS_OK;\n  }\n\n  if (!scan_info_file_.empty()) {\n    std::ofstream out(scan_info_file_, std::ios::trunc);\n    if (!out.fail()) {\n      
chmod(scan_info_file_.c_str(), 0600);\n      Defer { out.close(); };\n      out << file;\n    }\n  }\n\n  Defer {\n    if (!scan_info_file_.empty()) {\n      unlink(scan_info_file_.c_str());\n    }\n  };\n\n  void *driver_handler = dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    std::string errmsg = file + \" : dlopen failed, \";\n    auto *dl_errmsg = dlerror();\n    if (dl_errmsg != nullptr) {\n      errmsg += dl_errmsg;\n    } else {\n      errmsg += \"no error msg.\";\n    }\n\n    MBLOG_WARN << errmsg;\n    return {STATUS_INVALID, errmsg};\n  }\n\n  driver_func = (DriverDescription)dlsym(driver_handler, \"DriverDescription\");\n  if (driver_func == nullptr) {\n    std::string errmsg = file + \" : dlsym DriverDescription failed, \";\n    auto *dl_errmsg = dlerror();\n    if (dl_errmsg != nullptr) {\n      errmsg += dl_errmsg;\n    } else {\n      errmsg += \"no error msg.\";\n    }\n\n    dlclose(driver_handler);\n    MBLOG_WARN << errmsg;\n    return {STATUS_NOTSUPPORT, errmsg};\n  }\n\n  std::shared_ptr<Driver> driver = std::make_shared<Driver>();\n  std::shared_ptr<DriverDesc> desc = driver->GetDriverDesc();\n  driver_func(desc.get());\n  if (DriversContains(drivers_list_, driver) == true) {\n    MBLOG_DEBUG\n        << \"add driver: \" << file\n        << \" failed, it already has the same function library in libraries.\";\n    dlclose(driver_handler);\n    return {STATUS_EXIST, file + \" : driver is already registered.\"};\n  }\n  desc->SetFilePath(file);\n  auto no_delete = desc->GetNoDelete();\n  if (no_delete) {\n    auto *driver_handler_sec =\n        dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL | RTLD_NODELETE);\n    if (driver_handler_sec != nullptr) {\n      dlclose(driver_handler_sec);\n    } else {\n      MBLOG_WARN << \"dlopen \" << file << \" as no delete failed.\";\n    }\n  }\n\n  drivers_list_.push_back(driver);\n  dlclose(driver_handler);\n\n  MBLOG_DEBUG << \"add driver:\";\n  MBLOG_DEBUG << \"  name: 
\" << desc->GetName();\n  MBLOG_DEBUG << \"  class: \" << desc->GetClass();\n  MBLOG_DEBUG << \"  type: \" << desc->GetType();\n  MBLOG_DEBUG << \"  description: \" << desc->GetDescription();\n  MBLOG_DEBUG << \"  version: \" << desc->GetVersion();\n  MBLOG_DEBUG << \"  driver file: \" << file;\n  return STATUS_OK;\n}\n\nstd::vector<std::shared_ptr<Driver>> Drivers::GetAllDriverList() {\n  return drivers_list_;\n}\n\nstd::vector<std::shared_ptr<Driver>> Drivers::GetDriverListByClass(\n    const std::string &driver_class) {\n  std::vector<std::shared_ptr<Driver>> drivers_class_list;\n  for (const auto &driver : drivers_list_) {\n    std::shared_ptr<DriverDesc> desc_temp = driver->GetDriverDesc();\n    if (desc_temp->GetClass() == driver_class) {\n      drivers_class_list.push_back(driver);\n    }\n  }\n\n  return drivers_class_list;\n}\n\nstd::vector<std::string> Drivers::GetDriverClassList() {\n  std::vector<std::string> driver_class_list;\n  for (auto &driver : drivers_list_) {\n    std::shared_ptr<DriverDesc> desc = driver->GetDriverDesc();\n    driver_class_list.push_back(desc->GetClass());\n  }\n\n  RemoveSameElements(&driver_class_list);\n  return driver_class_list;\n}\n\nstd::vector<std::string> Drivers::GetDriverTypeList(\n    const std::string &driver_class) {\n  std::vector<std::string> driver_type_list;\n  for (auto &driver : drivers_list_) {\n    std::shared_ptr<DriverDesc> desc = driver->GetDriverDesc();\n    if (desc->GetClass() == driver_class) {\n      driver_type_list.push_back(desc->GetType());\n    }\n  }\n\n  RemoveSameElements(&driver_type_list);\n  return driver_type_list;\n}\n\nstd::vector<std::string> Drivers::GetDriverNameList(\n    const std::string &driver_class, const std::string &driver_type) {\n  std::vector<std::string> driver_name_list;\n  for (auto &driver : drivers_list_) {\n    std::shared_ptr<DriverDesc> desc = driver->GetDriverDesc();\n    if (desc->GetClass() == driver_class && desc->GetType() == driver_type) {\n      
driver_name_list.push_back(desc->GetName());\n    }\n  }\n\n  RemoveSameElements(&driver_name_list);\n  return driver_name_list;\n}\n\nstd::shared_ptr<Driver> Drivers::GetDriver(const std::string &driver_class,\n                                           const std::string &driver_type,\n                                           const std::string &driver_name,\n                                           const std::string &driver_version) {\n  std::vector<std::string> driver_version_list;\n  std::shared_ptr<Driver> temp_driver = nullptr;\n  for (auto driver : drivers_list_) {\n    std::shared_ptr<DriverDesc> desc = driver->GetDriverDesc();\n    if (desc->GetClass() != driver_class || desc->GetType() != driver_type ||\n        desc->GetName() != driver_name) {\n      continue;\n    }\n\n    if (desc->GetVersion() == driver_version) {\n      return driver;\n    }\n\n    if (temp_driver == nullptr) {\n      temp_driver = driver;\n      continue;\n    }\n\n    if (temp_driver->GetDriverDesc()->GetVersion() <\n        driver->GetDriverDesc()->GetVersion()) {\n      temp_driver = driver;\n      continue;\n    }\n  }\n\n  return temp_driver;\n}\n\nvoid Drivers::RemoveSameElements(std::vector<std::string> *driver_list) {\n  sort(driver_list->begin(), driver_list->end());\n  driver_list->erase(unique(driver_list->begin(), driver_list->end()),\n                     driver_list->end());\n}\n\nbool Drivers::DriversContains(\n    const std::vector<std::shared_ptr<Driver>> &drivers_list,\n    const std::shared_ptr<Driver> &driver) {\n  std::shared_ptr<DriverDesc> target_desc = driver->GetDriverDesc();\n  for (const auto &driver_item : drivers_list) {\n    std::shared_ptr<DriverDesc> desc = driver_item->GetDriverDesc();\n\n    if (desc->GetClass() != target_desc->GetClass()) {\n      continue;\n    }\n\n    if (desc->GetType() != target_desc->GetType()) {\n      continue;\n    }\n\n    if (desc->GetName() != target_desc->GetName()) {\n      continue;\n    }\n\n    if 
(desc->GetDescription() != target_desc->GetDescription()) {\n      continue;\n    }\n\n    if (desc->GetVersion() != target_desc->GetVersion()) {\n      continue;\n    }\n\n    return true;\n  }\n\n  return false;\n}\n\nvoid Drivers::SetDefaultScanPath(const std::string &path) {\n  default_scan_path_ = path;\n}\n\nvoid Drivers::SetDefaultInfoPath(const std::string &path) {\n  default_driver_info_path_ = path;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/drivers/driver_utils.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/driver_utils.h\"\n\n#include \"modelbox/base/crypto.h\"\n\nnamespace modelbox {\n\nstd::string GenerateKey(int64_t check_sum) {\n  std::vector<unsigned char> output;\n  auto status = HmacEncode(\"sha256\", &check_sum, sizeof(uint64_t), &output);\n  if (!status) {\n    StatusError = status;\n    return \"\";\n  }\n\n  return HmacToString(output.data(), output.size());\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/drivers/register_flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/register_flowunit.h\"\n\n#include <utility>\n\nconstexpr const char *VIRTUAL_TYPE = \"register_flowunit\";\n\nnamespace modelbox {\n\nRegisterFlowUnit::RegisterFlowUnit(const std::string &name) { name_ = name; }\n\nRegisterFlowUnit::~RegisterFlowUnit() = default;\nStatus RegisterFlowUnit::Open(const std::shared_ptr<Configuration> &config) {\n  return STATUS_OK;\n}\n\nStatus RegisterFlowUnit::Close() { return STATUS_OK; }\n\nvoid RegisterFlowUnit::SetCallBack(\n    std::function<Status(std::shared_ptr<DataContext>)> callback) {\n  callback_ = std::move(callback);\n}\n\nstd::function<Status(std::shared_ptr<DataContext>)>\nRegisterFlowUnit::GetCallBack() {\n  return callback_;\n}\n\nStatus RegisterFlowUnit::Process(std::shared_ptr<DataContext> data_context) {\n  auto callback = GetCallBack();\n  if (callback) {\n    return callback(data_context);\n  }\n  return STATUS_INVALID;\n}\n\nRegisterFlowUnitFactory::RegisterFlowUnitFactory() = default;\n\nRegisterFlowUnitFactory::~RegisterFlowUnitFactory() = default;\n\nRegisterFlowUnitFactory::RegisterFlowUnitFactory(\n    std::string unit_name, std::vector<std::string> inputs,\n    std::vector<std::string> outputs,\n    std::function<Status(std::shared_ptr<DataContext>)> callback)\n    : unit_name_(std::move(unit_name)),\n      
input_ports_(std::move(inputs)),\n      output_ports_(std::move(outputs)),\n      callback_(std::move(callback)) {\n  if (STATUS_SUCCESS != Init()) {\n    MBLOG_ERROR << \"failed init RegisterFlowUnitFactory\";\n  }\n}\n\nstd::map<std::string, std::shared_ptr<FlowUnitDesc>>\nRegisterFlowUnitFactory::FlowUnitProbe() {\n  return desc_map_;\n}\n\nstd::string RegisterFlowUnitFactory::GetFlowUnitFactoryType() {\n  return FLOWUNIT_TYPE;\n}\n\nstd::string RegisterFlowUnitFactory::GetFlowUnitFactoryName() {\n  return unit_name_;\n}\n\nstd::shared_ptr<FlowUnit> RegisterFlowUnitFactory::CreateFlowUnit(\n    const std::string &name, const std::string &unit_type) {\n  auto register_flowunit = std::make_shared<RegisterFlowUnit>(name);\n  auto iter = desc_map_.find(name);\n  if (iter == desc_map_.end()) {\n    MBLOG_ERROR << \"failed find flowunit desc for \" << name;\n    return nullptr;\n  }\n  register_flowunit->SetFlowUnitDesc(desc_map_[name]);\n  register_flowunit->SetCallBack(callback_);\n  return register_flowunit;\n}\n\nStatus RegisterFlowUnitFactory::Init() {\n  std::shared_ptr<DriverDesc> driver_desc = std::make_shared<DriverDesc>();\n  std::shared_ptr<Driver> driver = std::make_shared<Driver>();\n  driver->SetDriverDesc(driver_desc);\n\n  std::shared_ptr<FlowUnitDesc> desc = std::make_shared<FlowUnitDesc>();\n  if (desc == nullptr) {\n    return STATUS_FAULT;\n  }\n  SetDriver(driver);\n\n  desc->SetFlowUnitName(unit_name_);\n  for (auto &port_name : input_ports_) {\n    desc->AddFlowUnitInput(FlowUnitInput(port_name, \"cpu\"));\n  }\n\n  for (auto &port_name : output_ports_) {\n    desc->AddFlowUnitOutput(FlowUnitOutput(port_name));\n  }\n  desc->SetVirtualType(VIRTUAL_TYPE);\n  desc_map_.emplace(unit_name_, desc);\n  return STATUS_SUCCESS;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/drivers/virtual_driver.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <dlfcn.h>\n#include <modelbox/base/config.h>\n\n#include <memory>\n#include <utility>\n\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n#include \"toml.hpp\"\n\nnamespace modelbox {\n\nVirtualDriverManager::VirtualDriverManager() = default;\n\nVirtualDriverManager::~VirtualDriverManager() = default;\n\nStatus VirtualDriverManager::Add(const std::string &file) { return STATUS_OK; };\n\nStatus VirtualDriverManager::Init(Drivers &driver) { return STATUS_OK; };\n\nStatus VirtualDriverManager::Scan(const std::vector<std::string> &scan_dirs) {\n  auto ret = STATUS_OK;\n  for (const auto &dir : scan_dirs) {\n    ret = Scan(dir);\n    if (ret != STATUS_OK) {\n      MBLOG_WARN << \"Scan \" << dir << \" failed, \" << ret;\n    }\n    ret = STATUS_OK;\n  }\n\n  return ret;\n}\n\nStatus VirtualDriverManager::Scan(const std::string &path) { return STATUS_OK; }\n\nstd::vector<std::shared_ptr<VirtualDriver>>\nVirtualDriverManager::GetAllDriverList() {\n  return drivers_list_;\n}\n\nvoid VirtualDriverManager::Clear() { drivers_list_.clear(); };\n\nstd::shared_ptr<VirtualDriverDesc> VirtualDriver::GetVirtualDriverDesc() {\n  return virtual_driver_desc_;\n}\n\nvoid VirtualDriver::SetVirtualDriverDesc(\n    
std::shared_ptr<VirtualDriverDesc> desc) {\n  virtual_driver_desc_ = std::move(desc);\n}\n\nstd::vector<std::shared_ptr<Driver>> VirtualDriver::GetBindDriver() {\n  return std::vector<std::shared_ptr<Driver>>();\n}\n\nstd::shared_ptr<DriverFactory> VirtualDriver::CreateFactory() {\n  return nullptr;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/graph_manager/graph_manager.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/graph_manager.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nstd::vector<std::string> SplitByCommaIgnoreQuotes(const std::string &str) {\n  std::string value;\n  std::vector<std::string> values;\n  char quoteChar = 0;\n\n  for (char ch : str) {\n    if (quoteChar == '\\\\') {\n      value.push_back(ch);\n      quoteChar = 0;\n      continue;\n    }\n\n    if (quoteChar && ch != quoteChar) {\n      value.push_back(ch);\n      continue;\n    }\n\n    switch (ch) {\n      case '\\'':\n      case '\\\"':\n      case '\\\\':\n        quoteChar = quoteChar ? 
0 : ch;\n        break;\n      case ',':\n        if (!value.empty()) {\n          values.push_back(value);\n          value.clear();\n        }\n        break;\n      default:\n        value.push_back(ch);\n        break;\n    }\n  }\n\n  if (!value.empty()) {\n    values.push_back(value);\n  }\n\n  return values;\n}\n\nGCNode::GCNode() = default;\n\nGCNode::~GCNode() = default;\n\nStatus GCNode::Init(const std::string &name,\n                    const std::shared_ptr<GCGraph> &root_graph) {\n  name_ = name;\n  root_graph_ = root_graph;\n  ConfigurationBuilder builder;\n  configuration_ = builder.Build();\n\n  return STATUS_OK;\n}\n\nstd::string GCNode::GetNodeName() const { return name_; }\n\nstd::shared_ptr<Configuration> GCNode::GetConfiguration() const {\n  return configuration_;\n}\n\nstd::shared_ptr<const std::set<std::string>> GCNode::GetInputPorts() const {\n  auto input_ports =\n      std::make_shared<const std::set<std::string>>(input_ports_);\n  return input_ports;\n}\n\nstd::shared_ptr<const std::set<std::string>> GCNode::GetOutputPorts() const {\n  auto output_ports =\n      std::make_shared<const std::set<std::string>>(output_ports_);\n  return output_ports;\n}\n\nstd::shared_ptr<GCGraph> GCNode::GetRootGraph() const {\n  return root_graph_.lock();\n}\n\nstd::string GCNode::GetNodeType() const { return type_; }\n\nvoid GCNode::SetNodeType(std::string type) { type_ = std::move(type); }\n\nvoid GCNode::SetConfiguration(const std::string &key,\n                              const std::string &value) {\n  auto sub_str_list = SplitByCommaIgnoreQuotes(value);\n  for (auto &str : sub_str_list) {\n    Configuration::Trim(&str);\n  }\n  configuration_->SetProperty(key, sub_str_list);\n}\n\nStatus GCNode::SetInputPort(const std::string &port) {\n  input_ports_.insert(port);\n  return STATUS_OK;\n}\n\nStatus GCNode::SetOutputPort(const std::string &port) {\n  output_ports_.insert(port);\n  return STATUS_OK;\n}\n\nvoid 
GCNode::SetOutDataHandler(std::shared_ptr<DataHandler> &data_handler) {\n  out_data_handler_ = data_handler;\n}\n\nstd::shared_ptr<DataHandler> GCNode::GetBindDataHandler() {\n  return out_data_handler_.lock();\n}\n\nGCEdge::GCEdge() = default;\n\nGCEdge::~GCEdge() = default;\n\nStatus GCEdge::Init(const std::shared_ptr<GCGraph> &root_graph) {\n  root_graph_ = root_graph;\n  ConfigurationBuilder builder;\n  configuration_ = builder.Build();\n  return STATUS_OK;\n}\n\nconst std::string &GCEdge::GetHeadOutPort() const { return head_out_port_; }\n\nconst std::string &GCEdge::GetTailInPort() const { return tail_in_port_; }\n\nstd::shared_ptr<GCNode> GCEdge::GetHeadNode() const { return head_; }\n\nstd::shared_ptr<GCNode> GCEdge::GetTailNode() const { return tail_; }\n\nstd::shared_ptr<Configuration> GCEdge::GetConfiguration() const {\n  return configuration_;\n}\n\nstd::shared_ptr<GCGraph> GCEdge::GetRootGraph() const {\n  return root_graph_.lock();\n}\n\nStatus GCEdge::SetHeadNode(std::shared_ptr<GCNode> node) {\n  head_ = std::move(node);\n  return STATUS_OK;\n}\n\nStatus GCEdge::SetTailNode(std::shared_ptr<GCNode> node) {\n  tail_ = std::move(node);\n  return STATUS_OK;\n}\n\nStatus GCEdge::SetHeadPort(std::string port) {\n  head_out_port_ = std::move(port);\n  return STATUS_OK;\n}\n\nStatus GCEdge::SetTailPort(std::string port) {\n  tail_in_port_ = std::move(port);\n  return STATUS_OK;\n}\n\nvoid GCEdge::SetConfiguration(const std::string &key,\n                              const std::string &value) {\n  auto sub_str_list = SplitByCommaIgnoreQuotes(value);\n  for (auto &str : sub_str_list) {\n    Configuration::Trim(&str);\n  }\n  configuration_->SetProperty(key, sub_str_list);\n}\n\nGCGraph::GCGraph() = default;\n\nGCGraph::~GCGraph() = default;\n\nStatus GCGraph::Init(const std::shared_ptr<GCGraph> &root_graph) {\n  root_graph_ = root_graph;\n  ConfigurationBuilder builder;\n  configuration_ = builder.Build();\n  return STATUS_OK;\n}\n\nvoid 
GCGraph::SetGraphName(const std::string &name) { name_ = name; };\n\nconst std::string &GCGraph::GetGraphName() const { return name_; }\n\nstd::shared_ptr<GCGraph> GCGraph::GetRootGraph() const {\n  return root_graph_.lock();\n}\n\nStatus GCGraph::AddSubGraph(const std::shared_ptr<GCGraph> &subgraph) {\n  std::string key = subgraph->GetGraphName();\n  subgraphs_.insert(\n      std::pair<std::string, const std::shared_ptr<GCGraph>>(key, subgraph));\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GCGraph> GCGraph::GetSubGraph(const std::string &name) const {\n  auto elem = subgraphs_.find(name);\n  if (elem == subgraphs_.end()) {\n    return nullptr;\n  }\n  return elem->second;\n}\n\nstd::map<std::string, const std::shared_ptr<GCGraph>> GCGraph::GetAllSubGraphs()\n    const {\n  return subgraphs_;\n}\n\nvoid GCGraph::ShowAllSubGraph() const {}\n\nStatus GCGraph::AddNode(const std::shared_ptr<GCNode> &node) {\n  std::string key = node->GetNodeName();\n  nodes_.insert(\n      std::pair<std::string, const std::shared_ptr<GCNode>>(key, node));\n  return STATUS_OK;\n}\n\nStatus GCGraph::SetFirstNode(const std::shared_ptr<GCNode> &node) {\n  first_nodes_.push_back(node);\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GCNode> GCGraph::GetNode(const std::string &name) const {\n  auto elem = nodes_.find(name);\n  if (elem == nodes_.end()) {\n    return nullptr;\n  }\n  return elem->second;\n}\n\nstd::vector<std::shared_ptr<GCNode>> GCGraph::GetFirstNodes() {\n  return first_nodes_;\n}\n\nstd::map<std::string, const std::shared_ptr<GCNode>> GCGraph::GetAllNodes()\n    const {\n  return nodes_;\n}\n\nvoid GCGraph::ShowAllNode() const {\n  for (const auto &elem : nodes_) {\n    MBLOG_INFO << \"node name : \" << elem.second->GetNodeName();\n\n    std::shared_ptr<const std::set<std::string>> input_ports;\n    input_ports = elem.second->GetInputPorts();\n    for (const auto &input_port : *input_ports) {\n      MBLOG_INFO << \"input port : \" << input_port;\n    }\n\n    
std::shared_ptr<const std::set<std::string>> output_ports;\n    output_ports = elem.second->GetOutputPorts();\n    for (const auto &output_port : *output_ports) {\n      MBLOG_INFO << \"output port : \" << output_port;\n    }\n  }\n}\n\nStatus GCGraph::AddEdge(const std::shared_ptr<GCEdge> &edge) {\n  std::string key =\n      edge->GetHeadNode()->GetNodeName() + \":\" + edge->GetHeadOutPort() + \"-\" +\n      edge->GetTailNode()->GetNodeName() + \":\" + edge->GetTailInPort();\n  edges_.insert(\n      std::pair<std::string, const std::shared_ptr<GCEdge>>(key, edge));\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GCEdge> GCGraph::GetEdge(const std::string &name) const {\n  auto elem = edges_.find(name);\n  if (elem == edges_.end()) {\n    return nullptr;\n  }\n  return elem->second;\n}\n\nstd::map<std::string, const std::shared_ptr<GCEdge>> GCGraph::GetAllEdges()\n    const {\n  return edges_;\n}\n\nvoid GCGraph::ShowAllEdge() const {\n  for (const auto &elem : edges_) {\n    MBLOG_DEBUG << elem.second->GetHeadNode()->GetNodeName() << \":\"\n                << elem.second->GetHeadOutPort() << \"->\"\n                << elem.second->GetTailNode()->GetNodeName() << \":\"\n                << elem.second->GetTailInPort();\n  }\n}\n\nstd::shared_ptr<Configuration> GCGraph::GetConfiguration() const {\n  return configuration_;\n}\n\nvoid GCGraph::SetConfiguration(const std::string &key,\n                               const std::string &value) {\n  auto sub_str_list = SplitByCommaIgnoreQuotes(value);\n  for (auto &str : sub_str_list) {\n    Configuration::Trim(&str);\n  }\n  configuration_->SetProperty(key, sub_str_list);\n}\n\nGraphConfig::GraphConfig() = default;\n\nGraphConfig::~GraphConfig() = default;\n\nGraphConfigFactory::GraphConfigFactory() = default;\n\nGraphConfigFactory::~GraphConfigFactory() = default;\n\nGraphConfigManager::GraphConfigManager() = default;\n\nGraphConfigManager::~GraphConfigManager() = default;\n\nStatus GraphConfigManager::Initialize(\n    const 
std::shared_ptr<Drivers> &driver,\n    const std::shared_ptr<Configuration> &config) {\n  auto ret = InitGraphConfigFactory(driver);\n  if (STATUS_OK != ret) {\n    MBLOG_ERROR << \"Init Graph config factory failed\";\n    return STATUS_FAULT;\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GraphConfig> GraphConfigManager::LoadGraphConfig(\n    const std::shared_ptr<Configuration> &config) {\n  std::shared_ptr<GraphConfig> graph_config;\n  auto graph_format = config->GetString(\"graph.format\", \"\");\n  if (graph_format == \"\") {\n    MBLOG_ERROR << \"graph.format is empty.\";\n    StatusError = {STATUS_BADCONF, \"graph.format is empty\"};\n    return nullptr;\n  }\n\n  MBLOG_INFO << \"graph.format : \" << graph_format;\n  auto graph_conf_factory =\n      GetGraphConfFactory(graph_format);  // from config get this type.\n  if (graph_conf_factory == nullptr) {\n    std::string types;\n    for (auto &type : GetSupportTypes()) {\n      if (types.length() == 0) {\n        types = type;\n        continue;\n      }\n\n      types += \", \" + type;\n    }\n    MBLOG_ERROR << \"Graph format not supported, support type: \" << types;\n    StatusError = {STATUS_NOTSUPPORT,\n                   \"Graph format not supported, support type:\" + types};\n    return nullptr;\n  }\n\n  auto graph_graphconf_array = config->GetStrings(\"graph.graphconf\");\n  std::string graph_graphconf;\n  for (const auto &line : graph_graphconf_array) {\n    graph_graphconf += line + \"\\n\";\n  }\n\n  if (graph_graphconf != \"\") {\n    graph_config = graph_conf_factory->CreateGraphConfigFromStr(\n        graph_graphconf);  // from config get graph config value\n    return graph_config;\n  }\n\n  auto graph_graphconf_file_path =\n      config->GetString(\"graph.graphconffilepath\", \"\");\n  MBLOG_INFO << \"graph.graphconffilepath : \" << graph_graphconf_file_path;\n  if (graph_graphconf_file_path == \"\") {\n    MBLOG_ERROR << \"get graph config and graph config file path all failed, \"\n         
          \"value is null\";\n    StatusError = {STATUS_NOTFOUND, \"graph config path is null.\"};\n    return nullptr;\n  }\n\n  graph_config = graph_conf_factory->CreateGraphConfigFromFile(\n      graph_graphconf_file_path);  // from config get graph config file value\n\n  return graph_config;\n}\n\nGraphConfigManager &GraphConfigManager::GetInstance() {\n  static GraphConfigManager graph_config_manager;\n  return graph_config_manager;\n}\n\nStatus GraphConfigManager::Register(\n    const std::shared_ptr<GraphConfigFactory> &factory) {\n  graph_conf_factories_.insert(\n      std::pair<std::string, std::shared_ptr<GraphConfigFactory>>(\n          factory->GetGraphConfFactoryType(), factory));\n  return STATUS_OK;\n}\n\nstd::map<std::string, const std::shared_ptr<GraphConfigFactory>>\nGraphConfigManager::GetGraphConfFactoryList() {\n  return graph_conf_factories_;\n}\n\nstd::vector<std::string> GraphConfigManager::GetSupportTypes() {\n  std::vector<std::string> ret;\n  for (auto &type : graph_conf_factories_) {\n    ret.push_back(type.first);\n  }\n\n  return ret;\n}\n\nstd::shared_ptr<GraphConfigFactory> GraphConfigManager::GetGraphConfFactory(\n    const std::string &type) {\n  auto graph_conf_map = graph_conf_factories_.find(type);\n  if (graph_conf_map == graph_conf_factories_.end()) {\n    MBLOG_ERROR << \"do not find graph config factory type \" << type;\n    return nullptr;\n  }\n  return graph_conf_map->second;\n}\n\nStatus GraphConfigManager::InitGraphConfigFactory(\n    const std::shared_ptr<Drivers> &driver) {\n  std::vector<std::shared_ptr<Driver>> driver_list =\n      driver->GetDriverListByClass(DRIVER_CLASS_GRAPHCONF);\n  std::shared_ptr<DriverDesc> desc;\n  for (auto &device_driver : driver_list) {\n    auto temp_factory = device_driver->CreateFactory();\n    if (nullptr == temp_factory) {\n      continue;\n    }\n\n    std::shared_ptr<GraphConfigFactory> graph_conf_factory =\n        std::dynamic_pointer_cast<GraphConfigFactory>(temp_factory);\n\n  
  graph_conf_factories_.insert(std::make_pair(\n        graph_conf_factory->GetGraphConfFactoryType(), graph_conf_factory));\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GraphConfig> GraphConfigManager::GetGraphConfig(\n    const std::string &graph_conf_name) {\n  auto graph_conf = graph_conf_list_.find(graph_conf_name);\n  return graph_conf->second;\n}\n\nstd::map<std::string, const std::shared_ptr<GraphConfig>>\nGraphConfigManager::GetGraphConfList() {\n  return graph_conf_list_;\n}\n\nStatus GraphConfigManager::DeleteGraphConfig(\n    const std::string &graph_conf_name) {\n  auto graph_conf = graph_conf_list_.find(graph_conf_name);\n  if (graph_conf == graph_conf_list_.end()) {\n    return STATUS_OK;\n  }\n\n  graph_conf_list_.erase(graph_conf);\n  return STATUS_OK;\n}\n\nvoid GraphConfigManager::Clear() {\n  graph_conf_list_.clear();\n  graph_conf_factories_.clear();\n}\n\nvoid GCGraph::SetConfiguration(std::shared_ptr<Configuration> &config) {\n  configuration_ = config;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/any.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ANY_H_\n#define MODELBOX_ANY_H_\n\n#include <modelbox/base/log.h>\n\n#include <map>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <type_traits>\n#include <typeinfo>\n#include <utility>\n\nstatic std::map<size_t, size_t> type_hash_code_map = {\n    {typeid(int).hash_code(), typeid(int64_t).hash_code()},\n    {typeid(float).hash_code(), typeid(double).hash_code()},\n    {typeid(int64_t).hash_code(), typeid(int).hash_code()},\n    {typeid(double).hash_code(), typeid(float).hash_code()},\n};\n\nnamespace modelbox {\nclass Any {\n public:\n  // NOLINTNEXTLINE\n  Any() noexcept {};\n\n  virtual ~Any() noexcept { delete value_ptr_; }\n\n  template <\n      typename ValueType,\n      typename = typename std::enable_if<\n          !std::is_same<typename std::decay<ValueType>::type, Any>::value &&\n          std::is_copy_constructible<\n              typename std::decay<ValueType>::type>::value>::type>\n  explicit Any(ValueType&& value)\n      : value_ptr_(new AnyImpl<typename std::decay<ValueType>::type>(value)) {}\n\n  Any(const Any& other)\n      : value_ptr_(other.value_ptr_ ? 
other.value_ptr_->clone() : nullptr) {}\n\n  Any(Any&& other) noexcept : value_ptr_(other.value_ptr_) {\n    other.value_ptr_ = nullptr;\n  }\n\n  Any& swap(Any& rhs) noexcept {\n    std::swap(value_ptr_, rhs.value_ptr_);\n    return *this;\n  }\n\n  template <typename ValueType>\n  Any& operator=(Any&& rhs) {\n    *this = Any{std::forward<ValueType>(rhs)};\n    return *this;\n  }\n\n  Any& operator=(const Any& rhs) {\n    *this = Any{rhs};\n    return *this;\n  }\n\n  Any& operator=(Any&& rhs) noexcept {\n    reset();\n    value_ptr_ = rhs.value_ptr_;\n    rhs.value_ptr_ = nullptr;\n    return *this;\n  }\n\n  template <typename ValueType>\n  bool update(ValueType&& rhs) noexcept {\n    if (type() != typeid(ValueType)) {\n      return false;\n    }\n\n    *this = Any{std::forward<ValueType>(rhs)};\n    return true;\n  }\n\n  bool has_value() const noexcept { return value_ptr_ != nullptr; }\n\n  const std::type_info& type() const noexcept {\n    return has_value() ? value_ptr_->type() : typeid(void);\n  }\n\n  void reset() noexcept {\n    delete value_ptr_;\n    value_ptr_ = nullptr;\n  }\n\n  template <typename ValueType>\n  const ValueType* _Cast() const noexcept {\n    if (has_value()) {\n      return &(static_cast<Any::AnyImpl<ValueType>*>(value_ptr_)->value_);\n    }\n    return nullptr;\n  }\n\n  template <typename ValueType>\n  ValueType* _Cast() noexcept {\n    return const_cast<ValueType*>(\n        const_cast<const Any*>(this)->_Cast<ValueType>());\n  }\n\n protected:\n  struct AnyImplBase {\n    virtual ~AnyImplBase() noexcept = default;\n\n    virtual const std::type_info& type() const noexcept = 0;\n\n    virtual AnyImplBase* clone() const = 0;\n  };\n\n  template <typename ValueType>\n  struct AnyImpl : public AnyImplBase {\n    AnyImpl(ValueType value) : value_(std::move(value)) {}\n\n    AnyImpl(ValueType&& value) : value_(std::move(value)) {}\n\n    const std::type_info& type() const noexcept override {\n      return typeid(ValueType);\n    }\n\n   
 AnyImplBase* clone() const override { return new AnyImpl(value_); }\n\n    ValueType value_;\n  };\n\n private:\n  AnyImplBase* value_ptr_{nullptr};\n};\n\ntemplate <typename ValueType>\nValueType* any_cast(Any* any) noexcept {\n  return any->_Cast<ValueType>();\n}\n\ntemplate <typename ValueType>\nconst ValueType* any_cast(const Any* any) noexcept {\n  return any->_Cast<ValueType>();\n}\n\ntemplate <typename ValueType>\nValueType any_cast(Any& any) {\n  auto* const result = any_cast<typename std::decay<ValueType>::type>(&any);\n\n  if (!result) {\n    throw std::bad_cast();\n  }\n\n  return static_cast<ValueType>(*result);\n}\n\nclass Collection {\n public:\n  Collection();\n\n  virtual ~Collection();\n\n  template <typename T>\n  void Set(const std::string& key, T&& value) {\n    entrys_[key] = Any(value);\n  }\n\n  void Set(const std::string& key, const char* value);\n\n  template <typename T>\n  bool Get(const std::string& key, T&& value) {\n    if (entrys_.find(key) == entrys_.end()) {\n      // could be a normal condition\n      MBLOG_DEBUG << \"Key \" << key << \" not found\";\n      return false;\n    }\n\n    if (!CanConvert(typeid(T).hash_code(), entrys_[key].type().hash_code())) {\n      // always a bad condition\n      MBLOG_ERROR << \"Get value for \" << key\n                  << \" failed, type mismatch, param type \" << typeid(T).name()\n                  << \", stored value type \" << entrys_[key].type().name();\n      return false;\n    }\n\n    value = any_cast<T>(entrys_[key]);\n    return true;\n  }\n\n  std::tuple<Any*, bool> Get(const std::string& key);\n\n  void Merge(const Collection& other, bool is_override = false);\n  bool CanConvert(size_t cast_code, size_t origin_code);\n\n private:\n  std::map<std::string, Any> entrys_;\n};\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/base64_simd.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_BASE64_SIMD_H\n#define MODELBOX_BASE64_SIMD_H\n\n#include <modelbox/base/status.h>\n\nnamespace modelbox {\n\n/**\n * @brief base64 encode by SIMD\n * @param input input data\n * @param input_len input data len\n * @param output encode base64 string\n * @return whether success\n */\nStatus Base64EncodeSIMD(const uint8_t *input, size_t input_len,\n                        std::string *output);\n}  // namespace modelbox\n\n#endif  // MODELBOX_BASE64_SIMD_H"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/blocking_queue.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_BLOCKINGQUEUE_H_\n#define MODELBOX_BLOCKINGQUEUE_H_\n\n#include <condition_variable>\n#include <functional>\n#include <mutex>\n#include <queue>\n#include <thread>\n#include <vector>\n\nnamespace modelbox {\n\n/**\n * @brief QueueType: FifoQueue, PriorityQueue, StablePriorityQueue.\n *   FifoQueue: fisrt in, first out.\n *   PriorityQueue: priority queue.\n *   StablePriorityQueue: stable ordered queue, when priority same, first in,\n *                        first out.\n */\n\n/**\n * @brief StableElement: to keep elements in order which have same priority.\n */\ntemplate <typename T, typename Compare = std::less<T>>\nstruct StableElement {\n  /**\n   * @brief StableElement\n   */\n  using StableElementT = StableElement<T, Compare>;\n\n  /** @brief construct order element */\n  StableElement(T&& o, std::size_t c) : object_(std::move(o)), order_(c) {}\n\n  /** @brief construct order element, from element and size*/\n  StableElement(const T& o, std::size_t c) : object_(o), order_(c) {}\n\n  /** @brief destructor order element */\n  virtual ~StableElement() = default;\n\n  /** @brief override of () */\n  explicit operator T() { return object_; }\n\n  /** @brief override of < */\n  bool operator<(const StableElementT& rhs) const {\n    /* if element priority is same, compare with order */\n    if 
(comp_(object_, rhs.object_) == false &&\n        comp_(rhs.object_, object_) == false) {\n      return order_ >= rhs.order_;\n    }\n\n    /* compare two objects */\n    return comp_(object_, rhs.object_);\n  }\n\n  /// @brief object\n  T object_;\n  /// @brief order inserted\n  std::size_t order_;\n  /// @brief compare function\n  Compare comp_;\n};\n\n/** @brief Stable priority queue */\ntemplate <typename T, typename Compare = std::less<T>>\nclass StablePriorityQueue\n    : public std::priority_queue<StableElement<T, Compare>,\n                                 std::vector<StableElement<T, Compare>>,\n                                 std::less<StableElement<T, Compare>>> {\n  using stableT = StableElement<T, Compare>;\n  using std::priority_queue<\n      stableT, std::vector<StableElement<T, Compare>>,\n      std::less<StableElement<T, Compare>>>::priority_queue;\n\n public:\n  /// @brief constructor of priority queue.\n  StablePriorityQueue() = default;\n  /// @brief destructor of priority queue.\n  virtual ~StablePriorityQueue() = default;\n  /// @brief front of the queue\n  const T& front() { return this->c.front().object_; }\n  /// @brief top of the queue\n  const T& top() { return this->c.front().object_; }\n  /// @brief push value into queue\n  void push(const T& value) {\n    /* push back and increase order */\n    this->c.push_back(stableT(value, counter_++));\n    std::push_heap(this->c.begin(), this->c.end(), this->comp);\n  }\n\n  /// @brief push value into queue\n  void push(T&& value) {\n    this->c.push_back(stableT(std::move(value), counter_++));\n    std::push_heap(this->c.begin(), this->c.end(), this->comp);\n  }\n\n  /// @brief emplace value into queue\n  template <class... Args>\n  void emplace(Args&&... 
args) {\n    /* emplace element */\n    this->c.emplace_back(T(std::forward<Args>(args)...), counter_++);\n    std::push_heap(this->c.begin(), this->c.end(), this->comp);\n  }\n\n  /// @brief pop from queue\n  void pop() {\n    /* pop element */\n    std::pop_heap(this->c.begin(), this->c.end(), this->comp);\n    this->c.pop_back();\n    /* if queue is empty, reset counter, this will avoid counter overflow */\n    if (this->empty()) {\n      counter_ = 0;\n    }\n  }\n\n protected:\n  /**\n   * @brief counter, used as order.\n   * counter will never overflow, as counter will reset when queue is empty.\n   */\n  std::size_t counter_ = 0;\n};\n\n/** @brief Std priority_queue wrap */\ntemplate <typename T, typename Sequence = std::vector<T>,\n          typename Compare = std::less<typename Sequence::value_type>>\nclass PriorityQueue : public std::priority_queue<T, Sequence, Compare> {\n public:\n  /**\n   * @brief front of the queue\n   */\n  T front() const { return this->c.front(); }\n};\n\n/** @brief Std queue wrap */\ntemplate <typename T>\nclass FifoQueue : public std::queue<T> {};\n\n/**\n * @brief Blocking queue, Blocking caller, when queue is empty, or full.\n */\ntemplate <typename T, typename Queue = FifoQueue<T>,\n          typename Sequence = std::vector<T>>\nclass BlockingQueue {\n public:\n  /**\n   * @brief A blocking queue.\n   * @param capacity capacity, default is SIZE_MAX\n   */\n  explicit BlockingQueue(size_t capacity = SIZE_MAX) : capacity_(capacity) {\n    if (capacity <= 0) {\n      capacity_ = SIZE_MAX;\n    }\n  }\n\n  virtual ~BlockingQueue() { Close(); }\n\n  /**\n   * @brief Return element size\n   * @return element size\n   */\n  size_t Size() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    return queue_.size();\n  }\n\n  /**\n   * @brief Set queue capacity\n   * @param capacity queue capacity, langer than 0.\n   */\n  void SetCapacity(size_t capacity) {\n    if (capacity <= 0) {\n      return;\n    }\n\n    capacity_ = capacity;\n 
 }\n\n  /**\n   * @brief Get queue capacity\n   */\n  size_t GetCapacity() const { return capacity_; }\n\n  /**\n   * @brief Get remain capacity\n   */\n  size_t RemainCapacity() {\n    size_t queue_size = Size();\n    if (queue_size > capacity_) {\n      return 0;\n    }\n\n    return capacity_ - queue_size;\n  }\n\n  /**\n   * @brief Clear queue\n   */\n  void Clear() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    Queue empty;\n    std::swap(queue_, empty);\n  }\n\n  /**\n   * @brief Close queue\n   */\n  void Close() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    Queue empty;\n    std::swap(queue_, empty);\n    shutdown_ = true;\n    not_empty_.notify_all();\n    not_full_.notify_all();\n  }\n\n  /**\n   * @brief Is queue full\n   * @return true of false\n   */\n  bool Full() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    return queue_.size() >= capacity_;\n  }\n\n  /**\n   * @brief Is queue empty\n   * @return true of false\n   */\n  bool Empty() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    return queue_.empty();\n  }\n\n  /**\n   * @brief Wake up waiters\n   */\n  void Wakeup() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    need_wakeup_ = true;\n    not_empty_.notify_all();\n    not_full_.notify_all();\n  }\n\n  /**\n   * @brief Push item into queue\n   * @param elem item reference\n   * @param timeout\n   *   timeout > 0 if queue is full, blocking for timeout(ms), and return false.\n   *   timeout = 0 if queue is full, blocking until queue is not full.\n   *   timeout < 0 if queue is full, return immediately.\n   * @return true or false\n   */\n  bool Push(const T& elem, int timeout) {\n    std::unique_lock<std::mutex> lock(mutex_);\n    if (PushQueue(lock, elem, timeout) == false) {\n      return false;\n    }\n\n    not_empty_.notify_one();\n\n    return true;\n  }\n\n  /**\n   * @brief Push a sequence link vector into queue\n   * @param elems sequence reference, return number of pushed elements.\n   * @param 
timeout\n   *   timeout > 0 if queue is full, blocking for timeout(ms), and return pushed\n   * number.\n   *   timeout = 0 if queue is full, blocking until queue is not full.\n   *   timeout < 0 if queue is full, return immediately.\n   * @return: number of pushed elems.\n   */\n  size_t Push(Sequence* elems, int timeout = 0) {\n    size_t num = 0;\n    std::unique_lock<std::mutex> lock(mutex_);\n    num = PushQueue(lock, elems, timeout);\n\n    if (num <= 0) {\n      return num;\n    }\n\n    not_empty_.notify_all();\n\n    return num;\n  }\n\n  /**\n   * @brief Push a sequence link vector into queue at once\n   * @param elems sequence reference, return number of elems.\n   * @param timeout\n   *   timeout > 0 if queue is full, blocking for timeout(ms), and return false.\n   *   timeout = 0 if queue is full, blocking until queue is not full.\n   *   timeout < 0 if queue is full, return immediately.\n   * @return: number of elems.\n   */\n  size_t PushBatch(Sequence* elems, int timeout = 0) {\n    size_t ret = 0;\n    std::unique_lock<std::mutex> lock(mutex_);\n    ret = PushQueue(lock, elems, timeout, elems->size());\n    not_empty_.notify_all();\n    return ret;\n  }\n\n  /**\n   * @brief Force push a sequence link vector into queue at once, ignore\n   * capacity\n   * @param elems sequence reference, return number of elems.\n   * @param wait_when_full wait when queue is full.\n   * @param timeout\n   *   timeout > 0 if queue is full, blocking for timeout(ms), and return false.\n   *   timeout = 0 if queue is full, blocking until queue is not full.\n   *   timeout < 0 if queue is full, return immediately.\n   * @return: number of elems.\n   */\n  size_t PushBatchForce(Sequence* elems, bool wait_when_full = false,\n                        int timeout = 0) {\n    size_t ret = 0;\n    std::unique_lock<std::mutex> lock(mutex_);\n    ret = PushQueueForce(lock, elems, wait_when_full, timeout);\n    not_empty_.notify_all();\n    return ret;\n  }\n\n  /**\n   * @brief 
Push item into queue, blocking if queue is full.\n   * @param elem item reference.\n   * @return true or false\n   */\n  virtual bool Push(const T& elem) { return Push(elem, 0); }\n\n  /**\n   * @brief Force push item into queue.\n   * @param elem item reference.\n   * @return true or false\n   */\n  virtual bool PushForce(const T& elem) {\n    bool ret = false;\n    std::unique_lock<std::mutex> lock(mutex_);\n    ret = PushQueueForce(elem);\n    not_empty_.notify_one();\n\n    return ret;\n  }\n\n  /**\n   * @brief Get an item from queue, blocking if queue is empty.\n   * @param elem item data.\n   * @return true or false\n   */\n  virtual bool Pop(T* elem) { return Pop(elem, 0); }\n\n  /**\n   * @brief Get an item from queue, or return false if queue is empty, never\n   * blocking.\n   * @param elem item data.\n   * @return true or false\n   */\n  bool Poll(T* elem) { return Pop(elem, -1); }\n\n  /**\n   * @brief Get many items from queue, or return false if queue is empty, never\n   * blocking.\n   * @param elems item data in vector.\n   * @return number element returned.\n   */\n  size_t Poll(Sequence* elems) { return Pop(elems, -1); }\n\n  /**\n   * @brief Get an item from queue, if queue is empty, blocking for timeout(ms)\n   * and return false\n   * @param elem item\n   * @param timeout\n   *   timeout > 0 if queue is empty, blocking for timeout(ms) and return false.\n   *   timeout = 0 if queue is empty, blocking until queue is not empty.\n   *   timeout < 0 if queue is empty, return immediately.\n   * @return is pop success\n   */\n  bool Pop(T* elem, int timeout) {\n    std::unique_lock<std::mutex> lock(mutex_);\n    if (PopQueue(lock, elem, timeout) == false) {\n      return false;\n    }\n    /* wakeup waiter */\n    not_full_.notify_one();\n\n    return true;\n  }\n\n  /**\n   * @brief Get an sequence from queue, if queue is empty, blocking for\n   * timeout(ms) and return number of poped elems.\n   * @param elems item\n   * @param timeout\n   *   
timeout > 0 if queue is empty, blocking for timeout(ms) and return false.\n   *   timeout = 0 if queue is empty, blocking until queue is not empty.\n   *   timeout < 0 if queue is empty, return immediately. return: number of\n   * poped elems.\n   * @param maxsize max pop items number.\n   * @return number of poped elemets.\n   */\n  virtual size_t Pop(Sequence* elems, int timeout = 0, size_t maxsize = 0) {\n    size_t num = 0;\n    std::unique_lock<std::mutex> lock(mutex_);\n    num = PopQueue(lock, elems, timeout, maxsize);\n    if (num < 0) {\n      return num;\n    }\n\n    /* wakeup waiter */\n    not_full_.notify_all();\n\n    return num;\n  }\n\n  /**\n   * @brief Pop a sequence of elems from queue at once\n   * @param elems sequence reference, return number of elems.\n   * @param timeout\n   *   timeout > 0 if queue is empty, blocking for timeout(ms), and return\n   * false. timeout = 0 if queue is empty, blocking until queue is empty.\n   *   timeout < 0 if queue is empty, return immediately.\n   * @param max_elems max elements number returned.\n   * @return: return number of elems.\n   */\n  size_t PopBatch(Sequence* elems, int timeout = 0, uint32_t max_elems = -1) {\n    size_t num = 0;\n    std::unique_lock<std::mutex> lock(mutex_);\n    num = PopQueueBatch(lock, elems, timeout, max_elems);\n\n    /* wakeup waiter */\n    not_full_.notify_all();\n\n    return num;\n  }\n\n  /**\n   * @brief Get item and not remove from queue, return false if queue is empty\n   * @param elem element to save.\n   * @return is get front success or not.\n   */\n  bool Front(T* elem) {\n    std::unique_lock<std::mutex> lock(mutex_);\n    if (WaitQueue(lock, -1) == false) {\n      return false;\n    }\n\n    *elem = queue_.front();\n    return true;\n  }\n\n  /**\n   * @brief Shutdown queue, push will wakeup and return false\n   */\n  void Shutdown() {\n    std::unique_lock<std::mutex> lock(mutex_);\n    shutdown_ = true;\n    not_full_.notify_all();\n    
not_empty_.notify_all();\n  }\n\n  /**\n   * @brief Queue is shutdown or not\n   * @return is queue shutdown\n   */\n  bool IsShutdown() { return shutdown_; }\n\n protected:\n  /**\n   * @brief Wait queue.\n   * @param cond condition\n   * @param lock queue lock\n   * @param timeout wait timeout\n   * @param wait_cond wait function\n   * @return wait success\n   */\n  bool Wait(std::condition_variable& cond, std::unique_lock<std::mutex>& lock,\n            int timeout, const std::function<bool()> &wait_cond) {\n    bool ret = false;\n    auto cond_func = [&]() { return need_wakeup_ || wait_cond(); };\n\n    waiter_number_++;\n    if (timeout > 0) {\n      /* if timeout is set, wait for timeout and return false */\n      ret = cond.wait_for(lock, std::chrono::milliseconds(timeout), cond_func);\n      if (ret == false) {\n        errno = ETIMEDOUT;\n      }\n    } else if (timeout == 0) {\n      /* if wait forever, do wait */\n      cond.wait(lock, cond_func);\n      ret = true;\n    } else {\n      /* do not wait */\n      ret = cond_func();\n    }\n\n    waiter_number_--;\n    /* if wake by Wakeup */\n    if (need_wakeup_) {\n      /* if all waiters have been woken up */\n      if (waiter_number_ == 0) {\n        need_wakeup_ = false;\n      }\n      errno = EINTR;\n      ret = false;\n    }\n\n    /* wakeup */\n    return ret;\n  }\n\n  /**\n   * @brief Pop queue elements with batch\n   * @param lock queue lock\n   * @param elems elements list\n   * @param timeout wait timeout\n   * @param max_elems max pop elements number, -1 means all\n   * @return poped elements number\n   */\n  size_t PopQueueBatch(std::unique_lock<std::mutex>& lock, Sequence* elems,\n                       int timeout = 0, uint32_t max_elems = -1) {\n    size_t num = 0;\n\n    /* Loop and get same element */\n    while (queue_.size() > 0 && num < max_elems) {\n      auto time_wait = -1;\n      if (num == 0) {\n        time_wait = timeout;\n      }\n\n      if (WaitQueue(lock, time_wait) == 
false) {\n        return num;\n      }\n\n      /* get and remove element */\n      elems->emplace_back(std::move(queue_.front()));\n      queue_.pop();\n      num++;\n    }\n\n    return num;\n  }\n\n  /**\n   * @brief Push one element into queue\n   * @param lock queue lock\n   * @param elem element\n   * @param timeout wait timeout\n   * @return push result\n   */\n  bool PushQueue(std::unique_lock<std::mutex>& lock, const T& elem,\n                 int timeout) {\n    /* shutdown or queue has enough space */\n    auto wait_check = [&]() {\n      return shutdown_ == true || capacity_ > queue_.size();\n    };\n\n    /* if queue already shutdown, return false */\n    if (shutdown_) {\n      errno = ESHUTDOWN;\n      return false;\n    }\n\n    /* check if queue has space, or wait. */\n    if (Wait(not_full_, lock, timeout, wait_check) == false) {\n      return false;\n    }\n\n    /* if wakeup after shutdown, just return false */\n    if (shutdown_) {\n      errno = ESHUTDOWN;\n      return false;\n    }\n\n    queue_.emplace(elem);\n\n    return true;\n  }\n\n  /**\n   * @brief Push element list into queue\n   * @param lock queue lock\n   * @param elems elements list\n   * @param timeout wait timeout\n   * @param expect_space push when free space greater than expect_space\n   * @return pushed element number\n   */\n  size_t PushQueue(std::unique_lock<std::mutex>& lock, Sequence* elems,\n                   int timeout = 0, size_t expect_space = 1) {\n    size_t push_num = 0;\n\n    /* shutdown or queue has enough space */\n    auto wait_check = [&]() {\n      return shutdown_ == true || queue_.size() == 0 ||\n             capacity_ >= expect_space + queue_.size();\n    };\n\n    /* check if queue has space, or wait. 
*/\n    if (Wait(not_full_, lock, timeout, wait_check) == false) {\n      return false;\n    }\n\n    for (auto it = elems->begin(); it != elems->end(); it++) {\n      if (PushQueue(lock, *it, timeout) == false) {\n        break;\n      }\n\n      push_num++;\n\n      /* when get first element, stop waiting */\n      if (timeout >= 0) {\n        timeout = -1;\n      }\n    }\n\n    /* remove elems from origin sequence */\n    elems->erase(elems->begin(), elems->begin() + push_num);\n\n    return push_num;\n  }\n\n  /**\n   * @brief force Push element list into queue\n   * @param lock queue lock\n   * @param elems elements list\n   * @param wait_when_full wait whether queue is full\n   * @param timeout wait timeout\n   * @return pushed element number\n   */\n  size_t PushQueueForce(std::unique_lock<std::mutex>& lock, Sequence* elems,\n                        bool wait_when_full = false, int timeout = 0) {\n    size_t push_num = 0;\n\n    /* shutdown or queue is not full*/\n    auto wait_check = [&]() {\n      if (wait_when_full == true) {\n        if (capacity_ >= 1 + queue_.size()) {\n          return true;\n        }\n      }\n\n      return wait_when_full == false || shutdown_ == true || queue_.size() == 0;\n    };\n\n    /* check if queue has space, or wait. 
*/\n    if (Wait(not_full_, lock, timeout, wait_check) == false) {\n      return false;\n    }\n\n    for (auto it = elems->begin(); it != elems->end(); it++) {\n      queue_.emplace(*it);\n      push_num++;\n    }\n\n    /* remove elems from origin sequence */\n    elems->erase(elems->begin(), elems->begin() + push_num);\n\n    return push_num;\n  }\n\n  /**\n   * @brief Force push item into queue.\n   * @param elem item reference.\n   * @return true or false\n   */\n  bool PushQueueForce(const T& elem) {\n    /* if queue already shutdown, return false */\n    if (shutdown_) {\n      errno = ESHUTDOWN;\n      return false;\n    }\n\n    queue_.emplace(elem);\n\n    return true;\n  }\n\n  /**\n   * @brief Get an element from queue\n   * @param lock queue lock\n   * @param elem element poped\n   * @param timeout wait for timeout\n   * @return pop result\n   */\n  bool PopQueue(std::unique_lock<std::mutex>& lock, T* elem, int timeout) {\n    /* if queue is empty, try wait. */\n    if (WaitQueue(lock, timeout) == false) {\n      return false;\n    }\n\n    /* Get and remove element */\n    *elem = std::move(queue_.front());\n    queue_.pop();\n\n    return true;\n  }\n\n  /**\n   * @brief Get an sequence from queue, if queue is empty, blocking for\n   * timeout(ms) and return number of poped elems.\n   * @param lock queue lock\n   * @param elems item\n   * @param timeout wait for timeout\n   * @param maxsize max pop items number.\n   * @return number of poped elemets.\n   */\n  size_t PopQueue(std::unique_lock<std::mutex>& lock, Sequence* elems,\n                  int timeout = 0, size_t maxsize = 0) {\n    size_t num = 0;\n\n    while (true) {\n      if (WaitQueue(lock, timeout) == false) {\n        return num;\n      }\n\n      /* remove element */\n      elems->emplace_back(std::move(queue_.front()));\n      queue_.pop();\n      num++;\n      if (maxsize > 0 && num >= maxsize) {\n        return num;\n      }\n\n      /* when get first element, disable wait */\n     
 if (timeout >= 0) {\n        timeout = -1;\n      }\n    }\n\n    return num;\n  }\n\n  /**\n   * @brief wait for queue ready\n   * @param lock queue lock\n   * @param timeout wait for timeout\n   * @return whether wait success.\n   */\n  bool WaitQueue(std::unique_lock<std::mutex>& lock, int timeout) {\n    /* wait check has data */\n    auto wait_check = [&]() { return shutdown_ == true || queue_.size() > 0; };\n\n    /* return false when shutdown */\n    if (shutdown_ == true && queue_.size() == 0) {\n      errno = ESHUTDOWN;\n      return false;\n    }\n\n    /* check if queue has data, or wait. */\n    if (Wait(not_empty_, lock, timeout, wait_check) == false) {\n      return false;\n    }\n\n    /* shutdown may be called, return false */\n    if (queue_.size() == 0) {\n      errno = ESHUTDOWN;\n      return false;\n    }\n\n    return true;\n  }\n\n  /**\n   * @brief Queue lock\n   */\n  std::mutex mutex_;\n\n  /**\n   * @brief Queue\n   */\n  Queue queue_;\n\n  /**\n   * @brief Wakeup when queue is full\n   */\n  std::condition_variable not_full_;\n\n  /**\n   * @brief Wakeup when queue is empty\n   */\n  std::condition_variable not_empty_;\n\n private:\n  size_t capacity_ = 0;\n  bool need_wakeup_ = false;\n  int waiter_number_ = 0;\n  bool shutdown_ = false;\n};\n\n/**\n * @brief Priority Blocking queue, blocking queue with stable priority.\n * Support geting all elements with same priority in one batch.\n */\ntemplate <typename T, typename Compare = std::less<T>,\n          typename Sequence = std::vector<T>>\nclass PriorityBlockingQueue\n    : public BlockingQueue<T, StablePriorityQueue<T, Compare>, Sequence> {\n  using _BlockingQueue =\n      BlockingQueue<T, StablePriorityQueue<T, Compare>, Sequence>;\n\n public:\n  /**\n   * @brief A priority blocking queue.\n   * @param capacity queue capacity.\n   */\n  explicit PriorityBlockingQueue(size_t capacity = SIZE_MAX)\n      : _BlockingQueue(capacity) {}\n\n  ~PriorityBlockingQueue() override = 
default;\n\n  /**\n   * @brief Pop a sequence of elems which equal each other from queue at once\n   * @param elems sequence reference, return number of elems.\n   * @param timeout\n   *   timeout > 0 if queue is full, blocking for timeout(ms), and return false.\n   *   timeout = 0 if queue is full, blocking until queue is not full.\n   *   timeout < 0 if queue is full, return immediately.\n   * @param max_elems max elements number returned.\n   * @return: return number of elems.\n   */\n  size_t PopBatch(Sequence* elems, int timeout = 0, uint32_t max_elems = -1) {\n    size_t num = 0;\n    {\n      std::unique_lock<std::mutex> lock(_BlockingQueue::mutex_);\n\n      num = PopQueueBatch(lock, elems, timeout, max_elems);\n      if (num < 0) {\n        return num;\n      }\n    }\n\n    /* wakeup waiter */\n    _BlockingQueue::not_full_.notify_all();\n\n    return num;\n  }\n\n private:\n  size_t PopQueueBatch(std::unique_lock<std::mutex>& lock, Sequence* elems,\n                       int timeout = 0, uint32_t max_elems = -1) {\n    size_t num = 0;\n\n    /* wait for elems first */\n    if (num >= max_elems || _BlockingQueue::WaitQueue(lock, timeout) == false) {\n      return num;\n    }\n\n    /* Get first element */\n    auto first = _BlockingQueue::queue_.front();\n    auto second = first;\n    num++;\n    elems->emplace_back(std::move(_BlockingQueue::queue_.front()));\n    _BlockingQueue::queue_.pop();\n\n    /* Loop and get same element */\n    while (_BlockingQueue::queue_.size() > 0 && num < max_elems) {\n      if (_BlockingQueue::WaitQueue(lock, -1) == false) {\n        return num;\n      }\n\n      second = _BlockingQueue::queue_.front();\n\n      /* try to get all same priority elements\n       * break when priority is different\n       */\n      if (comp_(first, second) == true || comp_(second, first) == true) {\n        break;\n      }\n\n      /* get and remove element */\n      elems->emplace_back(std::move(_BlockingQueue::queue_.front()));\n      
_BlockingQueue::queue_.pop();\n      num++;\n    }\n\n    return num;\n  }\n\n  Compare comp_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/collector.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_COLLECTOR_H_\n#define MODELBOX_COLLECTOR_H_\n\n#include <map>\n#include <memory>\n#include <mutex>\n#include <vector>\n#include \"modelbox/base/log.h\"\nnamespace modelbox {\n\ntemplate <typename T>\nclass Collector {\n public:\n  Collector() = default;\n  virtual ~Collector() = default;\n\n  /**\n   * @brief Add Object\n   * @param name object key\n   * @param obj object\n   */\n  void AddObject(const std::string &name, std::shared_ptr<T> obj) {\n    std::unique_lock<std::mutex> lock(object_lock_);\n    auto iter = objs_.find(name);\n    if (iter != objs_.end()) {\n      MBLOG_WARN << name << \" is already in the map, overwrites it.\";\n    }\n    objs_[name] = obj;\n  }\n\n  /**\n   * @brief Remove Object\n   * @param name object key\n   */\n  void RmvObject(const std::string &name) {\n    std::unique_lock<std::mutex> lock(object_lock_);\n    auto iter = objs_.find(name);\n    if (iter == objs_.end()) {\n      return;\n    }\n\n    objs_.erase(name);\n  }\n\n  /**\n   * @brief Get object\n   * @param name object key\n   * @param obj object\n   * @return get object success or not\n   */\n  bool GetObject(const std::string &name, std::shared_ptr<T> &obj) {\n    auto iter = objs_.find(name);\n    if (iter == objs_.end()) {\n      return false;\n    }\n    obj = iter->second;\n    return 
true;\n  }\n\n  /**\n   * @brief Get All Objects\n   * @return a vector of objects\n   */\n  std::vector<std::shared_ptr<T>> GetObjects() {\n    std::vector<std::shared_ptr<T>> objs;\n    for (auto &obj : objs_) {\n      objs.push_back(obj.second);\n    }\n    return objs;\n  }\n\n  /**\n   * @brief Get Object size\n   * @return the object size\n   */\n  int GetObjectSize() { return objs_.size(); }\n\n private:\n  std::mutex object_lock_;\n  std::map<std::string, std::shared_ptr<T>> objs_;\n};\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/configuration.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CONFIGURATION_H_\n#define MODELBOX_CONFIGURATION_H_\n\n#include <functional>\n#include <map>\n#include <memory>\n#include <set>\n#include <sstream>\n#include <string>\n#include <vector>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n// ETX (end of text) is used as a separator between values\nconstexpr const char *LIST_DELIMITER = \"\\003\";\nconstexpr const uint32_t VALID_RANGE_OF_DOUBLE = 15;\n\nclass ConfigStore {\n public:\n  virtual ~ConfigStore();\n\n  void WriteProperty(const std::string &key, const std::string &property);\n\n  Status ReadProperty(const std::string &key, std::string *property) const;\n\n  size_t Size() const;\n\n  std::set<std::string> GetKeys() const;\n\n  bool Contain(const std::string &key) const;\n\n  std::set<std::string> GetSubKeys(const std::string &prefix_key) const;\n\n  std::unique_ptr<ConfigStore> GetSubConfigStore(\n      const std::string &prefix_key) const;\n\n  void Add(const ConfigStore &store);\n\n  void Copy(const ConfigStore &store, const std::string &key);\n\n  void SetExpandEnv(bool expand_env);\n\n private:\n  std::map<std::string, std::string> properties_;\n  std::map<std::string, std::set<std::string>> sub_key_index_;\n  bool expand_env_{false};\n\n  void AddSubConfig(const std::string &prefix_key, 
ConfigStore *store,\n                    size_t key_offset) const;\n};\n\nclass ConfigurationBuilder;\n\nclass Configuration {\n  friend class ConfigurationBuilder;\n\n public:\n  Configuration();\n  Configuration(const Configuration &config) = delete;\n  Configuration &operator=(const Configuration &config) = delete;\n  Configuration(const Configuration &&config) = delete;\n  Configuration &operator=(const Configuration &&config) = delete;\n\n  virtual ~Configuration();\n\n  static void Trim(std::string *value);\n\n  static void StringSplit(const std::string &str, const std::string &delimiter,\n                          std::vector<std::string> &sub_str_list);\n\n  void Add(const Configuration &config);\n\n  void Copy(const Configuration &config, const std::string &key);\n\n  size_t Size() const;\n\n  std::set<std::string> GetKeys() const;\n\n  bool Contain(const std::string &key) const;\n\n  std::set<std::string> GetSubKeys(const std::string &prefix_key) const;\n\n  std::shared_ptr<Configuration> GetSubConfig(\n      const std::string &prefix_key) const;\n\n  template <class T>\n  void SetProperty(const std::string &key, const T &prop);\n\n  template <class T>\n  void SetProperty(const std::string &key, const std::vector<T> &prop);\n\n  template <class T>\n  T GetProperty(const std::string &key, const T &default_prop) const;\n\n  template <class T>\n  std::vector<T> GetProperty(const std::string &key,\n                             const std::vector<T> &default_prop) const;\n\n  std::string GetString(const std::string &key,\n                        const std::string &default_prop = \"\") const;\n\n  bool GetBool(const std::string &key, bool default_prop = false) const;\n\n  int8_t GetInt8(const std::string &key, int8_t default_prop = 0) const;\n\n  uint8_t GetUint8(const std::string &key, uint8_t default_prop = 0) const;\n\n  int16_t GetInt16(const std::string &key, int16_t default_prop = 0) const;\n\n  uint16_t GetUint16(const std::string &key, uint16_t 
default_prop = 0) const;\n\n  int32_t GetInt32(const std::string &key, int32_t default_prop = 0) const;\n\n  uint32_t GetUint32(const std::string &key, uint32_t default_prop = 0) const;\n\n  int64_t GetInt64(const std::string &key, int64_t default_prop = 0) const;\n\n  uint64_t GetUint64(const std::string &key, uint64_t default_prop = 0) const;\n\n  float GetFloat(const std::string &key, float default_prop = 0.0F) const;\n\n  double GetDouble(const std::string &key, double default_prop = 0.0) const;\n\n  std::vector<std::string> GetStrings(\n      const std::string &key,\n      const std::vector<std::string> &default_prop = {}) const;\n\n  std::vector<bool> GetBools(const std::string &key,\n                             const std::vector<bool> &default_prop = {}) const;\n\n  std::vector<int8_t> GetInt8s(\n      const std::string &key,\n      const std::vector<int8_t> &default_prop = {}) const;\n\n  std::vector<uint8_t> GetUint8s(\n      const std::string &key,\n      const std::vector<uint8_t> &default_prop = {}) const;\n\n  std::vector<int16_t> GetInt16s(\n      const std::string &key,\n      const std::vector<int16_t> &default_prop = {}) const;\n\n  std::vector<uint16_t> GetUint16s(\n      const std::string &key,\n      const std::vector<uint16_t> &default_prop = {}) const;\n\n  std::vector<int32_t> GetInt32s(\n      const std::string &key,\n      const std::vector<int32_t> &default_prop = {}) const;\n\n  std::vector<uint32_t> GetUint32s(\n      const std::string &key,\n      const std::vector<uint32_t> &default_prop = {}) const;\n\n  std::vector<int64_t> GetInt64s(\n      const std::string &key,\n      const std::vector<int64_t> &default_prop = {}) const;\n\n  std::vector<uint64_t> GetUint64s(\n      const std::string &key,\n      const std::vector<uint64_t> &default_prop = {}) const;\n\n  std::vector<float> GetFloats(\n      const std::string &key,\n      const std::vector<float> &default_prop = {}) const;\n\n  std::vector<double> GetDoubles(\n      const 
std::string &key,\n      const std::vector<double> &default_prop = {}) const;\n\n protected:\n  Configuration(std::unique_ptr<ConfigStore> &store);\n\n  template <class T>\n  Status Convert(const std::string &property, T &convert_prop) const;\n\n  std::unique_ptr<ConfigStore> store_;\n};\n\ntemplate <class T>\nvoid Configuration::SetProperty(const std::string &key, const T &prop) {\n  std::stringstream ss;\n  ss.precision(VALID_RANGE_OF_DOUBLE);\n  ss << prop;\n  store_->WriteProperty(key, ss.str());\n}\n\ntemplate <class T>\nvoid Configuration::SetProperty(const std::string &key,\n                                const std::vector<T> &prop) {\n  std::stringstream ss;\n  ss.precision(VALID_RANGE_OF_DOUBLE);\n  for (size_t i = 1; i < prop.size(); ++i) {\n    ss << prop[i - 1] << LIST_DELIMITER;\n  }\n\n  if (!prop.empty()) {\n    ss << prop.back();\n  }\n\n  store_->WriteProperty(key, ss.str());\n}\n\ntemplate <class T>\nT Configuration::GetProperty(const std::string &key,\n                             const T &default_prop) const {\n  std::string raw_prop;\n  auto ret = store_->ReadProperty(key, &raw_prop);\n  if (ret != STATUS_SUCCESS) {\n    return default_prop;\n  }\n\n  T convert_prop{};\n  ret = Convert<T>(raw_prop, convert_prop);\n  if (ret != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"Convert [\" << key << \" : \" << raw_prop << \"] to \"\n                << ret.Errormsg();\n    return default_prop;\n  }\n\n  return convert_prop;\n};\n\ntemplate <class T>\nstd::vector<T> Configuration::GetProperty(\n    const std::string &key, const std::vector<T> &default_prop) const {\n  std::string raw_prop;\n  auto ret = store_->ReadProperty(key, &raw_prop);\n  if (ret != STATUS_SUCCESS) {\n    return default_prop;\n  }\n\n  std::vector<std::string> raw_value_list;\n  StringSplit(raw_prop, LIST_DELIMITER, raw_value_list);\n\n  std::vector<T> value_list;\n  T convert_prop{};\n  for (const auto &raw_value : raw_value_list) {\n    ret = Convert<T>(raw_value, convert_prop);\n    
if (ret != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Convert [\" << key << \" : \" << raw_prop << \"]::[\"\n                  << raw_value << \"] to \" << ret.Errormsg();\n      return default_prop;\n    }\n\n    value_list.push_back(convert_prop);\n  }\n\n  return value_list;\n}\n\ntemplate <class T>\nStatus Configuration::Convert(const std::string &property,\n                              T &convert_prop) const {\n  UNUSED_VAR(property);\n  UNUSED_VAR(convert_prop);\n\n  return STATUS_FAULT;\n};\n\ntemplate <>\nStatus Configuration::Convert<std::string>(const std::string &property,\n                                           std::string &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<bool>(const std::string &property,\n                                    bool &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<int8_t>(const std::string &property,\n                                      int8_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<uint8_t>(const std::string &property,\n                                       uint8_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<int16_t>(const std::string &property,\n                                       int16_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<uint16_t>(const std::string &property,\n                                        uint16_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<int32_t>(const std::string &property,\n                                       int32_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<uint32_t>(const std::string &property,\n                                        uint32_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<int64_t>(const std::string &property,\n                                       int64_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<uint64_t>(const std::string &property,\n                   
                     uint64_t &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<float>(const std::string &property,\n                                     float &convert_prop) const;\n\ntemplate <>\nStatus Configuration::Convert<double>(const std::string &property,\n                                      double &convert_prop) const;\n\nclass ConfigParser {\n public:\n  virtual Status Parse(const std::shared_ptr<Configuration> &config,\n                       std::istream &is, const std::string &fname) = 0;\n  virtual Status Parse(const std::shared_ptr<Configuration> &config,\n                       const std::string &file) = 0;\n};\n\nenum class ConfigType { TOML };\n\nclass ConfigurationBuilder {\n public:\n  ConfigurationBuilder();\n\n  virtual ~ConfigurationBuilder();\n\n  void AddProperty(const std::string &key, const std::string &property);\n\n  void AddProperty(const std::string &key,\n                   const std::vector<std::string> &properties);\n\n  void AddProperties(const std::map<std::string, std::string> &properties);\n\n  std::shared_ptr<Configuration> Build();\n\n  std::shared_ptr<Configuration> Build(\n      const std::string &file, const ConfigType &type = ConfigType::TOML,\n      bool expand_env = false);\n\n  std::shared_ptr<Configuration> Build(\n      std::istream &is, const std::string &fname = \"unknown file\",\n      const ConfigType &type = ConfigType::TOML, bool expand_env = false);\n\n private:\n  std::shared_ptr<ConfigParser> CreateParser(\n      const ConfigType &type = ConfigType::TOML);\n\n  std::unique_ptr<ConfigStore> store_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CONFIGURATION_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/crypto.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CRYPTO_H_\n#define MODELBOX_CRYPTO_H_\n\n#include <modelbox/base/status.h>\n#include <time.h>\n\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\n/// default ciphter\nconstexpr const char *DEFAULT_CIPHER_AES256_CBC = \"aes-256-cbc\";\nconstexpr int MAX_PASSWORD_LEN = 1024;\nconstexpr int  IV_LEN = 16;\n\n/**\n * @brief hmac encode\n * @param algorithm algorithm, support sha512, sha256, sha1, md5, sha224, sha384\n * @param input input data\n * @param output output data\n * @return whether success\n */\nStatus HmacEncode(const std::string &algorithm,\n                  const std::vector<unsigned char> &input,\n                  std::vector<unsigned char> *output);\n\n/**\n * @brief hmac encode\n * @param algorithm algorithm, support sha512, sha256, sha1, md5, sha224, sha384\n * @param input input data pointer\n * @param input_len input data len\n * @param output output data\n * @return whether success\n */\nStatus HmacEncode(const std::string &algorithm, const void *input,\n                  size_t input_len, std::vector<unsigned char> *output);\n\n/**\n * @brief Conver Hmac to string\n * @param input input data pointer\n * @param input_len input data len\n * @return Hmac in string\n */\nstd::string HmacToString(const void *input, size_t input_len);\n\n/**\n * @brief Encrypt 
password\n * @param pass password in plain text\n * @param sysrelated Whether encryption system related\n * @param rootkey output rootkey\n * @param en_pass encrypted password\n * @param ciphername ciphter name, like aes-256-cbc\n * @return whether success\n */\nStatus PassEncrypt(const std::vector<char> &pass, bool sysrelated,\n                   std::string *rootkey, std::string *en_pass,\n                   const std::string &ciphername = DEFAULT_CIPHER_AES256_CBC);\n\n/**\n * @brief Decrypt password\n * @param en_pass encrypted password\n * @param rootkey rootkey\n * @param pass output password in plain text\n * @param ciphername ciphter name, like aes-256-cbc\n * @return whether success\n */\nStatus PassDecrypt(const std::string &en_pass, const std::string &rootkey,\n                   std::vector<char> *pass,\n                   const std::string &ciphername = DEFAULT_CIPHER_AES256_CBC);\n\n/**\n * @brief Generic encrypt function\n * @param ciphername ciphter name, like aes-256-cbc\n * @param input input data\n * @param input_len input data len\n * @param output output data\n * @param output_len output len\n * @param max_output max output len\n * @param key encrypt key\n * @param iv encrypt iv\n * @return whether success\n */\nStatus Encrypt(const std::string &ciphername, unsigned char *input,\n               int input_len, unsigned char *output, int *output_len,\n               int max_output, unsigned char *key, unsigned char *iv);\n\n/**\n * @brief Generic decrypt function\n * @param ciphername ciphter name, like aes-256-cbc\n * @param input input data\n * @param input_len input data len\n * @param output output data\n * @param output_len output len\n * @param max_output max output len\n * @param key encrypt key\n * @param iv encrypt iv\n * @return whether success\n */\nStatus Decrypt(const std::string &ciphername, unsigned char *input,\n               int input_len, unsigned char *output, int *output_len,\n               int max_output, unsigned char 
*key, unsigned char *iv);\n\n/**\n * @brief Base64 encode\n * @param input input data\n * @param output encoded base64 string\n * @return whether success\n */\nStatus Base64Encode(const std::vector<unsigned char> &input,\n                    std::string *output);\n\n/**\n * @brief Base64 encode\n * @param input input data\n * @param input_len input data len\n * @param output encoded base64 string\n * @return whether success\n */\nStatus Base64Encode(const unsigned char *input, size_t input_len,\n                    std::string *output);\n\n/**\n * @brief Base64 decode\n * @param input encoded base64 string\n * @param output decoded data\n * @return whether success\n */\nStatus Base64Decode(const std::string &input,\n                    std::vector<unsigned char> *output);\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/device.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_H_\n#define MODELBOX_DEVICE_H_\n\n#include <atomic>\n#include <functional>\n#include <iostream>\n#include <map>\n#include <memory>\n#include <vector>\n\n#include \"modelbox/base/device_memory.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/executor.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nconstexpr const char *DRIVER_CLASS_DEVICE = \"DRIVER-DEVICE\";\nconstexpr const int MAX_CIRCLE_LIST_SIZE = 100;\nclass DeviceManager;\n\nusing DeleteFunction = std::function<void(void *)>;\n\n/**\n * @brief circle list used to save trace log\n */\nclass CircleQueue {\n public:\n  CircleQueue();\n  virtual ~CircleQueue();\n\n  void EnQueue(const std::string &data);\n  std::string &DeQueue();\n  std::string &GetQueue();\n  bool Empty();\n  bool Full();\n\n private:\n  std::string data_[MAX_CIRCLE_LIST_SIZE];\n  int front_;\n  int rear_;\n};\n\nclass DeviceDesc {\n public:\n  DeviceDesc();\n  virtual ~DeviceDesc();\n\n  virtual std::string GetDeviceId();\n  virtual std::string GetDeviceType();\n  virtual std::string GetDeviceMemory();\n  virtual std::string GetDeviceVersion();\n  virtual std::string GetDeviceDesc();\n\n  void SetDeviceId(const std::string &device_id);\n  void SetDeviceType(const std::string &device_type);\n  void SetDeviceMemory(const 
std::string &device_memory);\n  void SetDeviceVersion(const std::string &device_version);\n  void SetDeviceDesc(const std::string &device_desc);\n\n protected:\n  std::string device_id_;\n  std::string device_type_;\n  std::string device_memory_;\n  std::string device_version_;\n  std::string device_description_;\n};\n\nusing DevExecuteCallBack = std::function<Status(size_t idx)>;\n\nclass Device : public std::enable_shared_from_this<Device> {\n public:\n  Device();\n  Device(std::shared_ptr<DeviceMemoryManager> mem_mgr);\n  Device(size_t thread_count, std::shared_ptr<DeviceMemoryManager> mem_mgr);\n  virtual ~Device();\n\n  virtual std::string GetDeviceID() const;\n\n  virtual std::string GetType() const;\n\n  /**\n   * @brief when make mem contiguous, need test whether the device supports\n   * @return whether specify device supports mem contiguous\n   **/\n  virtual bool SupportMemContiguous() const;\n\n  void SetDeviceDesc(std::shared_ptr<DeviceDesc> device_desc);\n\n  std::shared_ptr<DeviceDesc> GetDeviceDesc();\n\n  Status Init();\n\n  virtual Status DeviceExecute(const DevExecuteCallBack &fun, int32_t priority,\n                               size_t count);\n\n  virtual std::list<std::future<Status>> DeviceExecuteAsync(\n      const DevExecuteCallBack &fun, int32_t priority, size_t count,\n      bool resource_nice);\n\n  std::shared_ptr<Executor> GetDeviceExecutor();\n\n  /**\n   * @brief Set allocatable memory limit\n   * @param mem_quota quota memory size\n   **/\n  void SetMemQuota(size_t mem_quota);\n\n  /**\n   * @brief Get allocatable memory limit\n   * @return Memory limit\n   **/\n  size_t GetMemQuota() const;\n\n  /**\n   * @brief Get allocated memory size\n   * @return Memory allocated\n   **/\n  size_t GetAllocatedMemSize() const;\n\n  /**\n   * @brief Malloc device memory, memory size = 0 is ok\n   * @param size Memory size\n   * @param mem_flags Flags to create device memory\n   * @param user_id user id\n   * @return Device memory\n   **/\n  
std::shared_ptr<DeviceMemory> MemAlloc(size_t size, uint32_t mem_flags = 0,\n                                         const std::string &user_id = \"\");\n\n  /**\n   * @brief Malloc device memory, memory size = 0 is ok\n   * @param size Memory size\n   * @param capacity Memory physic size\n   * @param mem_flags memory flags\n   * @param user_id user id\n   * @return Device memory\n   **/\n  std::shared_ptr<DeviceMemory> MemAlloc(size_t size, size_t capacity,\n                                         uint32_t mem_flags,\n                                         const std::string &user_id = \"\");\n\n  /**\n   * @brief Manage exist device mem\n   * @param mem_ptr Exist mem\n   * @param size Memory size\n   * @param deleter memory delete function\n   * @param mem_flags memory flags\n   * @return Device memory\n   **/\n  std::shared_ptr<DeviceMemory> MemAcquire(void *mem_ptr, size_t size,\n                                           const DeleteFunction &deleter,\n                                           uint32_t mem_flags = 0);\n\n  /**\n   * @brief Manage exist device mem\n   * @param mem_ptr Exist mem\n   * @param size Memory size\n   * @param mem_flags memory flags\n   * @return Device memory\n   **/\n  std::shared_ptr<DeviceMemory> MemAcquire(void *mem_ptr, size_t size,\n                                           uint32_t mem_flags = 0);\n\n  /**\n   * @brief Manage exist device mem\n   * @param mem_ptr Exist mem\n   * @param size Memory size\n   * @param mem_flags Flags of device mem\n   * @return Device memory\n   **/\n  std::shared_ptr<DeviceMemory> MemAcquire(const std::shared_ptr<void> &mem_ptr,\n                                           size_t size, uint32_t mem_flags = 0);\n\n  /**\n   * @brief Write host data to device, and create a new device memory, host_data\n   * is collected by os interface\n   * @param host_data Host data to write\n   * @param host_size Host data size\n   * @param user_id User id\n   **/\n  std::shared_ptr<DeviceMemory> 
MemWrite(const void *host_data,\n                                         size_t host_size,\n                                         const std::string &user_id = \"\");\n\n  /**\n   * @brief Clone source device memory to this device\n   * if source device memory is readonly, and in this device, same pointer will\n   * return. Otherwise, will make a copy.\n   * @param src_memory Memory to clone\n   * @param user_id user id\n   * @return A clone memory\n   **/\n  std::shared_ptr<DeviceMemory> MemClone(\n      std::shared_ptr<DeviceMemory> src_memory,\n      const std::string &user_id = \"\");\n\n  /**\n   * @brief Get device memory info\n   * @return Status\n   */\n  Status GetMemInfo(size_t *free, size_t *total) const;\n\n  /**\n   * @brief Get memory usage trace\n   * @return Memory usage trace\n   **/\n  std::shared_ptr<DeviceMemoryTrace> GetMemoryTrace() const;\n\n  /**\n   * @brief Get device manager\n   * @return device manager\n   **/\n  std::shared_ptr<DeviceManager> GetDeviceManager();\n\n  /**\n   * @brief Set device manager\n   * @return void\n   **/\n  friend class DeviceManager;\n\n protected:\n  std::shared_ptr<Executor> executor_;\n  void SetDeviceManager(const std::shared_ptr<DeviceManager> &device_mgr);\n\n  std::list<std::future<Status>> DeviceExecuteAsyncRude(\n      const DevExecuteCallBack &fun, int32_t priority, size_t count);\n\n  std::list<std::future<Status>> DeviceExecuteAsyncNice(\n      const DevExecuteCallBack &fun, int32_t priority, size_t count);\n\n  virtual bool NeedResourceNice();\n\n private:\n  std::shared_ptr<DeviceMemoryTrace> memory_trace_;\n  std::shared_ptr<DeviceMemoryManager> memory_manager_;\n  std::shared_ptr<DeviceDesc> device_desc_ = std::make_shared<DeviceDesc>();\n  std::weak_ptr<DeviceManager> device_mgr_;\n};\n\nclass DeviceFactory : public DriverFactory {\n public:\n  DeviceFactory();\n  ~DeviceFactory() override;\n\n  virtual std::map<std::string, std::shared_ptr<DeviceDesc>> DeviceProbe();\n\n  virtual 
std::shared_ptr<Device> CreateDevice(const std::string &device_id);\n\n  virtual std::string GetDeviceFactoryType();\n\n  virtual std::vector<std::string> GetDeviceList();\n\n private:\n};\n\nclass DeviceManager : public std::enable_shared_from_this<DeviceManager> {\n public:\n  DeviceManager();\n  virtual ~DeviceManager();\n\n  static std::shared_ptr<DeviceManager> GetInstance();\n  Status Initialize(const std::shared_ptr<Drivers> &driver,\n                    const std::shared_ptr<Configuration> &config);\n\n  virtual std::vector<std::string> GetDevicesTypes();\n\n  virtual std::vector<std::string> GetDevicesIdList(\n      const std::string &device_type);\n\n  std::shared_ptr<Device> CreateDevice(const std::string &device_type,\n                                       const std::string &device_id);\n\n  std::shared_ptr<Device> GetDevice(const std::string &device_type,\n                                    const std::string &device_id);\n\n  Status Register(const std::shared_ptr<DeviceFactory> &factory);\n\n  /**\n   * @brief Return host device\n   * @return Host device\n   **/\n  std::shared_ptr<Device> GetHostDevice();\n  void Clear();\n\n  /**\n   * GetDeviceFactoryList(), GetDeviceDescList(), GetDeviceList()\n   * only for test\n   */\n  const std::map<std::string, std::shared_ptr<DeviceFactory>>\n      &GetDeviceFactoryList();\n  const std::map<std::string,\n                 std::map<std::string, std::shared_ptr<DeviceDesc>>>\n      &GetDeviceDescList();\n  const std::map<std::string, std::map<std::string, std::shared_ptr<Device>>>\n      &GetDeviceList();\n\n  Status DeviceProbe();\n  Status InitDeviceFactory(const std::shared_ptr<Drivers> &driver);\n  std::shared_ptr<Drivers> GetDrivers();\n\n private:\n  Status CheckDeviceManagerInit();\n  void SetDrivers(std::shared_ptr<Drivers> drivers);\n  std::map<std::string, std::shared_ptr<DeviceFactory>> device_factory_;\n  std::map<std::string, std::map<std::string, std::shared_ptr<DeviceDesc>>>\n      
device_desc_list_;\n  std::map<std::string, std::map<std::string, std::shared_ptr<Device>>>\n      device_list_;\n  std::shared_ptr<Drivers> drivers_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_DEVICE_H_\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/device_memory.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_MEMORY_H_\n#define MODELBOX_DEVICE_MEMORY_H_\n\n#include <map>\n#include <memory>\n#include <mutex>\n#include <utility>\n#include <vector>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\n/**\n * @brief Test mem aligned\n * @param addr Target mem to test\n * @param align Mem need align to\n */\ninline bool IsMemAligned(uintptr_t addr, uintptr_t align) {\n  return addr % align == 0;\n}\n\nenum class DeviceMemoryCopyKind { FromHost, ToHost, SameDeviceType };\n\nclass Device;\nclass DeviceMemoryManager;\n\n/**\n * Simple device memory manage, cloud share one raw memory block\n * Opertation: resize, copy, slice\n * |   <- Raw memory block ->  |\n *  offset -> | <- size -> |\n *            | <- capacity -> |\n */\nclass DeviceMemory : public std::enable_shared_from_this<DeviceMemory> {\n  friend class Device;\n  friend class CpuMemory;\n\n public:\n  DeviceMemory(const DeviceMemory &deviceMemory) = delete;\n  DeviceMemory &operator=(const DeviceMemory &deviceMemory) = delete;\n  DeviceMemory(const DeviceMemory &&deviceMemory) = delete;\n  DeviceMemory &operator=(const DeviceMemory &&deviceMemory) = delete;\n\n  virtual ~DeviceMemory();\n\n  /**\n   * @brief Get memory pointer to access data, memory must be mutable\n   * @return Memory pointer if mutable or nullptr\n   */\n  
template <typename T>\n  std::shared_ptr<T> GetPtr() {\n    if (!is_content_mutable_) {\n      return nullptr;\n    }\n\n    std::shared_ptr<T> data_ptr(\n        (T *)((uint8_t *)device_mem_ptr_.get() + offset_), [](T *ptr) {});\n    return data_ptr;\n  };\n\n  /**\n   * @brief Get const memory pointer to read data\n   * @return Const memory pointer\n   */\n  template <typename T>\n  std::shared_ptr<const T> GetConstPtr() const {\n    std::shared_ptr<T> data_ptr(\n        (T *)((uint8_t *)device_mem_ptr_.get() + offset_), [](T *ptr) {});\n    return data_ptr;\n  }\n\n  /**\n   * @brief Mutable if memory content can be modified\n   * @return Mutable\n   */\n  bool IsContentMutable() const;\n\n  /**\n   * @brief Mutable if memory content can be modified\n   * @param content_mutable Content mutable\n   */\n  Status SetContentMutable(bool content_mutable);\n\n  /**\n   * @brief Get memory size, 0 if null\n   * @return memory size\n   */\n  size_t GetSize() const;\n\n  /**\n   * @brief Get memory capacity, 0 if null\n   * @return memory capacity\n   */\n  size_t GetCapacity() const;\n\n  /**\n   * @brief Get memory id\n   * @return Memory id\n   */\n  std::string GetMemoryID() const;\n\n  /**\n   * @brief Get device that memory located\n   * @return Device\n   */\n  std::shared_ptr<Device> GetDevice() const;\n\n  /**\n   * @brief Get device memory flag.\n   * @return device memory flag\n   */\n  uint32_t GetMemFlags() const;\n\n  /**\n   * @brief Check this memory belong to host\n   * @return Host or not\n   */\n  bool IsHost() const;\n\n  /**\n   * @brief Check memory on same device\n   * @param dev_mem other device memory\n   * @return same or not\n   */\n  bool IsSameDevice(const std::shared_ptr<DeviceMemory> &dev_mem);\n\n  /**\n   * @brief Check memory is continguous in same mem block strictly\n   *  |-- mem1 --|-- mem2 --|-- mem3 --|\n   *  |---------    mem block ---------|\n   * @param mem_list list of mem to judge\n   * @param with_order\n   *  true: mem order 
in list should be same with order in mem block\n   *  false: mem order in list can be different with order in mem block\n   * @return continguous or not\n   */\n  static bool IsContiguous(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n      bool with_order = true);\n\n  /**\n   * @brief Combine mem to one mem block\n   * @param mem_list to combine.\n   * @param target_device target device.\n   * @param target_mem_flags Flags to create device memory\n   * @return Mem block\n   */\n  static std::shared_ptr<DeviceMemory> Combine(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n      const std::shared_ptr<Device> &target_device = nullptr,\n      uint32_t target_mem_flags = 0);\n\n  /**\n   * @brief Count mem total size\n   * @param mem_list to count\n   * @param total_size mem size returned\n   * @return Status\n   */\n  static Status CountMemSize(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n      size_t &total_size);\n\n  /**\n   * @brief Check memory out of bound\n   * @return Result of verify, 0 is ok\n   */\n  virtual Status Verify() const;\n\n  /**\n   * @brief Resize memory, but will not exceed capacity\n   * @param new_size New memory size\n   * @return Status\n   */\n  Status Resize(size_t new_size);\n\n  /**\n   * @brief Realloc memory block\n   * @param new_capacity New memory size\n   * @return Status\n   */\n  Status Realloc(size_t new_capacity);\n\n  /**\n   * @brief Read data from other device memory\n   * @param src_memory Memory read from\n   * @param src_offset Offset in the memory read from\n   * @param src_size Size in the memory read from\n   * @param dest_offset Offset in memory write to\n   * @return Status\n   */\n  virtual Status ReadFrom(const std::shared_ptr<const DeviceMemory> &src_memory,\n                          size_t src_offset, size_t src_size,\n                          size_t dest_offset = 0);\n\n  /**\n   * @brief Write data to other device memory\n   * @param 
dest_memory Memory write to\n   * @param src_offset Offset in the memory read from\n   * @param src_size Size in the memory read from\n   * @param dest_offset Offset in memory write to\n   * @return Status\n   */\n  Status WriteTo(const std::shared_ptr<DeviceMemory> &dest_memory,\n                 size_t src_offset, size_t src_size,\n                 size_t dest_offset = 0) const;\n  /**\n   * @brief If capacity of this is enough, only data copy happend.\n   * otherwise, new memory is allocated\n   * @return Memory with append data\n   */\n  std::shared_ptr<DeviceMemory> Append(\n      const std::shared_ptr<DeviceMemory> &dev_mem);\n\n  /**\n   * @brief If capacity of this is enough, only data copy happend.\n   * otherwise, new memory is allocated\n   * @return Memory with append data\n   */\n  std::shared_ptr<DeviceMemory> Append(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list);\n\n  /**\n   * @brief A new device memory point to the part of this mem, same mem block in\n   * low level\n   * @return New device memory point to this mem\n   */\n  std::shared_ptr<DeviceMemory> Cut(size_t offset, size_t size);\n\n  /**\n   * @brief A new device memory with new mem block\n   *  Data in param will not copy\n   * @return New device memory with data you want\n   */\n  std::shared_ptr<DeviceMemory> Delete(size_t offset, size_t size);\n\n  /**\n   * @brief A new device memory with new mem block\n   *  Data in param will not be copied\n   * @return New device memory with data you want\n   */\n  std::shared_ptr<DeviceMemory> Delete(size_t offset, size_t size,\n                                       size_t capacity);\n\n  /**\n   * @brief A new device memory with new mem block\n   *  Data in param will be copied\n   * @return New device memory with data you want\n   */\n  std::shared_ptr<DeviceMemory> Copy(size_t offset, size_t size);\n\n  /**\n   * @brief A new device memory with new mem block\n   *  Data in param will be copied\n   * @return New device 
memory with data you want\n   */\n  std::shared_ptr<DeviceMemory> Copy(size_t offset, size_t size,\n                                     size_t capacity);\n  /**\n   * @brief A new device memory with full data\n   * @param is_copy means device memory will share one mem block or not\n   * @return A new device memory with data\n   */\n  std::shared_ptr<DeviceMemory> Clone(bool is_copy = false);\n\n  /* memory protect magic */\n  static const uint64_t MEM_MAGIC_CODE;\n\n protected:\n  bool is_host_mem_{false};\n  std::shared_ptr<Device> device_;\n  std::shared_ptr<DeviceMemoryManager> mem_mgr_;\n  std::shared_ptr<void> device_mem_ptr_;\n  size_t offset_{0};\n  size_t size_{0};\n  size_t capacity_{0};\n  std::string memory_id_;\n  bool is_content_mutable_{true};\n  uint32_t mem_flags_{0};\n\n  /**\n   * @brief Construct a device memory with physical mem ptr, called by device\n   * @param device Memory belong to\n   * @param mem_mgr device manager\n   * @param device_mem_ptr shared_ptr Memory pointer\n   * @param size Memory size\n   * @param is_host_mem is host memory, default is false.\n   */\n  DeviceMemory(const std::shared_ptr<Device> &device,\n               const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n               const std::shared_ptr<void> &device_mem_ptr, size_t size,\n               bool is_host_mem = false);\n\n  void SetMemFlags(uint32_t mem_flags);\n\n  /**\n   * @brief Check param for readFrom function\n   * @param src_memory source device memory\n   * @param src_offset memory offset\n   * @param src_size memory size\n   * @param dest_offset dest memory offset\n   * @return Is param ok\n   */\n  bool CheckReadFromParam(const std::shared_ptr<const DeviceMemory> &src_memory,\n                          size_t src_offset, size_t src_size,\n                          size_t dest_offset);\n\n  virtual Status CopyExtraMetaTo(std::shared_ptr<DeviceMemory> &device_mem);\n\n  virtual Status CombineExtraMeta(\n      const 
std::vector<std::shared_ptr<DeviceMemory>> &mem_list);\n\n private:\n  void UpdateMemID(void *device_mem_ptr);\n\n  /**\n   * @brief Check param for Realloc function\n   * @param new_size New size for device memory\n   * @return Is param ok\n   */\n  bool CheckReallocParam(size_t new_capacity);\n\n  /**\n   * @brief We need host to transfer data in different type devices\n   * @param src_memory Source memory\n   * @param src_offset Source memory offset\n   * @param src_size Source memory size\n   * @param dest_offset Destination memory offset\n   */\n  Status TransferInHost(const std::shared_ptr<const DeviceMemory> &src_memory,\n                        size_t src_offset, size_t src_size, size_t dest_offset);\n  /**\n   * @brief Transfer data by specified device\n   * @param src_memory Source memory\n   * @param src_offset Source memory offset\n   * @param src_size Source memory size\n   * @param dest_offset Destination memory offset\n   */\n  Status TransferInDevice(const std::shared_ptr<const DeviceMemory> &src_memory,\n                          size_t src_offset, size_t src_size,\n                          size_t dest_offset);\n\n  std::shared_ptr<DeviceMemory> PrepareAppendMem(size_t append_size);\n\n  Status AppendData(const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n                    std::shared_ptr<DeviceMemory> &target_device_mem);\n\n  Status MemAcquire(const std::shared_ptr<void> &mem_ptr, size_t size);\n\n  static std::shared_ptr<DeviceMemory> CombineContinuous(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n      size_t total_size, const std::shared_ptr<Device> &target_device);\n\n  static std::shared_ptr<DeviceMemory> CombineFragment(\n      const std::vector<std::shared_ptr<DeviceMemory>> &mem_list,\n      size_t total_size, std::shared_ptr<Device> target_device,\n      uint32_t target_mem_flags);\n};\n\n/**\n * @brief device memory manager\n */\nclass DeviceMemoryManager\n    : public 
std::enable_shared_from_this<DeviceMemoryManager> {\n public:\n  DeviceMemoryManager(std::string device_id);\n\n  virtual ~DeviceMemoryManager();\n\n  /**\n   * @brief Set allocatable memory limit\n   * @param mem_quota quota memory size\n   */\n  void SetMemQuota(size_t mem_quota);\n\n  /**\n   * @brief Get allocatable memory limit\n   * @return Memory limit\n   */\n  size_t GetMemQuota() const;\n\n  /**\n   * @brief Get allocated memory size\n   * @return Allocated memory size\n   */\n  size_t GetAllocatedMemSize() const;\n\n  /**\n   * @brief Preservce memory alloc\n   * @param size Memory size to allocate\n   * @return Is param ok\n   */\n  bool PreserveMem(size_t size);\n\n  /**\n   * @brief Restore preserved size\n   * @param size Memory size to restore\n   */\n  void RestoreMem(size_t size);\n\n  virtual std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) = 0;\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory in shared ptr\n   */\n  virtual std::shared_ptr<void> AllocSharedPtr(size_t size, uint32_t mem_flags);\n\n  /**\n   * @brief Implement by specified device, alloc memory\n   * @param size Memory size to allocate\n   * @param mem_flags Flags to create device memory\n   * @return Device memory in shared ptr\n   */\n  virtual void *Malloc(size_t size, uint32_t mem_flags) = 0;\n\n  /**\n   * @brief Implement by specified device, free memory\n   * @param mem_ptr Memory to free\n   * @param mem_flags Flags of device memory\n   */\n  virtual void Free(void *mem_ptr, uint32_t mem_flags) = 0;\n\n  /**\n   * @brief Write host data to device by raw pointer\n   * @param host_data Host data to read\n   * @param host_size Host data size\n   * @param device_buffer Device buffer to write\n   * @param device_size Device buffer size\n   * @return 
Status\n   */\n  virtual Status Write(const void *host_data, size_t host_size,\n                       void *device_buffer, size_t device_size);\n\n  /**\n   * @brief Read device data to host by raw pointer\n   * @param device_data Device data to read\n   * @param device_size Device data size\n   * @param host_buffer Host buffer to write\n   * @param host_size Host buffer size\n   * @return Status\n   */\n  virtual Status Read(const void *device_data, size_t device_size,\n                      void *host_buffer, size_t host_size);\n\n  /**\n   * @brief Implement by specified device, copy data from src to dest\n   * @param dest dest buffer to write\n   * @param dest_size dest buffer size\n   * @param src_buffer src buffer to read\n   * @param src_size read data size\n   * @param kind data copy kind\n   * @return Status\n   */\n  virtual Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n                      size_t src_size, DeviceMemoryCopyKind kind) = 0;\n\n  /**\n   * @brief Implement by specified device, copy memory between current device\n   *and host\n   * @param dest_memory Destination memory\n   * @param dest_offset Destination offset\n   * @param src_memory Source memory\n   * @param src_offset Source offset\n   * @param src_size Source memory size\n   * @param copy_kind copy mode\n   * @return Status\n   */\n  virtual Status DeviceMemoryCopy(\n      const std::shared_ptr<DeviceMemory> &dest_memory, size_t dest_offset,\n      const std::shared_ptr<const DeviceMemory> &src_memory, size_t src_offset,\n      size_t src_size,\n      DeviceMemoryCopyKind copy_kind = DeviceMemoryCopyKind::FromHost) = 0;\n\n  /**\n   * @brief Implement by specified device, get device memory info\n   * @param free Free memory\n   * @param total Total memory\n   * @return Status\n   */\n  virtual Status GetDeviceMemUsage(size_t *free, size_t *total) const = 0;\n\n protected:\n  std::string device_id_;\n  size_t mem_quota_{0};\n  size_t mem_allocated_{0};\n  
std::mutex allocated_size_lock_;\n};\n\nclass DeviceMemoryLog {\n public:\n  DeviceMemoryLog(std::string memory_id, std::string user_id,\n                  std::string device_id, size_t size);\n\n  virtual ~DeviceMemoryLog();\n\n  std::string memory_id_;\n  std::string user_id_;\n  std::string device_id_;\n  size_t size_{0};\n};\n\nclass DeviceMemoryTrace {\n public:\n  virtual ~DeviceMemoryTrace();\n  /**\n   * @brief Trace memory allocation\n   * @param memory_id Memory id\n   * @param user_id Memory request by\n   * @param device_id Memory belong to\n   * @param size Memory size\n   */\n  void TraceMemoryAlloc(const std::string &memory_id,\n                        const std::string &user_id,\n                        const std::string &device_id, size_t size);\n\n  /**\n   * @brief Trace memory free\n   * @param memory_id Memory to free\n   */\n  void TraceMemoryFree(const std::string &memory_id);\n\n  /**\n   * @brief Get memory log\n   * @param memory_id Memory id\n   * @return Memory log\n   */\n  std::shared_ptr<DeviceMemoryLog> GetMemoryLog(const std::string &memory_id);\n\n private:\n  // store trace logs of all device memory\n  std::map<std::string, std::shared_ptr<DeviceMemoryLog>> memory_logs_;\n  std::mutex memory_logs_lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_MEMORY_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/driver.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DRIVER_H_\n#define MODELBOX_DRIVER_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <unordered_map>\n#include <vector>\nnamespace modelbox {\n\nconstexpr const char *DRIVER_CLASS_VIRTUAL = \"DRIVER-VIRTUAL\";\nconstexpr const char *DRIVER_CLASS_INFERENCE = \"DRIVER-INFERENCE\";\nconstexpr const char *DRIVER_TYPE_VIRTUAL = \"virtual\";\nconstexpr const char *DEFAULT_SCAN_INFO = \"/tmp/modelbox-driver-info\";\nconstexpr const char *DRIVER_SCAN_INFO = \"/tmp/modelbox-driver-scan-info\";\n\nclass Driver;\nclass DriverFactory {\n public:\n  DriverFactory();\n  virtual ~DriverFactory();\n\n  virtual std::shared_ptr<Driver> GetDriver();\n\n  virtual void SetDriver(const std::shared_ptr<Driver> &driver);\n\n private:\n  friend class Driver;\n};\n\nclass DriverDesc {\n public:\n  DriverDesc();\n  virtual ~DriverDesc();\n  std::string GetClass();\n  std::string GetType();\n  std::string GetName();\n  std::string GetDescription();\n  std::string GetVersion();\n  std::string GetFilePath();\n  bool GetNoDelete();\n  bool GetGlobal();\n  bool GetDeepBind();\n\n  void SetClass(const std::string &classname);\n  void SetType(const std::string &type);\n  void SetName(const std::string 
&name);\n  void SetDescription(const std::string &description);\n  Status SetVersion(const std::string &version);\n  void SetFilePath(const std::string &file_path);\n  void SetNodelete(const bool &no_delete);\n  void SetGlobal(const bool &global);\n  void SetDeepBind(const bool &deep_bind);\n\n protected:\n  bool driver_no_delete_{false};\n  bool global_{false};\n  bool deep_bind_{false};\n  std::string driver_class_;\n  std::string driver_type_;\n  std::string driver_name_;\n  std::string driver_description_;\n  std::string driver_version_;\n  std::string driver_file_path_;\n\n private:\n  Status CheckVersion(const std::string &version);\n};\n\nclass DriverHandlerInfo {\n public:\n  DriverHandlerInfo();\n  virtual ~DriverHandlerInfo();\n\n  int IncHanderRefcnt();\n  int DecHanderRefcnt();\n\n  int initialize_count_{0};\n  int handler_count_{0};\n  std::mutex initialize_lock_;\n};\n\nclass DriverHandler {\n public:\n  std::shared_ptr<DriverHandlerInfo> Add(void *driver_handler);\n  Status Remove(void *driver_handler);\n  std::shared_ptr<DriverHandlerInfo> Get(void *driver_handler);\n\n  std::mutex handler_map_lock;\n\n private:\n  std::map<void *, std::shared_ptr<DriverHandlerInfo>> handler_map;\n};\n\nclass Driver : public std::enable_shared_from_this<Driver> {\n public:\n  Driver();\n  virtual ~Driver();\n\n  std::string GetDriverFile();\n\n  virtual std::shared_ptr<DriverFactory> CreateFactory();\n\n  std::shared_ptr<DriverDesc> GetDriverDesc();\n\n  void SetDriverDesc(std::shared_ptr<DriverDesc> desc);\n  bool IsVirtual();\n  void SetVirtual(bool is_virtual);\n\n protected:\n  std::shared_ptr<DriverDesc> desc_ = std::make_shared<DriverDesc>();\n\n private:\n  int GetMode(bool no_delete, bool global, bool deep_bind);\n  void CloseFactory();\n  void CloseFactoryLocked();\n  bool is_virtual_ = false;\n  void *driver_handler_{nullptr};\n  int factory_count_ = 0;\n  std::mutex mutex_;\n  std::shared_ptr<DriverFactory> factory_;\n};\n\nclass VirtualDriverDesc : 
public DriverDesc {\n public:\n  VirtualDriverDesc();\n  ~VirtualDriverDesc() override;\n};\n\nclass VirtualDriver : public Driver {\n public:\n  std::shared_ptr<VirtualDriverDesc> GetVirtualDriverDesc();\n  void SetVirtualDriverDesc(std::shared_ptr<VirtualDriverDesc> desc);\n  std::shared_ptr<DriverFactory> CreateFactory() override;\n  std::vector<std::shared_ptr<Driver>> GetBindDriver();\n\n private:\n  std::shared_ptr<VirtualDriverDesc> virtual_driver_desc_;\n};\n\nclass Drivers;\nclass VirtualDriverManager : public DriverFactory {\n public:\n  VirtualDriverManager();\n  ~VirtualDriverManager() override;\n  virtual Status Add(const std::string &file);\n  virtual Status Init(Drivers &driver);\n  virtual Status Scan(const std::vector<std::string> &scan_dirs);\n  virtual Status Scan(const std::string &path);\n  std::vector<std::shared_ptr<VirtualDriver>> GetAllDriverList();\n  void Clear();\n\n protected:\n  std::vector<std::shared_ptr<VirtualDriver>> drivers_list_;\n};\n\nclass DriversScanResultInfo {\n public:\n  DriversScanResultInfo();\n  virtual ~DriversScanResultInfo();\n  std::list<std::string> &GetLoadSuccessInfo();\n  std::map<std::string, std::string> &GetLoadFailedInfo();\n\n private:\n  std::list<std::string> load_success_info_;\n  std::map<std::string, std::string> load_failed_info_;\n};\nclass Drivers {\n public:\n  Drivers();\n\n  virtual ~Drivers();\n\n  /**\n   * @brief Set default scan path\n   *\n   * @param path\n   */\n  static void SetDefaultScanPath(const std::string &path);\n\n  /**\n   * @brief Set default driver info ;ath\n   *\n   * @param path\n   */\n  static void SetDefaultInfoPath(const std::string &path);\n\n  Status Initialize(const std::shared_ptr<Configuration> &config);\n  Status Scan();\n  void Clear();\n  Status Scan(const std::string &path, const std::string &filter);\n  Status VirtualDriverScan();\n  Status Add(const std::string &file);\n\n  std::vector<std::string> GetDriverClassList();\n  std::vector<std::string> 
GetDriverTypeList(const std::string &driver_class);\n  std::vector<std::string> GetDriverNameList(const std::string &driver_class,\n                                             const std::string &driver_type);\n  std::vector<std::shared_ptr<Driver>> GetAllDriverList();\n  std::vector<std::shared_ptr<Driver>> GetDriverListByClass(\n      const std::string &driver_class);\n  std::shared_ptr<Driver> GetDriver(const std::string &driver_class,\n                                    const std::string &driver_type,\n                                    const std::string &driver_name,\n                                    const std::string &driver_version = \"\");\n  static std::shared_ptr<Drivers> GetInstance();\n\n private:\n  Status InnerScan();\n  Status ReadExcludeInfo();\n  Status WriteScanInfo(const std::string &scan_info_path,\n                       const std::string &check_code);\n  Status GatherScanInfo(const std::string &scan_path);\n  Status FillCheckInfo(std::string &file_check_node,\n                       std::unordered_map<std::string, bool> &file_map,\n                       int64_t &ld_cache_time);\n  bool CheckPathAndMagicCode();\n  void PrintScanResults(const std::string &scan_path);\n  void PrintScanResult(\n      const std::list<std::string> &load_success_info,\n      const std::map<std::string, std::string> &load_failed_info);\n  void RemoveSameElements(std::vector<std::string> *driver_list);\n  bool DriversContains(const std::vector<std::shared_ptr<Driver>> &drivers_list,\n                       const std::shared_ptr<Driver> &driver);\n  std::shared_ptr<Configuration> config_;\n  std::vector<std::shared_ptr<Driver>> drivers_list_;\n  std::vector<std::shared_ptr<VirtualDriverManager>>\n      virtual_driver_manager_list_;\n  std::vector<std::string> driver_dirs_;\n  std::shared_ptr<DriversScanResultInfo> drivers_scan_result_info_;\n  uint64_t last_modify_time_sum_{0};\n  static std::string default_scan_path_;\n  static std::string 
default_driver_info_path_;\n  std::map<std::string, bool> scan_exclude_file_list_;\n  std::string scan_info_file_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DRIVER_H_\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/driver_api_helper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DRIVER_API_HELPER_H_\n#define MODELBOX_DRIVER_API_HELPER_H_\n\n#include <functional>\n#include <utility>\n\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/utils.h\"\n\n#pragma GCC visibility push(hidden)\n\nclass DriverPlugin {\n public:\n  virtual ~DriverPlugin() = default;\n\n  DriverPlugin &Init(std::function<modelbox::Status()> func) {\n    init_func_ = std::move(func);\n    return *this;\n  }\n\n  std::function<modelbox::Status()> GetInit() { return init_func_; }\n\n  DriverPlugin &Exit(std::function<void()> func) {\n    fini_func_ = std::move(func);\n    return *this;\n  }\n\n  std::function<void()> GetExit() { return fini_func_; }\n\n  DriverPlugin &SetCreateFacotryFunc(\n      std::function<std::shared_ptr<modelbox::DriverFactory>()> create_func) {\n    create_factory_func_ = std::move(create_func);\n    return *this;\n  }\n\n  virtual std::shared_ptr<modelbox::DriverFactory> CreateFactory() {\n    if (create_factory_func_ == nullptr) {\n      MBLOG_ERROR << \"Factory is null\";\n      return nullptr;\n    }\n\n    return create_factory_func_();\n  }\n  modelbox::DriverDesc Desc;\n\n  void AddPluginInitFunc(const std::function<void()> &func) {\n    plugin_init_func_.push_back(func);\n  }\n\n  void RunPluginInitFunc() {\n    for (auto &func : plugin_init_func_) {\n      
func();\n    }\n  }\n\n private:\n  std::function<modelbox::Status()> init_func_;\n  std::function<void()> fini_func_;\n  std::vector<std::function<void()>> plugin_init_func_;\n  std::function<std::shared_ptr<modelbox::DriverFactory>()>\n      create_factory_func_;\n};\n\nextern std::shared_ptr<DriverPlugin> ModelBoxGetDriverPlugin()\n    MODELBOX_DLL_LOCAL;\n\n#define MODELBOX_DRIVER_CREATE_FACTORY()                                      \\\n  extern \"C\" std::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() { \\\n    auto plugin = ModelBoxGetDriverPlugin();                                  \\\n    return plugin->CreateFactory();                                           \\\n  }\n\n#define MODELBOX_DRIVER_DESCRIPTION()                             \\\n  extern \"C\" void DriverDescription(modelbox::DriverDesc *desc) { \\\n    ModelBoxDriverPluginInit();                                   \\\n    auto plugin = ModelBoxGetDriverPlugin();                      \\\n    *desc = plugin->Desc;                                         \\\n    return;                                                       \\\n  }\n\n#define MODELBOX_DRIVER_INIT()               \\\n  extern \"C\" modelbox::Status DriverInit() { \\\n    ModelBoxDriverPluginInit();              \\\n    auto plugin = ModelBoxGetDriverPlugin(); \\\n    auto func = plugin->GetInit();           \\\n    if (func == nullptr) {                   \\\n      return modelbox::STATUS_OK;            \\\n    }                                        \\\n    return func();                           \\\n  }\n\n#define MODELBOX_DRIVER_FINI()               \\\n  extern \"C\" void DriverFini() {             \\\n    auto plugin = ModelBoxGetDriverPlugin(); \\\n    auto func = plugin->GetExit();           \\\n    if (func == nullptr) {                   \\\n      return;                                \\\n    }                                        \\\n    func();                                  \\\n  }\n\n#define 
MODELBOX_DRIVER_PLUGIN ModelBoxGetDriverPlugin()\n#define MODELBOX_DRIVER_PLUGIN_INIT_FUNC(func) \\\n  MODELBOX_DRIVER_PLUGIN->AddPluginInitFunc(func)\n#define MODELBOX_DRIVER_PLUGIN_DEFINE()                                        \\\n  void DriverPluginInit(DriverPlugin &desc);                                   \\\n  bool ModelBoxDriverPluginInit();                                             \\\n  MODELBOX_DLL_LOCAL std::shared_ptr<DriverPlugin> ModelBoxGetDriverPlugin() { \\\n    static std::shared_ptr<DriverPlugin> plugin =                              \\\n        std::make_shared<DriverPlugin>();                                      \\\n    return plugin;                                                             \\\n  }\n\n#define MODELBOX_DRIVER_INIT_FUNC()                    \\\n  MODELBOX_DLL_LOCAL bool ModelBoxDriverPluginInit() { \\\n    static bool is_init = false;                       \\\n    if (is_init) {                                     \\\n      return true;                                     \\\n    }                                                  \\\n    is_init = true;                                    \\\n    DriverPluginInit(*(MODELBOX_DRIVER_PLUGIN));       \\\n    MODELBOX_DRIVER_PLUGIN->RunPluginInitFunc();       \\\n    return true;                                       \\\n  }\n\n#define MODELBOX_DRIVER_DEFINE(desc) \\\n  MODELBOX_DRIVER_PLUGIN_DEFINE()    \\\n  MODELBOX_DRIVER_CREATE_FACTORY()   \\\n  MODELBOX_DRIVER_DESCRIPTION()      \\\n  MODELBOX_DRIVER_INIT()             \\\n  MODELBOX_DRIVER_FINI()\n\n#define MODELBOX_DRIVER_SETTER(desc) void DriverPluginInit(DriverPlugin &(desc))\n\n#define MODELBOX_DRIVER(desc)  \\\n  MODELBOX_DRIVER_DEFINE(desc) \\\n  MODELBOX_DRIVER_INIT_FUNC()  \\\n  MODELBOX_DRIVER_SETTER(desc)\n\n#pragma GCC visibility pop\n\nextern \"C\" {\n\n#if defined(__clang__)\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wreturn-type-c-linkage\"\n#endif\n\nMODELBOX_DLL_PUBLIC 
std::shared_ptr<modelbox::DriverFactory>\nCreateDriverFactory();\n\nMODELBOX_DLL_PUBLIC modelbox::Status DriverInit();\n\nMODELBOX_DLL_PUBLIC void DriverFini();\n\nMODELBOX_DLL_PUBLIC void DriverDescription(modelbox::DriverDesc *desc);\n\n#if defined(__clang__)\n#pragma clang diagnostic pop\n#endif\n}\n\n#endif  // MODELBOX_DRIVER_API_HELPER_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/driver_utils.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DRIVER_UTILS_H_\n#define MODELBOX_DRIVER_UTILS_H_\n\n#include <sys/wait.h>\n#include <unistd.h>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n/**\n * @brief generate sha256 key from a check_sum\n * @param check_sum\n * @return sha256 result\n */\nstd::string GenerateKey(int64_t check_sum);\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/error_info.h",
    "content": "#ifndef ERROR_INFO_H_\n#define ERROR_INFO_H_\n#include <iostream>\n/**\n * @brief Job error info\n */\nstruct ErrorInfo {\n  /**\n   * @brief Job error code\n   */\n  std::string error_code_;\n  /**\n   *  @brief Job error message\n   */\n  std::string error_msg_;\n};\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/executor.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_EXECUTOR_H_\n#define MODELBOX_EXECUTOR_H_\n\n#include <modelbox/base/thread_pool.h>\n\nnamespace modelbox {\n\n/**\n * @brief Executor for flowunit\n */\nclass Executor {\n public:\n  Executor();\n  Executor(int thread_count);\n  Executor(const Executor &) = delete;\n  Executor &operator=(const Executor &) = delete;\n\n  virtual ~Executor();\n\n  void SetThreadCount(int thread_count);\n\n  template <typename func, typename... ts>\n  auto Run(func &&fun, int32_t priority, ts &&...params)\n      -> std::future<typename std::result_of<func(ts...)>::type> {\n    return thread_pool_->Submit(fun, params...);\n  }\n\n private:\n  std::shared_ptr<ThreadPool> thread_pool_;\n};\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/graph_manager.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_GRAPH_MANAGER_H\n#define MODELBOX_GRAPH_MANAGER_H\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n\n#include <iostream>\n#include <map>\n#include <memory>\n#include <set>\n#include <vector>\n\nnamespace modelbox {\n\nconstexpr const char *DRIVER_CLASS_GRAPHCONF = \"DRIVER-GRAPHCONF\";\nconstexpr const char *GCGRAPH_NODE_TYPE_NODE = \"node\";\nconstexpr const char *GCGRAPH_NODE_TYPE_SUBGRAPH = \"subgraph\";\n\nclass GCGraph;\nclass DataHandler;\nclass GCNode {\n public:\n  GCNode();\n  virtual ~GCNode();\n\n  Status Init(const std::string &name,\n              const std::shared_ptr<GCGraph> &root_graph);\n\n  std::string GetNodeName() const;\n  std::shared_ptr<Configuration> GetConfiguration() const;\n  std::shared_ptr<const std::set<std::string>> GetInputPorts() const;\n  std::shared_ptr<const std::set<std::string>> GetOutputPorts() const;\n  std::shared_ptr<GCGraph> GetRootGraph() const;\n  std::string GetNodeType() const;\n\n  void SetNodeType(std::string type);\n  void SetConfiguration(const std::string &key, const std::string &value);\n  Status SetInputPort(const std::string &port);\n  Status SetOutputPort(const std::string &port);\n  void SetOutDataHandler(std::shared_ptr<DataHandler> &data_handler);\n  
std::shared_ptr<DataHandler> GetBindDataHandler();\n\n private:\n  std::string name_;\n  std::string type_;\n  std::weak_ptr<GCGraph> root_graph_;\n  std::set<std::string> input_ports_;\n  std::set<std::string> output_ports_;\n  std::shared_ptr<Configuration> configuration_;\n  std::weak_ptr<DataHandler> out_data_handler_;\n};\n\nclass GCEdge {\n public:\n  GCEdge();\n  virtual ~GCEdge();\n\n  Status Init(const std::shared_ptr<GCGraph> &root_graph);\n\n  const std::string &GetHeadOutPort() const;\n  const std::string &GetTailInPort() const;\n  std::shared_ptr<GCNode> GetHeadNode() const;\n  std::shared_ptr<GCNode> GetTailNode() const;\n  std::shared_ptr<Configuration> GetConfiguration() const;\n  std::shared_ptr<GCGraph> GetRootGraph() const;\n\n  Status SetHeadNode(std::shared_ptr<GCNode> node);\n  Status SetTailNode(std::shared_ptr<GCNode> node);\n  Status SetHeadPort(std::string port);\n  Status SetTailPort(std::string port);\n  void SetConfiguration(const std::string &key, const std::string &value);\n\n private:\n  std::shared_ptr<GCNode> head_;\n  std::shared_ptr<GCNode> tail_;\n  std::weak_ptr<GCGraph> root_graph_;\n  std::string head_out_port_;\n  std::string tail_in_port_;\n  std::shared_ptr<Configuration> configuration_;\n};\n\nclass GCGraph {\n public:\n  GCGraph();\n  virtual ~GCGraph();\n\n  Status Init(const std::shared_ptr<GCGraph> &root_graph);\n\n  void SetGraphName(const std::string &name);\n  const std::string &GetGraphName() const;\n  std::shared_ptr<GCGraph> GetRootGraph() const;\n\n  Status AddSubGraph(const std::shared_ptr<GCGraph> &subgraph);\n  std::shared_ptr<GCGraph> GetSubGraph(const std::string &name) const;\n  std::map<std::string, const std::shared_ptr<GCGraph>> GetAllSubGraphs() const;\n  void ShowAllSubGraph() const;\n\n  Status AddNode(const std::shared_ptr<GCNode> &node);\n  Status SetFirstNode(const std::shared_ptr<GCNode> &node);\n  std::vector<std::shared_ptr<GCNode>> GetFirstNodes();\n  std::shared_ptr<GCNode> GetNode(const 
std::string &name) const;\n  std::map<std::string, const std::shared_ptr<GCNode>> GetAllNodes() const;\n  void ShowAllNode() const;\n\n  Status AddEdge(const std::shared_ptr<GCEdge> &edge);\n  std::shared_ptr<GCEdge> GetEdge(const std::string &name) const;\n  std::map<std::string, const std::shared_ptr<GCEdge>> GetAllEdges() const;\n  void ShowAllEdge() const;\n\n  std::shared_ptr<Configuration> GetConfiguration() const;\n  void SetConfiguration(const std::string &key, const std::string &value);\n  void SetConfiguration(std::shared_ptr<Configuration> &config);\n\n private:\n  std::map<std::string, const std::shared_ptr<GCNode>> nodes_;\n  std::map<std::string, const std::shared_ptr<GCEdge>> edges_;\n  std::map<std::string, const std::shared_ptr<GCGraph>> subgraphs_;\n  std::weak_ptr<GCGraph> root_graph_;\n  std::vector<std::shared_ptr<GCNode>> first_nodes_;\n  std::string name_;\n  std::shared_ptr<Configuration> configuration_;\n};\n\nclass GraphConfig {\n public:\n  GraphConfig();\n  virtual ~GraphConfig();\n\n  virtual std::shared_ptr<GCGraph> Resolve() = 0;\n};\n\nclass GraphConfigFactory : public DriverFactory {\n public:\n  GraphConfigFactory();\n  ~GraphConfigFactory() override;\n  virtual std::shared_ptr<GraphConfig> CreateGraphConfigFromStr(\n      const std::string &graph_config) = 0;\n  virtual std::shared_ptr<GraphConfig> CreateGraphConfigFromFile(\n      const std::string &file_path) = 0;\n  virtual std::string GetGraphConfFactoryType() = 0;\n};\n\nclass GraphConfigManager {\n public:\n  GraphConfigManager();\n  virtual ~GraphConfigManager();\n\n  static GraphConfigManager &GetInstance();\n\n  Status Register(const std::shared_ptr<GraphConfigFactory> &factory);\n\n  Status Initialize(const std::shared_ptr<Drivers> &driver,\n                    const std::shared_ptr<Configuration> &config);\n\n  std::shared_ptr<GraphConfig> LoadGraphConfig(\n      const std::shared_ptr<Configuration> &config);\n\n  std::vector<std::string> GetSupportTypes();\n\n  void 
Clear();\n\n private:\n  Status InitGraphConfigFactory(const std::shared_ptr<Drivers> &driver);\n\n  std::map<std::string, const std::shared_ptr<GraphConfigFactory>>\n  GetGraphConfFactoryList();\n\n  std::shared_ptr<GraphConfigFactory> GetGraphConfFactory(\n      const std::string &type);\n\n  std::shared_ptr<GraphConfig> CreateGraphConfig(std::string graph_conf_type,\n                                                 std::string graph_conf_name);\n  std::shared_ptr<GraphConfig> GetGraphConfig(\n      const std::string &graph_conf_name);\n  std::map<std::string, const std::shared_ptr<GraphConfig>> GetGraphConfList();\n  Status DeleteGraphConfig(const std::string &graph_conf_name);\n\n  std::map<std::string, const std::shared_ptr<GraphConfigFactory>>\n      graph_conf_factories_;\n  std::map<std::string, const std::shared_ptr<GraphConfig>> graph_conf_list_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_GRAPH_MANAGER_H"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/list.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef LIST_H\n#define LIST_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#define LIST_TYPEOF __typeof__\n#else\n#define LIST_TYPEOF typeof\n#endif /* __cplusplus */\n\n#define LIST_OFFSET_OF(type, member) ((size_t) & ((type *)0)->member)\n#define LIST_CONTAINER_OF(ptr, type, member)                 \\\n  ({                                                         \\\n    const LIST_TYPEOF(((type *)0)->member) *__mptr = (ptr);  \\\n    (type *)((char *)__mptr - LIST_OFFSET_OF(type, member)); \\\n  })\n\n#define LIST_POISON1 ((void *)0x00100100)\n#define LIST_POISON2 ((void *)0x00200200)\n\n/**\n * @brief list head\n */\ntypedef struct _ListHead {\n  /// @brief next list head\n  struct _ListHead *next;\n  /// @brief prev list head\n  struct _ListHead *prev;\n} ListHead;\n\n/* Init list head */\n#define LIST_HEAD_INIT(name) \\\n  { &(name), &(name) }\n\n/* define a list */\n#define LIST_HEAD(name) LIST_HEAD name = LIST_HEAD_INIT(name)\n\n/*Init list head*/\nstatic inline void ListInit(ListHead *list) {\n  list->next = list;\n  list->prev = list;\n}\n\nstatic inline void _ListAdd(ListHead *newnode, ListHead *prev, ListHead *next) {\n  next->prev = newnode;\n  newnode->next = next;\n  newnode->prev = prev;\n  prev->next = newnode;\n}\n\n/* Add new item to list head */\nstatic inline void ListAdd(ListHead *newnode, 
ListHead *head) {\n  _ListAdd(newnode, head, head->next);\n}\n\n/* Add new item to list tail */\nstatic inline void ListAddTail(ListHead *newnode, ListHead *head) {\n  _ListAdd(newnode, head->prev, head);\n}\n\nstatic inline void _ListDel(ListHead *prev, ListHead *next) {\n  next->prev = prev;\n  prev->next = next;\n}\n\n/* delete a item from list */\nstatic inline void ListDel(ListHead *entry) {\n  _ListDel(entry->prev, entry->next);\n  entry->next = (ListHead *)LIST_POISON1;\n  entry->prev = (ListHead *)LIST_POISON2;\n}\n\n/* delete a item from list and init the item */\nstatic inline void ListDelInit(ListHead *entry) {\n  _ListDel(entry->prev, entry->next);\n  ListInit(entry);\n}\n\n/* set item invalid */\nstatic inline void ListInitEntry(ListHead *entry) {\n  entry->next = (ListHead *)LIST_POISON1;\n  entry->prev = (ListHead *)LIST_POISON2;\n}\n\n/* check item is in list */\nstatic inline int ListEntryNotInList(ListHead *entry) {\n  return ((entry->next == (ListHead *)LIST_POISON1) &&\n          (entry->prev == (ListHead *)LIST_POISON2));\n}\n\n/* is list empty */\nstatic inline int ListEmpty(const ListHead *head) { return head->next == head; }\n\n/* is list empty */\nstatic inline int ListEmptyCareful(const ListHead *head) {\n  ListHead *next = head->next;\n  return (next == head) && (next == head->prev);\n}\n\n/* Get list entry */\n#define ListEntry(ptr, type, member) LIST_CONTAINER_OF(ptr, type, member)\n\n/* Get First Entry */\n#define ListFirstEntry(ptr, type, member) ListEntry((ptr)->next, type, member)\n\n/* Get Last Entry */\n#define ListLastEntry(ptr, type, member) ListEntry((ptr)->prev, type, member)\n\n/* iterator the list */\n#define ListForEachEntry(pos, head, member)                          \\\n  for ((pos) = ListEntry((head)->next, LIST_TYPEOF(*(pos)), member); \\\n       &(pos)->member != (head);                                     \\\n       (pos) = ListEntry((pos)->member.next, LIST_TYPEOF(*(pos)), member))\n\n/* iterator the list */\n#define 
ListForEachEntrySafe(pos, n, head, member)                      \\\n  for ((pos) = ListEntry((head)->next, LIST_TYPEOF(*(pos)), member),    \\\n      (n) = ListEntry((pos)->member.next, LIST_TYPEOF(*(pos)), member); \\\n       &(pos)->member != (head); (pos) = (n),                           \\\n      (n) = ListEntry((n)->member.next, LIST_TYPEOF(*(n)), member))\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_LOG_H_\n#define MODELBOX_LOG_H_\n\n#include <modelbox/base/utils.h>\n\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <sstream>\n#include <string>\n\nnamespace modelbox {\n\n/**\n * @brief Log level\n */\nenum LogLevel {\n  /// Debug\n  LOG_DEBUG = 0,\n  /// Info\n  LOG_INFO,\n  /// Notice\n  LOG_NOTICE,\n  /// Warning\n  LOG_WARN,\n  /// Error\n  LOG_ERROR,\n  /// Fatal\n  LOG_FATAL,\n  /// Turn off log\n  LOG_OFF,\n};\n\n/**\n * @brief Logger interface\n */\nclass Logger {\n public:\n  Logger();\n  virtual ~Logger();\n\n  /**\n   * @brief Output log with va-arg\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param format log format\n   * @param ap va_list\n   */\n  virtual void Vprint(LogLevel level, const char *file, int lineno,\n                      const char *func, const char *format, va_list ap);\n\n  /**\n   * @brief Output log\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param msg log message\n   */\n  virtual void Print(LogLevel level, const char *file, int lineno,\n                     const char *func, const char *msg);\n  /**\n   * @brief Set log level\n   * @param level log level\n 
  */\n  virtual void SetLogLevel(LogLevel level);\n\n  /**\n   * @brief Get log level\n   * @return level log level\n   */\n  virtual LogLevel GetLogLevel() = 0;\n};\n\nusing LoggerVprint =\n    std::function<void(LogLevel level, const char *file, int lineno,\n                       const char *func, const char *format, va_list ap)>;\nusing LoggerPrint =\n    std::function<void(LogLevel level, const char *file, int lineno,\n                       const char *func, const char *msg)>;\n\n/**\n * @brief Register va-list log function\n * @param func va-list log function\n */\nextern void RegLogVprint(const LoggerVprint &func);\n\n/**\n * @brief Register print log function\n * @param func print log function\n */\nextern void RegLogPrint(const LoggerPrint &func);\n\nclass LoggerCallback : public Logger {\n public:\n  LoggerCallback();\n  ~LoggerCallback() override;\n\n  /**\n   * @brief Register va-list log function\n   * @param func va-list log function\n   */\n  void RegVprint(const LoggerVprint &func);\n\n  /**\n   * @brief Register print log function\n   * @param func print log function\n   */\n  void RegPrint(const LoggerPrint &func);\n\n  /**\n   * @brief Set log level\n   * @param level log level\n   */\n  void SetLogLevel(LogLevel level) override;\n\n  /**\n   * @brief Get log level\n   * @return level log level\n   */\n  LogLevel GetLogLevel() override;\n\n private:\n  /**\n   * @brief Output log with va-arg\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param format log format\n   * @param ap va_list\n   */\n  void Vprint(LogLevel level, const char *file, int lineno, const char *func,\n              const char *format, va_list ap) override;\n  /**\n   * @brief Output log\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param msg log message\n   */\n  void Print(LogLevel level, const char 
*file, int lineno, const char *func,\n             const char *msg) override;\n\n  LoggerVprint vprint_;\n  LoggerPrint print_;\n  LogLevel level_{LOG_DEBUG};\n};\n\n/**\n * @brief Console logger\n */\nclass LoggerConsole : public Logger {\n public:\n  LoggerConsole();\n  ~LoggerConsole() override;\n\n  /**\n   * @brief Output log\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param msg log message\n   */\n\n  void Print(LogLevel level, const char *file, int lineno, const char *func,\n             const char *msg) override;\n\n  /**\n   * @brief Set log level\n   * @param level log level\n   */\n  void SetLogLevel(LogLevel level) override;\n\n  /**\n   * @brief Get log level\n   * @return level log level\n   */\n  LogLevel GetLogLevel() override;\n\n private:\n  void SetLogLevelFromEnv();\n  LogLevel level_ = LOG_OFF;\n  bool neeed_flush_;\n};\n\nclass Log {\n  using Stream = std::ostringstream;\n  using Buffer_p = std::unique_ptr<Stream, std::function<void(Stream *)>>;\n\n public:\n  Log();\n  virtual ~Log();\n\n  /**\n   * @brief Output log\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param format log format\n   */\n  void Print(LogLevel level, const char *file, int lineno, const char *func,\n             const char *format, ...) 
__attribute__((format(printf, 6, 7)))\n  __attribute__((nonnull(6)));\n\n  /**\n   * @brief Output log with va-arg\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param format log format\n   * @param ap va_list\n   */\n  void Vprint(LogLevel level, const char *file, int lineno, const char *func,\n              const char *format, va_list ap);\n\n  /**\n   * @brief Set loggger\n   * @param logger poniter to logger\n   */\n  void SetLogger(const std::shared_ptr<Logger> &logger);\n\n  /**\n   * @brief Whether to output log\n   * @param level log level\n   */\n  bool CanLog(LogLevel level);\n\n  /**\n   * @brief Get loggger\n   * @return logger poniter to logger\n   */\n  std::shared_ptr<Logger> GetLogger();\n\n  /**\n   * @brief Output log to stream\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   */\n  Buffer_p LogStream(LogLevel level, const char *file, int lineno,\n                     const char *func);\n\n private:\n  std::shared_ptr<Logger> logger_ = std::make_shared<LoggerConsole>();\n};\n\nclass LogMessage {\n public:\n  /**\n   * @brief Output log message\n   * @param log log pointer\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   */\n  LogMessage(Log *log, LogLevel level, const char *file, int lineno,\n             const char *func);\n  virtual ~LogMessage();\n\n  /**\n   * @brief Log stream\n   * @return log stream\n   */\n  std::ostream &Stream();\n\n private:\n  Log *log_;\n  LogLevel level_;\n  const char *file_;\n  int lineno_;\n  const char *func_;\n\n  std::ostringstream msg_;\n};\n\n/**\n * @brief Global logger\n */\nextern Log klogger __attribute__((weak));\n\n/**\n * @brief Global logger\n */\nextern Log &GetLogger();\n\n/**\n * @brief Log level to string\n * @param level log level\n * @return log 
level in string\n */\nextern const char *LogLevelToString(LogLevel level);\n\n/**\n * @brief String log level to level\n * @param level log level in string\n * @return log level\n */\nextern LogLevel LogLevelStrToLevel(const std::string &level);\n\nextern std::shared_ptr<const void> LogSetLogID(const char *id);\n\n}  // namespace modelbox\n\n#ifndef BASE_FILE_NAME\n#define BASE_FILE_NAME                                                     \\\n  (__builtin_strrchr(__FILE__, '/') ? __builtin_strrchr(__FILE__, '/') + 1 \\\n                                    : __FILE__)\n#endif\n\n/*\n  Log may crash when the shared library loaded by deepbind and call this log\n  API. The reason is symbol copy-relocation, About copy-relocation, read here:\n  https://stackoverflow.com/questions/37296995/dynamic-loading-of-shared-library-with-rtld-deepbind\n  Setting the symbol to weak allows the deepbind library to use the first symbol\n  without error. If there is still a problem, you can use macros\n  MBLOG_LIBRARY_DEEPBIND. but this will cause a little slower when calling log\n  API.\n*/\n\n#define ModelBoxLogger modelbox::GetLogger()\n\n#define MODELBOX_PRINT(level, ...) \\\n  ModelBoxLogger.Print(level, BASE_FILE_NAME, __LINE__, __func__, __VA_ARGS__)\n\n#define MODELBOX_LOGSTREAM(level)                                        \\\n  if (ModelBoxLogger.CanLog(level))                                      \\\n  modelbox::LogMessage(&ModelBoxLogger, level, BASE_FILE_NAME, __LINE__, \\\n                       __func__)                                         \\\n      .Stream()\n\n#define MODELBOX_DEBUG(...) MODELBOX_PRINT(modelbox::LOG_DEBUG, __VA_ARGS__)\n#define MODELBOX_INFO(...) MODELBOX_PRINT(modelbox::LOG_INFO, __VA_ARGS__)\n#define MODELBOX_NOTICE(...) MODELBOX_PRINT(modelbox::LOG_NOTICE, __VA_ARGS__)\n#define MODELBOX_WARN(...) MODELBOX_PRINT(modelbox::LOG_WARN, __VA_ARGS__)\n#define MODELBOX_ERROR(...) 
MODELBOX_PRINT(modelbox::LOG_ERROR, __VA_ARGS__)\n#define MODELBOX_FATAL(...) MODELBOX_PRINT(modelbox::LOG_FATAL, __VA_ARGS__)\n\n/// Output debug log\n#define MBLOG_DEBUG MODELBOX_LOGSTREAM(modelbox::LOG_DEBUG)\n/// Output info log\n#define MBLOG_INFO MODELBOX_LOGSTREAM(modelbox::LOG_INFO)\n// Output notice log\n#define MBLOG_NOTICE MODELBOX_LOGSTREAM(modelbox::LOG_NOTICE)\n/// Output warning log\n#define MBLOG_WARN MODELBOX_LOGSTREAM(modelbox::LOG_WARN)\n/// Output error log\n#define MBLOG_ERROR MODELBOX_LOGSTREAM(modelbox::LOG_ERROR)\n/// Output fatal log\n#define MBLOG_FATAL MODELBOX_LOGSTREAM(modelbox::LOG_FATAL)\n\n/**\n * @brief Print stack, level is modelbox::LOG_DEBUG|modelbox::LOG_INFO|...\n */\n#define MBLOG_STACKTRACE(level) \\\n  MODELBOX_PRINT(level, \"Stack:\\n%s\", modelbox::GetStackTrace().c_str())\n\nnamespace modelbox {\n\n/**\n * @brief Abort and print stack and record log\n * @param errmsg abort message\n */\nstatic inline void Abort(const char *errmsg) {\n  MBLOG_FATAL << \"Abort: \" << errmsg;\n  MBLOG_STACKTRACE(LOG_FATAL);\n  abort();\n}\n}  // namespace modelbox\n\n#endif  // MODELBOX_LOG_H_\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/memory_pool.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MEMORY_POOL_H\n#define MODELBOX_MEMORY_POOL_H\n\n#include <modelbox/base/collector.h>\n#include <modelbox/base/slab.h>\n\nnamespace modelbox {\n\n/**\n * @brief Memory pool interface\n */\nclass MemoryPoolBase : public MemoryAllocFree,\n                       public std::enable_shared_from_this<MemoryPoolBase> {\n public:\n  /**\n   * @brief Initialize slab cache.\n   * @param low low bit 2^low\n   * @param high high bit 2^high\n   * @return result.\n   */\n  Status InitSlabCache(int low = 5, int high = 27);\n\n  /**\n   * @brief Alloc a object from slab.\n   * @return shared pointer to object.\n   */\n  std::shared_ptr<void> AllocSharedPtr(size_t size);\n\n  /**\n   * @brief Shrink slab cache.\n   * @param each_keep slab number for each to keep.\n   * @param before shrink slab before specific time.\n   * @param expire force shrink cache before specific time.\n   * @return shrink result.\n   */\n  virtual Status ShrinkSlabCache(int each_keep, time_t before,\n                                 time_t expire = 0);\n\n  /**\n   * @brief Get all slab object number\n   * @return return total object number\n   */\n  uint32_t GetAllObjectNum();\n\n  /**\n   * @brief Get all active slab object number\n   * @return return total object number\n   */\n  uint32_t GetAllActiveObjectNum();\n\n  /**\n   * 
@brief Destroy slab cache.\n   */\n  void DestroySlabCache();\n\n  /**\n   * @brief Get the vector of slab cache pointers\n   * @return the vector of slab cach pointers\n   */\n  std::vector<std::shared_ptr<SlabCache>> GetSlabCaches();\n\n  /**\n   * @brief Set memory pool name\n   * @param name\n   */\n  void SetName(std::string name);\n\n  /**\n   * @brief Get memory pool name\n   *\n   * @return std::string\n   */\n  std::string GetName();\n\n  static std::vector<std::shared_ptr<MemoryPoolBase>> GetAllPools();\n\n  MemoryPoolBase();\n  MemoryPoolBase(std::string name);\n  virtual ~MemoryPoolBase();\n\n protected:\n  /**\n   * @brief Create a slab cache.\n   * @param obj_size object size.\n   * @param slab_size slab size.\n   * @return shared pointer to slabcache.\n   */\n  virtual std::shared_ptr<SlabCache> MakeSlabCache(size_t obj_size,\n                                                   size_t slab_size);\n\n  /**\n   * @brief Calculate slabcache size.\n   * @param object_size object size.\n   * @return slab size.\n   */\n  virtual size_t CalSlabSize(size_t object_size);\n\n  /**\n   * @brief Alloc Memory\n   * @param size memory size.\n   * @return pointer to memory.\n   */\n  void *MemAlloc(size_t size) override;\n\n  /**\n   * @brief Free Memory\n   * @param ptr to memory.\n   */\n  void MemFree(void *ptr) override;\n\n  /**\n   * @brief Add a new slab cache\n   * @param slab_cache new slab cache.\n   */\n  void AddSlabCache(const std::shared_ptr<SlabCache> &slab_cache);\n\n  /**\n   * @brief Clear all slabs \n   */\n  void ClearAllSlabs();\n  \n private:\n  std::vector<std::shared_ptr<SlabCache>> slab_caches_;\n  std::string pool_name_;\n  static std::map<MemoryPoolBase *, std::weak_ptr<MemoryPoolBase>> pool_list_;\n  static std::mutex pool_list_lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_MEMORY_POOL_H\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/memory_statistic.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_MEMORY_H\n#define MODELBOX_MEMORY_H\n\n#include <modelbox/base/device.h>\n\nnamespace modelbox {\n/**\n * @brief Device memory statistics\n */\nclass MemoryStatistics {\n public:\n  MemoryStatistics(DeviceManager &deviceManager);\n  virtual ~MemoryStatistics();\n\n  /**\n   * @brief Get total memory of one device\n   * @param device_type\n   * @param device_id\n   * @return int\n   */\n  int GetTotalMemory(std::string &device_type, std::string &device_id);\n\n  /**\n   * @brief Get already used memory of one device\n   * @param device_type\n   * @param device_id\n   * @return int\n   */\n  int GetUsedMemory(std::string &device_type, std::string &device_id);\n\n  /**\n   * @brief Get total memory of all devices\n   * @return std::map<std::string, int>\n   */\n  std::map<std::string, int> &GetTotalMemory();\n\n  /**\n   * @brief Get already used memory of all devices\n   * @return std::map<std::string, int>\n   */\n  std::map<std::string, int> &GetUsedMemory();\n\n  /**\n   * @brief Get memory trace log of one device\n   * @param device_type\n   * @param device_id\n   * @return std::vector<CircleQueue>\n   */\n  std::vector<CircleQueue> &GetMemoryTrace(std::string &device_type,\n                                           std::string &device_id);\n\n  /**\n   * @brief Get memory trace log of all 
device\n   * @return std::map<std::string, std::vector<CircleQueue>>\n   */\n  std::map<std::string, std::vector<CircleQueue>> &GetMemoryTrace();\n\n  /**\n   * @brief Get allocated device memory of one device\n   * @param device_type\n   * @param device_id\n   */\n  std::vector<std::shared_ptr<DeviceMemory>> GetAllocatedMemory(\n      std::string &device_type, std::string &device_id);\n\n private:\n  DeviceManager device_mgr_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_MEMORY_H\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/os.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_OS_H_\n#define MODELBOX_OS_H_\n\n#include <memory>\n#include <thread>\n#include <vector>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\n/**\n * @brief OS process API\n */\nclass OSProcess {\n public:\n  OSProcess();\n  virtual ~OSProcess();\n\n  /**\n   * @brief Get thread number of specific process\n   * @param pid process id\n   * @return thread number\n   */\n  virtual int32_t GetThreadsNumber(uint32_t pid) = 0;\n\n  /**\n   * @brief Get memory usage of specific process\n   * @param pid process id\n   * @return memory size\n   */\n  virtual uint32_t GetMemorySize(uint32_t pid) = 0;\n\n  /**\n   * @brief Get memory rss usage of specific process\n   * @param pid process id\n   * @return memory size\n   */\n  virtual uint32_t GetMemoryRSS(uint32_t pid) = 0;\n\n  /**\n   * @brief Get shared memory usage of specific process\n   * @param pid process id\n   * @return memory size\n   */\n  virtual uint32_t GetMemorySHR(uint32_t pid) = 0;\n\n  /**\n   * @brief Get current process id\n   * @return process id\n   */\n  virtual uint32_t GetPid() = 0;\n\n  /**\n   * @brief Get Process parent pid\n   * @return process id\n   */\n  virtual uint32_t GetPPid() = 0;\n\n  virtual std::vector<uint32_t> GetProcessTime(uint32_t pid) = 0;\n\n  virtual std::vector<uint32_t> 
GetTotalTime(uint32_t pid) = 0;\n};\n\n/**\n * @brief OS thread API\n */\nclass OSThread {\n public:\n  OSThread();\n  virtual ~OSThread();\n\n  /**\n   * @brief Get current thread id\n   * @return thread id\n   */\n  virtual std::thread::id GetTid() = 0;\n\n  /**\n   * @brief Set current thread name\n   * @param name thread name.\n   * @return whether success\n   */\n  virtual Status SetName(const std::string &name) = 0;\n\n  /**\n   * @brief Set current thread priority\n   * @param thread thread handler\n   * @param priority priority\n   */\n  virtual Status SetThreadPriority(const std::thread::id &thread,\n                                   int32_t priority) = 0;\n\n  /**\n   * @brief Set current thread logical affinity\n   * @param thread thread handler\n   * @param l_cpus logical cpu list\n   * @return whether success\n   */\n  virtual Status SetThreadLogicalCPUAffinity(\n      const std::thread::id &thread, const std::vector<int16_t> &l_cpus) = 0;\n\n  /**\n   * @brief Set current thread physical affinity\n   * @param thread thread handler\n   * @param p_cpus physical cpu list\n   * @return whether success\n   */\n  virtual Status SetThreadPhysicalCPUAffinity(\n      const std::thread::id &thread, const std::vector<int16_t> &p_cpus) = 0;\n\n  /**\n   * @brief Get current thread priority\n   * @param thread thread handler\n   * @return thread priority\n   */\n  virtual int32_t GetThreadPriority(const std::thread::id &thread) = 0;\n};\n\n/**\n * @brief OS information API\n */\nclass OSInfo {\n public:\n  OSInfo();\n  virtual ~OSInfo();\n\n  /**\n   * @brief Get system memory usage\n   * @param free free memory\n   * @param total total memory\n   * @return whether success\n   */\n  virtual Status GetMemoryUsage(size_t *free, size_t *total) = 0;\n\n  /**\n   * @brief Get system cpu run time\n   * @return cpu run time list\n   */\n  virtual std::vector<uint32_t> GetCpuRunTime() = 0;\n\n  /**\n   * @brief Get system physical cpu number\n   * @return physical cpu 
number\n   */\n  virtual int32_t GetPhysicalCpuNumbers() = 0;\n\n  /**\n   * @brief Get system logical cpu number\n   * @return logical cpu number\n   */\n  virtual int32_t GetLogicalCpuNumbers() = 0;\n\n  /**\n   * @brief Get system id\n   * @return system id in string\n   */\n  virtual std::string GetSystemID() = 0;\n\n  /**\n   * @brief Get network interface mac address\n   * @param nic network interface name, default is first.\n   * @return mac address\n   */\n  virtual std::string GetMacAddress(const std::string &nic = \"\") = 0;\n\n  /// Process API\n  std::shared_ptr<OSProcess> Process;\n\n  /// Thread API\n  std::shared_ptr<OSThread> Thread;\n};\n\n/// OS API\nextern OSInfo *os;\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/popen.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_POPEN_H_\n#define MODELBOX_POPEN_H_\n\n#include <functional>\n#include <memory>\n#include <thread>\n#include <vector>\n#include <map>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\n/**\n * @brief safe pipe stream to or from a process, support command timeout,\n * support getting standard error output, and support setting enviroment.\n */\nclass Popen;\nclass PopenEnv {\n public:\n  PopenEnv();\n\n  virtual ~PopenEnv();\n\n  /**\n   * @brief Construct a new Popen Env object\n   *\n   * @param item_list env var item in list\n   */\n  PopenEnv(const std::string  &item_list);\n\n  /**\n   * @brief Construct a new Popen Env object\n   *\n   * @param item_list env var item in list\n   */\n  PopenEnv(const char *item_list);\n\n  /**\n   * @brief Construct a new Popen Env object\n   *\n   * @param item\n   * @param value\n   */\n  PopenEnv(const std::string &item, const std::string &value);\n  /**\n   * @brief Add a new env variable\n   *\n   * @param item var name\n   * @param value  var value\n   */\n  PopenEnv &Add(const std::string &item, const std::string &value);\n\n  /**\n   * @brief Remove a item from env\n   *\n   * @param item var name\n   */\n  PopenEnv &Rmv(const std::string &item);\n\n  /**\n   * @brief Clear all enviroment\n   *\n   */\n  PopenEnv &Clear();\n\n 
protected:\n  friend Popen;\n\n  std::vector<std::string> GetEnvs() const;\n\n  void LoadInherit();\n  void LoadEnvFromList(const std::string &item_list);\n  bool Changed() const;\n\n private:\n  std::map<std::string, std::string> env_;\n  bool inherit_{true};\n  bool load_inherit_{false};\n};\n\nclass Popen {\n public:\n  Popen();\n  virtual ~Popen();\n\n  /**\n   * @brief Opens a process by creating a pipe, forking.\n   *\n   * @param args parameter list, the first parameter is the program path\n   * @param timeout command execution timeout, in milliseconds, When the command\n   * times out, the child process will be killed\n   * @param mode read-write mode,\n   *    \"w\" for writing to standard input,\n   *    \"r\" for reading standard output,\n   *    \"e\" for reading standard error output.\n   * @param env command enviroment.\n   * @return Status operation result\n   */\n  Status Open(std::vector<std::string> args, int timeout = -1,\n              const char *mode = \"r\", const PopenEnv &env = \"\");\n\n  /**\n   * @brief Opens a process by creating a pipe, forking.\n   *\n   * @param cmdline command line in string format.\n   * @param timeout command execution timeout, in milliseconds, When the command\n   * times out, the child process will be killed\n   * @param mode read-write mode,\n   *    \"w\" for writing to standard input,\n   *    \"r\" for reading standard output,\n   *    \"e\" for reading standard error output.\n   * @param env command enviroment.\n   * @return Status operation result\n   */\n  Status Open(const std::string &cmdline, int timeout = -1,\n              const char *mode = \"r\", const PopenEnv &env = \"\");\n\n\n  /**\n   * @brief Close the command and get the command execution result\n   *\n   * @return int command execution result, Whether the command times out, you\n   * can check whether the signal is SIGKILL.\n   */\n  int Close();\n\n  /**\n   * @brief Waiting to be read\n   *\n   * @param timeout wait period in 
milliseconds\n   * @return 1: can read\n   *         0: timeout\n   *         -1: error\n   */\n  int WaitForLineRead(int timeout = -1);\n\n  /**\n   * @brief Read a line of stderr output\n   *\n   * @param line\n   * @return 0: success\n   *         -1: fail.\n   */\n  int ReadErrLine(std::string &line);\n\n  /**\n   * @brief Read a line of stdout output\n   *\n   * @param line\n   * @return 0: success\n   *         -1: fail.\n   */\n  int ReadOutLine(std::string &line);\n\n  /**\n   * @brief Read all outputs at once\n   *\n   * @param out standard output variable\n   * @param err standard error output variable\n   * @return 0: success\n   *         -1: fail.\n   */\n  int ReadAll(std::string *out, std::string *err);\n\n  /**\n   * @brief Write string to child process\n   *\n   * @param in write message\n   * @return 0: success\n   *         -1: fail.\n   */\n  int WriteString(const std::string &in);\n\n  /**\n   * @brief Force stop child process\n   *\n   * @return OK success\n   *         other: fail.\n   */\n  Status ForceStop();\n\n  /**\n   * @brief Keep command alive\n   *\n   */\n  void KeepAlive();\n\n private:\n  struct stdfd {\n    bool enable_{false};\n    int fd_{-1};\n    std::vector<char> buffer_;\n    int newline_pos_{0};\n    int iseof_{0};\n  };\n\n  int WaitForFds(\n      std::vector<struct stdfd *> fds, int timeout,\n      const std::function<int(struct stdfd *stdfd, int revents)> &func);\n\n  int ReadLineData(struct stdfd *stdfd);\n\n  int WaitForFdsLineRead(std::vector<struct stdfd *> *fds, int timeout);\n\n  bool DataReady(std::vector<struct stdfd *> *fds);\n\n  void UpdateNewLinePos(struct stdfd *stdfd);\n\n  int GetStringLine(struct stdfd *stdfd, std::string &line);\n\n  int WaitChildTimeOut();\n\n  int TimeOutLeft();\n\n  void CloseStdFd();\n\n  void CloseAllParentFds(int keep_fd);\n\n  void SetupMode(const char *mode);\n\n  struct stdfd fdout_;\n  struct stdfd fderr_;\n  struct stdfd fdin_;\n\n  pid_t child_pid_{0};\n  int timeout_{-1};\n 
 std::chrono::high_resolution_clock::time_point start_tm_;\n};\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/refcache.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_REFCACHE_H_\n#define MODELBOX_REFCACHE_H_\n\n#include <condition_variable>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <string>\n\nnamespace modelbox {\n\ntemplate <typename T, typename KEY>\nclass RefInsertTransaction;\n\ntemplate <typename T, typename KEY>\nclass RefCache;\n\n/**\n * @brief Reference cache data container\n */\ntemplate <typename T, typename KEY>\nclass RefContainer {\n public:\n  RefContainer() = default;\n  virtual ~RefContainer() = default;\n\n  /// reference count\n  int refcount_{0};\n\n  /// data pointer\n  std::shared_ptr<T> data_ = nullptr;\n\n  /// transaction pointer\n  RefInsertTransaction<T, KEY> *trans_ = nullptr;\n};\n\n/**\n * @brief Reference cache insert transaction\n */\ntemplate <typename T, typename KEY = const std::string>\nclass RefInsertTransaction {\n public:\n  /**\n   * @brief Constructor of RefInsertTransaction\n   * @param ref_cache Pointer to RefCache\n   * @param container data container\n   * @param data data to cache\n   * @param key cache key\n   */\n  RefInsertTransaction(RefCache<T, KEY> *ref_cache,\n                       std::shared_ptr<RefContainer<T, KEY>> container,\n                       std::shared_ptr<T> data, const std::string &key) {\n    ref_cache_ = ref_cache;\n    ref_container_ = container;\n    ref_data_ = 
data;\n    key_ = key;\n  };\n\n  virtual ~RefInsertTransaction() {\n    if (ref_container_) {\n      if (ref_container_->trans_ != nullptr) {\n        ref_container_->trans_ = nullptr;\n        ref_cache_->NotifyAll();\n      }\n\n      if (ref_container_->data_ == nullptr) {\n        ref_cache_->Release(key_);\n      }\n    }\n\n    ref_data_ = nullptr;\n  };\n\n  /**\n   * @brief End transaction for inserting data.\n   * @param data insert.\n   * @return return new reference data.\n   */\n  std::shared_ptr<T> UpdateData(std::shared_ptr<T> data) {\n    ref_container_->data_ = data;\n    ref_container_->trans_ = nullptr;\n    auto ret = ref_cache_->Get(key_, false);\n    ref_cache_->NotifyAll();\n\n    return ret;\n  }\n\n  /**\n   * @brief Get cache data.\n   * @return return reference data.\n   */\n  std::shared_ptr<T> GetData() { return ref_data_; }\n\n private:\n  friend class RefCache<T, KEY>;\n  RefCache<T, KEY> *ref_cache_;\n  std::shared_ptr<T> ref_data_;\n  std::shared_ptr<RefContainer<T, KEY>> ref_container_;\n  std::string key_;\n};\n\n/**\n * @brief Reference cache, support transactions to insert data.\n */\ntemplate <typename T, typename KEY = const std::string>\nclass RefCache {\n public:\n  RefCache() = default;\n  virtual ~RefCache() = default;\n\n  /**\n   * @brief Get reference data from key, when inserting, may blocking.\n   * @param key reference key.\n   * @return return data\n   */\n  std::shared_ptr<RefInsertTransaction<T, KEY>> InsertAndGet(KEY &key) {\n    std::unique_lock<std::mutex> lock(lock_);\n    auto itr = ref_data_set_.find(key);\n    if (itr == ref_data_set_.end()) {\n      return InsertLocked(&lock, key);\n    }\n\n    auto data = GetLocked(&lock, key, true);\n    if (data == nullptr) {\n      return nullptr;\n    }\n\n    auto reftrans = std::make_shared<RefInsertTransaction<T, KEY>>(\n        this, nullptr, data, key);\n    return reftrans;\n  }\n\n  /**\n   * @brief Get reference data from key, when inserting, may blocking.\n  
 * @param key reference key.\n   * @param is_block should block when inserting data.\n   * @return return data\n   */\n  std::shared_ptr<T> Get(KEY &key, bool is_block = false) {\n    std::unique_lock<std::mutex> lock(lock_);\n    return GetLocked(&lock, key, is_block);\n  }\n\n  /**\n   * @brief Insert reference data.\n   * @param key reference key.\n   * @return return referenced transaction\n   */\n  std::shared_ptr<RefInsertTransaction<T, KEY>> Insert(KEY &key) {\n    std::unique_lock<std::mutex> lock(lock_);\n    return InsertLocked(&lock, key);\n  }\n\n  /**\n   * @brief Update reference data.\n   * @param key reference key.\n   * @param data data.\n   * @return return referenced datal\n   */\n  std::shared_ptr<T> Update(KEY &key, std::shared_ptr<T> data) {\n    auto ref = std::make_shared<RefContainer<T, KEY>>();\n    ref->data_ = data;\n    ref->refcount_ = 0;\n    std::unique_lock<std::mutex> lock(lock_);\n    ref_data_set_[key] = ref;\n    return GetLocked(&lock, key, false);\n  }\n\n protected:\n  friend class RefInsertTransaction<T, KEY>;\n\n  /**\n   * @brief insert data with lock hold\n   * @param lock cache lock.\n   * @param key reference key.\n   * @return return transaction\n   */\n  std::shared_ptr<RefInsertTransaction<T, KEY>> InsertLocked(\n      std::unique_lock<std::mutex> *lock, KEY &key) {\n    auto itr = ref_data_set_.find(key);\n    if (itr != ref_data_set_.end()) {\n      return nullptr;\n    }\n\n    auto container = std::make_shared<RefContainer<T, KEY>>();\n    auto reftrans = std::make_shared<RefInsertTransaction<T, KEY>>(\n        this, container, nullptr, key);\n\n    ref_data_set_[key] = container;\n    container->trans_ = reftrans.get();\n    container->data_ = nullptr;\n    container->refcount_ = 0;\n\n    return reftrans;\n  }\n\n  /**\n   * @brief get data by with lock hold\n   * @param lock cache lock.\n   * @param key reference key.\n   * @param is_block whether block when key is in transaction\n   * @return return key 
value\n   */\n  std::shared_ptr<T> GetLocked(std::unique_lock<std::mutex> *lock, KEY &key,\n                               bool is_block = false) {\n    bool is_success = false;\n    std::shared_ptr<bool> guard(&is_success, [&](bool *result) {\n      if (is_success == false) {\n        ReleaseLocked(key);\n      }\n    });\n\n    auto itr = ref_data_set_.find(key);\n    if (itr == ref_data_set_.end()) {\n      return nullptr;\n    }\n\n    auto ref = itr->second;\n    ref->refcount_ += 1;\n\n    if (ref->data_ == nullptr) {\n      if (is_block == false) {\n        return nullptr;\n      }\n\n      if (ref->trans_ == nullptr) {\n        return nullptr;\n      }\n\n      cond_.wait(*lock, [&]() {\n        return ref->data_ != nullptr || ref->trans_ == nullptr;\n      });\n\n      itr = ref_data_set_.find(key);\n      if (itr == ref_data_set_.end()) {\n        return nullptr;\n      }\n\n      ref = itr->second;\n      if (ref->data_ == nullptr) {\n        return nullptr;\n      }\n    }\n\n    is_success = true;\n    std::shared_ptr<T> ret(ref->data_.get(),\n                           [this, key](void *ptr) { Release(key); });\n    return ret;\n  }\n\n  /**\n   * @brief Update key data\n   * @param key reference key.\n   * @param data data.\n   */\n  void UpdateData(KEY &key, std::shared_ptr<T> data) {\n    std::unique_lock<std::mutex> lock(lock_);\n    auto itr = ref_data_set_.find(key);\n    if (itr == ref_data_set_.end()) {\n      return;\n    }\n    auto ref = itr->second;\n    ref->data_ = data;\n  }\n\n  /**\n   * @brief Wake up all blocking call\n   */\n  void NotifyAll() { cond_.notify_all(); }\n\n  /**\n   * @brief Release key with lock hold\n   * @param key reference key\n   */\n  void ReleaseLocked(KEY &key) {\n    auto itr = ref_data_set_.find(key);\n    if (itr == ref_data_set_.end()) {\n      return;\n    }\n\n    auto ref = itr->second;\n    if (--ref->refcount_ > 0) {\n      return;\n    }\n\n    ref_data_set_.erase(key);\n  }\n\n  /**\n   * @brief 
Release key\n   * @param key reference key\n   */\n  void Release(KEY &key) {\n    std::unique_lock<std::mutex> lock(lock_);\n    ReleaseLocked(key);\n  }\n\n private:\n  std::map<KEY, std::shared_ptr<RefContainer<T, KEY>>> ref_data_set_;\n  std::mutex lock_;\n  std::condition_variable cond_;\n};\n\n/**\n * @brief Default reference cache data for any data\n */\nclass RefCacheData : public RefCache<void> {\n public:\n  RefCacheData();\n  ~RefCacheData() override;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_REFCACHE_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/register_flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef CALLBACK_FLOWUNIT_H_\n#define CALLBACK_FLOWUNIT_H_\n#include <modelbox/flowunit.h>\n\n#define FLOWUNIT_TYPE \"cpu\"\n\nnamespace modelbox {\n\nclass RegisterFlowUnit : public FlowUnit {\n public:\n  RegisterFlowUnit(const std::string &name);\n  ~RegisterFlowUnit() override;\n\n  Status Open(const std::shared_ptr<Configuration> &config) override;\n\n  /* class when unit is close */\n  Status Close() override;\n\n  Status Process(std::shared_ptr<DataContext> data_context) override;\n\n  void SetCallBack(\n      std::function<Status(std::shared_ptr<DataContext>)> callback);\n\n  std::function<Status(std::shared_ptr<DataContext>)> GetCallBack();\n\n private:\n  std::string name_;\n  std::function<Status(std::shared_ptr<DataContext>)> callback_{nullptr};\n};\n\nclass RegisterFlowUnitFactory : public FlowUnitFactory {\n public:\n  RegisterFlowUnitFactory();\n  RegisterFlowUnitFactory(\n      std::string unit_name, std::vector<std::string> inputs,\n      std::vector<std::string> outputs,\n      std::function<Status(std::shared_ptr<DataContext>)> callback);\n  ~RegisterFlowUnitFactory() override;\n\n  std::map<std::string, std::shared_ptr<FlowUnitDesc>> FlowUnitProbe() override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::string GetFlowUnitFactoryName() override;\n\n  std::shared_ptr<FlowUnit> 
CreateFlowUnit(\n      const std::string &name, const std::string &unit_type) override;\n\n private:\n  Status Init();\n  std::string unit_name_;\n  std::vector<std::string> input_ports_;\n  std::vector<std::string> output_ports_;\n  std::function<Status(std::shared_ptr<DataContext>)> callback_;\n  std::map<std::string, std::shared_ptr<FlowUnitDesc>> desc_map_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/slab.h",
    "content": "/**\n * Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\n */\n\n#ifndef MODELBOX_SLAB_H_\n#define MODELBOX_SLAB_H_\n\n#include <modelbox/base/list.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/timer.h>\n\n#include <atomic>\n#include <memory>\n#include <mutex>\n#include <unordered_map>\n#include <vector>\n\nnamespace modelbox {\n\n/// Slab object\nstruct SlabObject {\n  /// list head\n  ListHead list;\n\n  /// slab index in slab cache\n  unsigned long index;\n};\n\nclass SlabCache;\n\n/// Memeory allocator interface\nclass MemoryAllocFree {\n public:\n  /**\n   * @brief Malloc memory\n   * @param size memory size\n   * @return memory pointer\n   */\n  virtual void *MemAlloc(size_t size) = 0;\n\n  /**\n   * @brief Free memory\n   * @param ptr pointer to memory\n   */\n  virtual void MemFree(void *ptr) = 0;\n};\n\n/**\n * @brief Slab object\n */\nclass Slab {\n public:\n  /**\n   * @brief Construct slab\n   * @param cache slab cache.\n   * @param obj_size object size.\n   * @param mem_size memory size.\n   */\n  Slab(SlabCache *cache, size_t obj_size, size_t mem_size);\n\n  virtual ~Slab();\n\n  /**\n   * @brief Init slab, malloc memory.\n   * @return success, return true, else return false.\n   */\n  bool Init();\n\n  /**\n   * @brief Alloc a object from slab.\n   * @return allocated object pointer.\n   */\n  void *Alloc();\n\n  /**\n   * @brief Free a object.\n   * @param ptr pointer to object\n   */\n  void Free(void *ptr);\n\n  /**\n   * @brief Check if slab is full\n   * @return true, slab is full\n   */\n  bool IsFull();\n\n  /**\n   * @brief Check if address is in slab\n   * @return true, address is in slab\n   */\n  bool IsInSlab(const void *ptr);\n\n  /**\n   * @brief Check if slab is empty.\n   * @return true, slab is empty.\n   */\n  bool IsEmpty();\n\n  /**\n   * @brief Get object size.\n   * @return int, object size\n   */\n  size_t ObjectSize();\n\n  /**\n   * @brief Get active object number.\n   * 
@return int, active object number.\n   */\n  int ActiveObjects();\n\n  /**\n   * @brief Get total object number.\n   * @return int, total object number.\n   */\n  int ObjectNumber();\n\n  /**\n   * @brief Get alive time.\n   * @return alive time.\n   */\n  time_t AliveTime();\n\n protected:\n  /**\n   * @brief Alloc memory override\n   * @param size memory size.\n   * @return memory pointer\n   */\n  void *_Alloc(size_t size);\n\n  /**\n   * @brief Free memory override\n   * @param ptr memory pointer\n   * @return alive time.\n   */\n  void _Free(void *ptr);\n\n private:\n  friend class SlabCache;\n\n  ListHead list;\n  struct SlabObject *objs_{nullptr};\n  ListHead free_obj_head_;\n\n  size_t obj_size_;\n  size_t obj_num_;\n  size_t active_obj_num_;\n\n  void *mem_{nullptr};\n  size_t mem_size_{0};\n\n  SlabCache *cache_{nullptr};\n\n  time_t last_alive_;\n};\n\n/**\n * @brief Slab cache\n */\nclass SlabCache {\n public:\n  /**\n   * @brief Construct slabcache\n   * @param obj_size object size.\n   * @param slab_size each slab memory size.\n   * @param mem_allocator memory allocator.\n   */\n  SlabCache(size_t obj_size, size_t slab_size,\n            MemoryAllocFree *mem_allocator = nullptr);\n\n  virtual ~SlabCache();\n\n  /**\n   * @brief Alloc a object from slab.\n   * @return shared pointer to object.\n   */\n  std::shared_ptr<void> AllocSharedPtr();\n\n  /**\n   * @brief Shrink slab\n   * @param keep number to keep.\n   * @param before shrink cache before specific time.\n   */\n  void Shrink(int keep = 0, time_t before = 0);\n\n  /**\n   * @brief Reclaim slab\n   * @param before shrink cache before specific time.\n   */\n  void Reclaim(time_t before = 30);\n\n  /**\n   * @brief Get empty slab number.\n   * @return empty slab number.\n   */\n  int GetEmptySlabNumber();\n\n  /**\n   * @brief Get total slab number.\n   * @return total slab number.\n   */\n  uint32_t SlabNumber();\n\n  /**\n   * @brief Get object size.\n   * @return object size.\n   */\n  size_t 
ObjectSize();\n\n  /**\n   * @brief Get object number.\n   * @return object number.\n   */\n  uint32_t GetObjNumber();\n\n  /**\n   * @brief Get free object number.\n   * @return free object number.\n   */\n  uint32_t GetFreeObjNumber();\n\n  /**\n   * @brief Get active object number.\n   * @return active object number.\n   */\n  uint32_t GetActiveObjNumber();\n\n protected:\n  /**\n   * @brief Remove slabs\n   * @param head slabe head\n   */\n  void RemoveSlabs(ListHead *head);\n\n  /**\n   * @brief Remove slab\n   * @param s pointer to slab\n   */\n  void RemoveSlabLocked(Slab *s);\n\n  /**\n   * @brief Remove slab with specific args\n   * @param head slabe head\n   * @param count remove number\n   * @param time_before time before\n   */\n  void RemoveSlabs(ListHead *head, size_t count, time_t time_before);\n\n  /**\n   * @brief Grow slab\n   * @return grow result\n   */\n  bool GrowLocked(std::unique_lock<std::mutex> *lock);\n\n  /**\n   * @brief Alloc memory override\n   * @param size memory size.\n   * @return memory pointer\n   */\n  void *_Alloc(size_t size);\n\n  /**\n   * @brief Free memory override\n   * @param ptr memory pointer\n   * @return alive time.\n   */\n  void _Free(void *ptr);\n\n private:\n  friend class Slab;\n\n  /**\n   * @brief Alloc a object from slab\n   * @param obj object allocated\n   * @param slab which slab\n   */\n  void AllocObject(void **obj, Slab **slab);\n\n  /**\n   * @brief Free a object into slab\n   * @param obj object allocated\n   * @param slab which slab\n   */\n  void FreeObject(void *obj, Slab *slab);\n\n  size_t obj_size_{0};\n  size_t slab_size_{0};\n\n  int obj_num_{0};\n  int batch_object_num_{0};\n  std::atomic<uint32_t> active_obj_num_;\n  std::atomic<uint32_t> slab_empty_num_;\n  std::atomic<uint32_t> slab_num_;\n\n  std::mutex lock_;\n\n  ListHead full_;\n  ListHead partial_;\n  ListHead empty_;\n\n  MemoryAllocFree *mem_allocator_{nullptr};\n};\n\nclass SlabCacheReclaimer {\n public:\n  static 
SlabCacheReclaimer &Instance();\n  virtual ~SlabCacheReclaimer();\n\n  void AddSlabCache(SlabCache *slabcache);\n\n  void RmvSlabCache(SlabCache *slabcache);\n\n private:\n  SlabCacheReclaimer();\n  void DoReclaim();\n\n  void StartReclaimWorker();\n  void StopReclaimWoker();\n\n  std::unordered_map<SlabCache *, SlabCache *> slab_cache_list_;\n  std::mutex cache_lock_;\n  Timer timer_;\n  std::shared_ptr<TimerTask> reclaimer_timer_;\n  std::atomic<uint32_t> slab_cache_num_;\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/status.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_STATUS_H_\n#define MODELBOX_STATUS_H_\n\n#include <memory>\n#include <ostream>\n\nnamespace modelbox {\n\nenum StatusCode {\n  STATUS_SUCCESS = 0, /* Success, Avoid using this, use STATUS_OK instead.*/\n  STATUS_FAULT,       /* Fault */\n  STATUS_NOTFOUND,    /* Not Found */\n  STATUS_INVALID,     /* Invalid argument */\n  STATUS_AGAIN,       /* Try again */\n  STATUS_BADCONF,     /* Bad Config */\n  STATUS_NOMEM,       /* Out of memory */\n  STATUS_RANGE,       /* Out of range */\n  STATUS_EXIST,       /* Already exists */\n  STATUS_INTERNAL,    /* Internal error */\n  STATUS_BUSY,        /* Device or resource busy */\n  STATUS_PERMIT,      /* Operation not permitted */\n  STATUS_NOTSUPPORT,  /* Not supported */\n  STATUS_NODATA,      /* No data available */\n  STATUS_NOSPACE,     /* No space left */\n  STATUS_NOBUFS,      /* No buffer space available  */\n  STATUS_OVERFLOW,    /* Value too large for defined data type */\n  STATUS_INPROGRESS,  /* Operation now in progress */\n  STATUS_ALREADY,     /* Operation already in progress */\n  STATUS_TIMEDOUT,    /* Operation timed out */\n  STATUS_NOSTREAM,    /* Out of streams resources */\n  STATUS_RESET,       /* Request Reset by peer */\n  STATUS_CONTINUE,    /* Continue operation */\n  STATUS_EDQUOT,      /* Quota exceeded */\n  STATUS_STOP,   
     /* Stop operation */\n  STATUS_SHUTDOWN,    /* Shutdown operation */\n  STATUS_EOF,         /* End of file */\n  STATUS_NOENT,       /* No such file or directory */\n  STATUS_DEADLOCK,    /* Resource deadlock */\n  STATUS_NORESPONSE,  /* No response*/\n  STATUS_IO,          /* Input/output error */\n  STATUS_LASTFLAG,    /* Status flag, don't used it */\n};\n\nclass Status {\n public:\n  /**\n   * @brief Status code\n   */\n  Status();\n\n  /**\n   * @brief Status code\n   * @param status copy status from status.\n   */\n  Status(const Status& status);\n\n  /**\n   * @brief Status code\n   * @param code create status from status code.\n   */\n  Status(const StatusCode& code);\n\n  /**\n   * @brief Status code\n   * @param success create status from bool.\n   */\n  Status(const bool& success);\n\n  /**\n   * @brief Status code\n   * @param code create status from code.\n   * @param errmsg error mesage.\n   */\n  Status(const StatusCode& code, const std::string& errmsg);\n\n  /**\n   * @brief Status code\n   * @param status from status.\n   * @param errmsg error mesage.\n   */\n  Status(const Status& status, const std::string& errmsg);\n  virtual ~Status();\n\n  /**\n   * @brief Make status to string\n   * @return string of status.\n   */\n  virtual std::string ToString() const;\n\n  /**\n   * @brief Get status code.\n   * @return status code.\n   */\n  StatusCode Code();\n\n  /**\n   * @brief Get status code in string format.\n   * @return status code in string.\n   */\n  std::string StrCode() const;\n\n  /**\n   * @brief Get status raw code in string\n   * \n   */\n  std::string StrStatusCode() const;\n\n  /**\n   * @brief Set error message to status\n   * @param errmsg error mesage.\n   */\n  void SetErrormsg(const std::string& errmsg);\n\n  /**\n   * @brief Get error message\n   * @return error message\n   */\n  const std::string& Errormsg() const;\n\n  /**\n   * @brief Get chain error messages.\n   * @return error message\n   */\n  std::string 
WrapErrormsgs() const;\n\n  /**\n   * @brief Get wrapped status.\n   * @return wrapped status.\n   */\n  std::shared_ptr<Status> Unwrap();\n\n  /**\n   * @brief Wrap status.\n   * @param status wrapped status.\n   * @param code status code.\n   * @param errmsg error message.\n   */\n  void Wrap(const Status& status, const StatusCode& code,\n            const std::string& errmsg);\n\n  /**\n   * @brief Check if status equals to code\n   */\n  bool operator==(const StatusCode& code) const;\n\n  /**\n   * @brief Check if status equals to status\n   */\n  bool operator==(const Status& s) const;\n\n  /**\n   * @brief Check if status equals to bool\n   */\n  bool operator==(const bool& success) const;\n\n  /**\n   * @brief Check if status not equal to code\n   */\n  bool operator!=(const StatusCode& code) const;\n\n  /**\n   * @brief Check if status not equal to status\n   */\n  bool operator!=(const Status& s) const;\n\n  /**\n   * @brief Override bool function\n   */\n  operator bool() const;\n\n  operator enum StatusCode() const;\n\n private:\n  std::string WrapOnlyErrormsgs(bool with_code) const;\n  std::string ErrorCodeMsgs(bool with_code) const;\n  StatusCode code_ = STATUS_SUCCESS;\n  std::string errmsg_;\n  std::shared_ptr<Status> wrap_status_;\n};\n\nstd::ostream& operator<<(std::ostream& os, const Status& s);\n\n/**\n * @brief Status success, for performance usage\n */\nextern const Status STATUS_OK;\n\n/**\n * @brief Thread local status error like errno\n */\nextern thread_local Status StatusError;\n\n}  // namespace modelbox\n#endif  // MODELBOX_STATUS_H_\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/thread_pool.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_THREAD_POOL_H\n#define MODELBOX_THREAD_POOL_H\n\n#include <modelbox/base/blocking_queue.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <sched.h>\n#include <unistd.h>\n\n#include <condition_variable>\n#include <functional>\n#include <future>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <queue>\n#include <thread>\n\nnamespace modelbox {\n\nstruct ThreadFunction {\n  std::string name;\n  std::function<void()> func;\n};\n\nclass ThreadPool;\nclass ThreadWorker {\n public:\n  /**\n   * @brief Init thread worker\n   * @param pool thread pool.\n   * @param thread_id thread id.\n   * @param core_worker is core woker.\n   * @return thread number.\n   */\n  ThreadWorker(ThreadPool *pool, int thread_id, bool core_worker = false);\n  virtual ~ThreadWorker();\n\n  /**\n   * @brief Set thread pool name.\n   * @param name thread pool name.\n   * @return void\n   */\n  void SetName(const std::string &name);\n\n  /**\n   * @brief Is core thread.\n   * @return is core thread.\n   */\n  bool IsCore();\n\n  /**\n   * @brief Start thread.\n   */\n  void Start();\n\n  /**\n   * @brief Stop thread.\n   */\n  void Stop();\n\n  /**\n   * @brief Wait for thread.\n   */\n  void Join();\n\n  /**\n   * @brief Get thread id\n   */\n  int Id();\n\n private:\n  friend class 
ThreadPool;\n  static void Run(ThreadWorker *worker);\n\n  void ChangeNameNow();\n\n  void SetCore(bool is_core);\n\n  std::atomic<bool> running_{false};\n  std::mutex lock_;\n  bool is_joining_;\n  std::shared_ptr<std::thread> thread_;\n  ThreadPool *pool_;\n  int thread_id_{0};\n  bool is_core_worker_;\n  std::string name_;\n  std::atomic<bool> name_changed_{false};\n};\n\nclass ThreadPool {\n public:\n  /**\n   * @brief Thread pool init\n   * @param thread_size fixed thread size, default is cpu number.\n   * @param max_thread_size max thread size, when queue is full, new thread will\n   * be created.\n   * @param queue_size task queue size, default equal thread size.\n   * @param keep_alive non core thread keep alive time, minimum time is 100ms.\n   */\n  ThreadPool(int thread_size = -1, int max_thread_size = -1,\n             int queue_size = -1, int keep_alive = 60000);\n\n  virtual ~ThreadPool();\n\n  /**\n   * @brief Set thread pool name.\n   * @param name thread pool name.\n   * @return void\n   */\n  void SetName(const std::string &name);\n\n  /**\n   * @brief Set the size of core thread.\n   * @param size queue size.\n   */\n  void SetThreadSize(size_t size);\n\n  /**\n   * @brief Set the size of max thread.\n   * @param size queue size.\n   */\n  void SetMaxThreadSize(size_t size);\n\n  /**\n   * @brief Set the size of queue which task to submit in.\n   * @param size queue size.\n   */\n  void SetTaskQueueSize(size_t size);\n\n  /**\n   * @brief Change none core thread keep alive time.\n   * @param timeout\n   */\n  void SetKeepAlive(uint32_t timeout);\n\n  /**\n   * @brief Shutdown thread pool.\n   * @param force force shutdown.\n   * @return void\n   */\n  void Shutdown(bool force = false);\n\n  /**\n   * @brief Submit a task\n   * @param fun function task to run\n   * @param params function parameters.\n   * @return task future\n   */\n  template <typename func, typename... ts>\n  auto Submit(func &&fun, ts &&... 
params)\n      -> std::future<typename std::result_of<func(ts...)>::type> {\n    return Submit(\"\", fun, params...);\n  }\n\n  /**\n   * @brief Submit a task\n   * @param name task name.\n   * @param fun function task to run.\n   * @param params function parameters.\n   * @return task future\n   */\n  template <typename func, typename... ts>\n  auto Submit(const std::string &name, func &&fun, ts &&... params)\n      -> std::future<typename std::result_of<func(ts...)>::type> {\n    auto execute =\n        std::bind(std::forward<func>(fun), std::forward<ts>(params)...);\n    using ReturnType = typename std::result_of<func(ts...)>::type;\n    using PackagedTask = std::packaged_task<ReturnType()>;\n    auto package_task = std::make_shared<PackagedTask>(std::move(execute));\n    auto result = package_task->get_future();\n\n    ThreadFunction task;\n    task.func = [package_task]() { (*package_task)(); };\n    if (name.length() > 0) {\n      task.name = name;\n    }\n\n    if (SubmitTask(task) == false) {\n      return std::future<typename std::result_of<func(ts...)>::type>();\n    }\n\n    return result;\n  }\n\n  /**\n   * @brief Get running thread number.\n   * @return thread number.\n   */\n  int GetThreadsNum();\n\n  /**\n   * @brief Get max thread number.\n   * @return max thread number.\n   */\n  int GetMaxThreadsNum();\n\n  /**\n   * @brief Get waiting work.\n   * @return waiting work number.\n   */\n  int GetWaitingWorkCount();\n\n private:\n  friend class ThreadWorker;\n  void ExitWorker(ThreadWorker *worker);\n\n  void RunWorker(ThreadWorker *worker);\n\n  bool Park(ThreadWorker *worker, ThreadFunction &task);\n\n  void RmvWorker(ThreadWorker *worker);\n\n  Status AddWorker(bool core_worker);\n\n  void StopWokers();\n\n  bool SubmitTask(ThreadFunction &task);\n\n  std::shared_ptr<BlockingQueue<ThreadFunction>> work_queue_;\n  bool quit_{false};\n  std::list<std::shared_ptr<ThreadWorker>> workers_;\n  int thread_size_{0};\n  int max_thread_size_{1};\n  int 
keep_alive_{60000};\n  std::atomic<int> worker_num_{0};\n  std::atomic<int> available_num_{0};\n  std::mutex lock_;\n  std::condition_variable exit_cond_;\n  std::string name_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_THREAD_POOL_H\n"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/timer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TIMER_H_\n#define MODELBOX_TIMER_H_\n\n#include <atomic>\n#include <condition_variable>\n#include <functional>\n#include <memory>\n#include <mutex>\n#include <queue>\n#include <thread>\n\nnamespace modelbox {\n\nusing TimerTaskFunction = std::function<void()>;\nclass Timer;\nclass TimerCompare;\n\nstatic inline uint64_t GetTickDiff(uint64_t prev, uint64_t cur) {\n  return ((prev) >= (cur)) ? ((prev) - (cur))\n                           : ((~((uint64_t)(0)) - (cur)) + 1 + (prev));\n}\n\n/**\n * @brief Timer task.\n */\nclass TimerTask : public std::enable_shared_from_this<TimerTask> {\n public:\n  /**\n   * @brief Create a timer task\n   * @param f f function\n   * @param args args function arguments\n   */\n  template <typename Function, typename... Args>\n  explicit TimerTask(Function &&f, Args &&...args) : is_running_(false) {\n    Callback(f, args...);\n  }\n\n  /**\n   * @brief Set timer routine\n   * @param f function\n   * @param args args function arguments\n   */\n  template <typename Function, typename... 
Args>\n  void Callback(Function &&f, Args &&...args) {\n    auto execute =\n        std::bind(std::forward<Function>(f), std::forward<Args>(args)...);\n    task_func_ = [execute]() { execute(); };\n\n    if (task_name_.length() == 0) {\n      task_name_ = GetCaller();\n    }\n  }\n\n  /**\n   * @brief Create a timer task\n   */\n  TimerTask();\n\n  virtual ~TimerTask();\n\n  /**\n   * @brief Stop timer task\n   */\n  void Stop();\n\n  /**\n   * @brief Task run.\n   */\n  virtual void Run();\n\n  /**\n   * @brief Set task name.\n   * @param name task name, default is caller function name\n   */\n  void SetName(const std::string &name);\n\n  /**\n   * @brief Get task name.\n   * @return name task name\n   */\n  std::string GetName();\n\n protected:\n  TimerTaskFunction task_func_;\n\n  /**\n   * @brief Get caller name\n   * @return name caller name\n   */\n  std::string GetCaller();\n\n  /**\n   * @brief Get hit time\n   * @return task hit time\n   */\n  uint64_t GetHitTime();\n\n  /**\n   * @brief Get task period\n   * @return task period\n   */\n  uint64_t GetPeriod();\n\n  /**\n   * @brief Get task is running\n   * @return task is running\n   */\n  bool IsRunning();\n\n private:\n  friend class Timer;\n  friend class TimerCompare;\n\n  bool IsWeakPtrTimerTask();\n  std::shared_ptr<TimerTask> MakeSchedWeakTimer();\n  void SetHitTime(uint64_t time);\n  void SetPeriod(uint64_t period);\n  uint64_t GetDelay();\n  void SetDelay(uint64_t delay);\n  void SetTimerRunning(bool running);\n\n  uint64_t hit_time_ = 0;\n  uint64_t period_ = 0;\n  uint64_t delay_ = 0;\n\n  std::atomic_bool is_running_{false};\n  std::string task_name_;\n  bool is_weaktimer_{false};\n  std::shared_ptr<TimerTask> sched_timer_{nullptr};\n  std::weak_ptr<TimerTask> weak_timer_;\n};\n\nclass TimerCompare {\n public:\n  bool operator()(const std::shared_ptr<TimerTask> &lhs,\n                  const std::shared_ptr<TimerTask> &rhs) {\n    auto hit_time_lhs = lhs->GetHitTime();\n    auto hit_time_rhs = 
rhs->GetHitTime();\n    if (hit_time_lhs == hit_time_rhs) {\n      return lhs->GetDelay() + lhs->GetPeriod() >\n             rhs->GetDelay() + rhs->GetPeriod();\n    }\n\n    return hit_time_lhs > hit_time_rhs;\n  }\n};\n\n/**\n * @brief Timer thread.\n */\nclass Timer {\n public:\n  Timer();\n  virtual ~Timer();\n\n  /**\n   * @brief Set timer thread priority\n   * @param priority timer pritority\n   */\n  bool SetPriority(int priority);\n\n  /**\n   * @brief Shutdown main timer\n   */\n  void Shutdown();\n\n  /**\n   * @brief Set timer name\n   */\n  void SetName(const std::string &name);\n\n  /**\n   * @brief Start main timer, threading\n   * @param lazy if true, will start thread when timer task is added.\n   */\n  void Start(bool lazy = true);\n\n  /**\n   * @brief Main timer run\n   */\n  virtual void Run();\n\n  /**\n   * @brief Stop main timer\n   */\n  void Stop();\n\n  /**\n   * @brief Schedule a timer task.\n   * @param timer_task pointer to a timer task.\n   * @param delay task for execution after the specified delay.\n   * @param period schedule period, in millisecond.\n   * @param take_owner_ship take ownership of shared_ptr timer_task.\n   */\n  void Schedule(const std::shared_ptr<TimerTask> &timer_task, uint64_t delay,\n                uint64_t period, bool take_owner_ship = false);\n\n  /**\n   * @brief Get current tick\n   * @return tick count\n   */\n  uint64_t GetCurrentTick();\n\n  /**\n   * @brief Get current timer task\n   */\n  static std::shared_ptr<TimerTask> CurrentTimerTask();\n\n protected:\n  /**\n   * @brief Run main timer\n   */\n  void RunTimer();\n\n  /**\n   * @brief Start main thread async\n   */\n  void StartAsync();\n\n  /**\n   * @brief Stop main timer async\n   */\n  void StopAsync();\n\n private:\n  friend class TimerTask;\n  void RunTimerTask(const std::shared_ptr<TimerTask> &timer,\n                    const std::shared_ptr<TimerTask> &timer_call);\n\n  void StartTimerThread();\n\n  void InsertTimerTask(const 
std::shared_ptr<TimerTask> &timer_task,\n                       uint64_t now);\n\n  void RemoveTopTimerTask();\n\n  void StopTimerTask(TimerTask *timer_task);\n\n  void WaitTimerTask(std::unique_lock<std::mutex> &lock,\n                     std::shared_ptr<TimerTask> &timer);\n\n  bool GetTimerTask(std::unique_lock<std::mutex> &lock,\n                    std::shared_ptr<TimerTask> &timer);\n\n  bool RemoveStoppedTimer();\n\n  thread_local static std::shared_ptr<TimerTask> current_timer_task_;\n  bool is_shutdown_{false};\n  uint64_t start_tick_{0};\n  std::mutex lock_;\n  std::thread thread_;\n  bool timer_running_{false};\n  bool thread_running_{false};\n  std::string name_{\"Timer\"};\n  std::condition_variable cond_;\n  std::priority_queue<std::shared_ptr<TimerTask>,\n                      std::vector<std::shared_ptr<TimerTask>>, TimerCompare>\n      timer_queue_;\n};\n\n/**\n * @brief Global timer thread.\n */\nclass TimerGlobal {\n public:\n  /**\n   * @brief Start main timer, threading\n   */\n  static void Start();\n\n  /**\n   * @brief Stop main timer\n   */\n  static void Stop();\n\n  /**\n   * @brief Schedule a timer task.\n   * @param timer_task pointer to a timer task.\n   * @param delay task for execution after the specified delay.\n   * @param period schedule period, in millisecond.\n   * @param take_owner_ship take ownership of shared_ptr timer_task.\n   */\n  static void Schedule(const std::shared_ptr<TimerTask> &timer_task,\n                       uint64_t delay, uint64_t period,\n                       bool take_owner_ship = false);\n\n private:\n  TimerGlobal();\n  virtual ~TimerGlobal();\n\n  static Timer timer_;\n  static int refcnt_;\n  static std::mutex lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_TIMER_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/utils.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_UTILS_H_\n#define MODELBOX_UTILS_H_\n\n#include <modelbox/base/status.h>\n#include <openssl/ssl.h>\n#include <sys/time.h>\n#include <time.h>\n\n#include <functional>\n#include <list>\n#include <mutex>\n#include <numeric>\n#include <regex>\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\n#define UNUSED_VAR(var) \\\n  { auto &unused __attribute__((unused)) = var; }\n\n#define MODELBOX_DLL_PUBLIC __attribute__((visibility(\"default\")))\n#define MODELBOX_DLL_LOCAL __attribute__((visibility(\"hidden\")))\n\n/**\n * @brief The defer statement pushes a function call onto the guard; the\n * saved calls in called when the function returns\n */\nclass DeferGuard {\n public:\n  template <class Callable>\n\n  /**\n   * @brief Defer guard with function call\n   * @param fn function\n   */\n  // NOLINTNEXTLINE\n  DeferGuard(Callable &&fn) noexcept : fn_(std::forward<Callable>(fn)) {}\n\n  /**\n   * @brief Copy constructor\n   * @param other another defer guard.\n   */\n  DeferGuard(DeferGuard &&other) noexcept;\n\n  virtual ~DeferGuard();\n\n  DeferGuard(const DeferGuard &) = delete;\n  void operator=(const DeferGuard &) = delete;\n\n private:\n  std::function<void()> fn_;\n};\n\n/**\n * @brief The defer statement pushes a function call onto a list; the list of\n * saved calls in called when 
the function returns and condition return true\n */\nclass DeferGuardChain {\n public:\n  /**\n   * @brief Defer guard with function call\n   * @param fn function\n   */\n  template <class Callable>\n  // NOLINTNEXTLINE\n  DeferGuardChain(Callable &&fn) noexcept\n      : fn_cond_(std::forward<Callable>(fn)) {}\n\n  /**\n   * @brief Defer guard with function call\n   * @param other other guard\n   */\n  DeferGuardChain(DeferGuardChain &&other) noexcept;\n\n  /**\n   * @brief Add function to list\n   * @param fn function\n   */\n  DeferGuardChain &operator+=(std::function<void()> &&fn);\n\n  virtual ~DeferGuardChain();\n\n  DeferGuardChain(const DeferGuardChain &) = delete;\n  void operator=(const DeferGuardChain &) = delete;\n\n private:\n  std::list<std::function<void()>> fn_list_;\n  std::function<bool()> fn_cond_;\n};\n\n/**\n * @brief The function list will be called when function returns\n */\n#define DeferCond ::modelbox::DeferGuardChain __defer_cond = [&]()\n\n/**\n * @brief Add defer function to list.\n */\n#define DeferCondAdd __defer_cond += [&]()\n\n#define MODELBOX_CONCAT_(a, b) a##b\n#define MODELBOX_CONCAT(a, b) MODELBOX_CONCAT_(a, b)\n\n/**\n * @brief Call when the function returns\n */\n#define Defer \\\n  ::modelbox::DeferGuard MODELBOX_CONCAT(__defer__, __LINE__) = [&]()\n\n/**\n * @brief Extend defer with capture list args\n */\n#define DeferExt(...)                               
\\\n  ::modelbox::DeferGuard MODELBOX_CONCAT(__defer__, \\\n                                         __LINE__) = [##__VA_ARGS__]()\n/**\n * @brief reference variable\n *\n * @tparam T\n */\ntemplate <typename T>\nclass RefVar {\n public:\n  RefVar() {\n    weak_var_ = new std::weak_ptr<T>[max_var_num_];\n    make_func_ = new std::function<std::shared_ptr<T>(int)>[max_var_num_];\n  }\n\n  RefVar(int max_var_num) {\n    if (max_var_num < 0 || max_var_num > 1024) {\n      max_var_num_ = 0;\n      return;\n    }\n\n    weak_var_ = new std::weak_ptr<T>[max_var_num];\n    make_func_ = new std::function<std::shared_ptr<T>(int)>[max_var_num];\n    max_var_num_ = max_var_num;\n  }\n\n  virtual ~RefVar() {\n    delete[] make_func_;\n    delete[] weak_var_;\n    max_var_num_ = 0;\n  }\n  /**\n   * @brief variable new function\n   *\n   * @param make_func\n   * @param index make function index\n   */\n  template <class Callable>\n  void MakeFunc(Callable &&make_func, int index = -1) noexcept {\n    if (index < 0) {\n      for (int i = 0; i < max_var_num_; i++) {\n        make_func_[i] = std::forward<Callable>(make_func);\n      }\n      return;\n    }\n\n    if (index >= max_var_num_) {\n      return;\n    }\n\n    make_func_[index] = std::forward<Callable>(make_func);\n  }\n\n  /**\n   * @brief Get All objects\n   *\n   * @return std::vector<std::shared_ptr<T>>\n   */\n  std::vector<std::shared_ptr<T>> GetAll() {\n    std::lock_guard<std::mutex> lock(weak_var_lock_);\n    std::vector<std::shared_ptr<T>> result;\n    for (int i = 0; i < max_var_num_; i++) {\n      const auto &var = weak_var_[i].lock();\n      if (var == nullptr) {\n        continue;\n      }\n\n      result.emplace_back(var);\n    }\n\n    return result;\n  }\n\n  /**\n   * @brief Get index\n   *\n   * @param index variable index\n   * @return std::shared_ptr<T>\n   */\n  std::shared_ptr<T> Get(int index = 0) {\n    if (index >= max_var_num_) {\n      return nullptr;\n    }\n\n    auto weak_var = 
weak_var_[index].lock();\n    if (weak_var) {\n      return weak_var;\n    }\n\n    std::lock_guard<std::mutex> lock(weak_var_lock_);\n    weak_var = weak_var_[index].lock();\n    if (weak_var) {\n      return weak_var;\n    }\n\n    if (make_func_[index] == nullptr) {\n      return nullptr;\n    }\n\n    weak_var = make_func_[index](index);\n    weak_var_[index] = weak_var;\n    return weak_var;\n  }\n\n private:\n  std::weak_ptr<T> *weak_var_;\n  std::mutex weak_var_lock_;\n  std::function<std::shared_ptr<T>(int)> *make_func_;\n  int max_var_num_{1};\n};\n\nenum LIST_FILE_TYPE : unsigned int {\n  LIST_FILES_ALL = 0x3,\n  LIST_FILES_FILE = 0x1,\n  LIST_FILES_DIR = 0x2,\n};\n\n/**\n * @brief List files or directoires in path of directory\n * @param path path to directory\n * @param filter list filter\n * @param listfiles files or dirs result\n * @param type list type.\n * @return list result\n */\nStatus ListFiles(const std::string &path, const std::string &filter,\n                 std::vector<std::string> *listfiles,\n                 enum LIST_FILE_TYPE type = LIST_FILES_ALL);\n\n/**\n * @brief find the earilest created file index in path\n * @param listfiles the vector of files\n * @return the earilest file index\n */\nsize_t FindTheEarliestFileIndex(std::vector<std::string> &listfiles);\n\n/**\n * @brief List files in path of directory and sub directories.\n * @param path path to directory\n * @param filter list filter\n * @param listfiles files or dirs result\n * @return list result\n */\nStatus ListSubDirectoryFiles(const std::string &path, const std::string &filter,\n                             std::vector<std::string> *listfiles);\n\n/**\n * @brief Create directory recursively\n * @param path path to directory\n * @param mode directory mode\n * @return create result\n */\nStatus CreateDirectory(const std::string &path, mode_t mode = 0700);\n\n/**\n * @brief Revmoe directory recursively\n *\n * @param path path to directory\n * @return Status remove 
result\n */\nvoid RemoveDirectory(const std::string &path);\n\n/**\n * @brief judge if the path is directory\n *\n * @param path path to be judged\n * @return true means directory\n */\nbool IsDirectory(const std::string &path);\n\n/**\n * @brief Copy from from src to dest\n * @param src copy file from\n * @param dest copy file to\n * @param mode dest file mode\n * @param overwrite whether overwrite existing file\n * @return Copy result\n */\nStatus CopyFile(const std::string &src, const std::string &dest, int mode = 0,\n                bool overwrite = false);\n\n/**\n * @brief Get current time, in usecond\n * @return Current time in usecond\n */\nint64_t GetCurrentTime();\n\n/**\n * @brief Check whether path is absolute\n * @param path path to check\n * @return is absolute\n */\nbool IsAbsolutePath(const std::string &path);\n\n/**\n * @brief Get directory name of path\n * @param path path\n * @return directory name\n */\nstd::string GetDirName(const std::string &path);\n\n/**\n * @brief Get basename\n * @param path path\n * @return basename, empty when fail\n */\nstd::string GetBaseName(const std::string &path);\n\n/**\n * @brief Get random number\n * @param buf output number\n * @param num length of number\n */\nvoid GetRandom(unsigned char *buf, int num);\n\n/**\n * @brief Canonicalize path\n * @param path path\n * @param root_path root path\n * @return path canonicalize\n */\nstd::string PathCanonicalize(const std::string &path,\n                             const std::string &root_path = \"\");\n\ninline size_t Volume(const std::vector<size_t> &shape) {\n  return std::accumulate(shape.begin(), shape.end(), (size_t)1,\n                         std::multiplies<size_t>());\n}\n\n/**\n * @brief Calculator volume size by shapes\n * @param shapes input shapes\n * @return volume size\n */\ninline size_t Volume(const std::vector<std::vector<size_t>> &shapes) {\n  size_t size = 0;\n  for (const auto &shape : shapes) {\n    size += std::accumulate(shape.begin(), 
shape.end(), (size_t)1,\n                            std::multiplies<size_t>());\n  }\n\n  return size;\n}\n\n/**\n * @brief Regex pattern match\n * @param str input string\n * @param pattern pattern\n * @return whether match\n */\ninline bool RegexMatch(const std::string &str, const std::string &pattern) {\n  std::regex re(pattern);\n  return std::regex_match(str, re);\n}\n\n/**\n * @brief Split string by delim\n * @param s input string\n * @param delim delimiter\n * @return strings splitted\n */\nstd::vector<std::string> StringSplit(const std::string &s, char delim);\n\n/**\n * @brief Replace string\n *\n * @param str string to replace\n * @param from replace from\n * @param to output replaced string\n */\nvoid StringReplaceAll(std::string &str, const std::string &from,\n                      const std::string &to);\n\n/**\n * @brief Get current call stack trace\n * @param skip skip frame number\n * @param maxdepth max call stack depth\n * @return stack trace in vector list.\n */\nstd::vector<std::tuple<void *, std::string>> GetStacks(int skip = 0,\n                                                       int maxdepth = -1);\n\n/**\n * @brief Get current call stack trace\n * @param skip skip frame number\n * @param maxdepth max call stack depth\n * @return stack trace.\n */\nstd::string GetStackTrace(int skip = 0, int maxdepth = -1);\n\n/**\n * @brief Get symbol name by addr\n * @param addr address to get symbol\n * @return symbol name, and base address.\n */\nstd::tuple<void *, std::string> GetSymbol(void *addr);\n\n/**\n * @brief Convert size in integer to readable string\n * @param size size in integer\n * @return size in string\n */\nstd::string GetBytesReadable(size_t size);\n\n/**\n * @brief Convert size in string to integer, format like B, Mb, GB, TB\n * @param size size in string, format like B, Mb, GB, TB\n * @return size in integer\n */\nuint64_t GetBytesFromReadable(const std::string &size);\n\n/**\n * @brief Get system tick count\n * @return current 
system tick count\n */\nunsigned long GetTickCount();\n\n/**\n * @brief Convert json to toml\n * @param json_data json data\n * @param toml_data toml data converted\n * @return Convert result\n */\nStatus JsonToToml(const std::string &json_data, std::string *toml_data);\n\n/**\n * @brief Convert toml to json\n * @param toml_data toml data\n * @param json_data json data converted\n * @param readable wheather output with format\n * @return Convert result\n */\nStatus TomlToJson(const std::string &toml_data, std::string *json_data,\n                  bool readable = false);\n\n/**\n * @brief hardening SSL\n * @param ctx ssl context\n * @return hardening result\n */\nStatus HardeningSSL(SSL_CTX *ctx);\n\n/**\n * @brief Get errno in string\n * @param errnum error number\n * @return errno in string\n */\n\nstd::string StrError(int errnum);\n\n/**\n * @brief Get compiled time\n * @return version\n */\nconst char *GetModelBoxVersion();\n\n/**\n * @brief expand environment variable\n * @return expand env text\n */\nstd::string ExpandEnvironmentVariables(const std::string &text);\n\n}  // namespace modelbox\n#endif  // MODELBOX_UTILS_H_"
  },
  {
    "path": "src/libmodelbox/base/include/modelbox/base/uuid.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_UUID_H_\n#define MODELBOX_UUID_H_\n\n#define UUID_LENGTH 37\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\n/**\n * @brief Generate UUID\n * @param uuid uuid output result\n * @return success or fail\n */\nStatus GetUUID(std::string *uuid);\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/base/log/log.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/log.h\"\n\n#include <libgen.h>\n#include <stdarg.h>\n#include <stdio.h>\n#include <unistd.h>\n\n#include <algorithm>\n#include <chrono>\n#include <functional>\n#include <iostream>\n#include <sstream>\n#include <string>\n#include <utility>\n\n#include \"securec.h\"\n\nnamespace modelbox {\n\nconstexpr int LOG_BUFF_SIZE = 4096;\n\nthread_local const char *kLogID;\nLog klogger;\nstd::shared_ptr<LoggerCallback> kloggercallback =\n    std::make_shared<LoggerCallback>();\n\nLog &GetLogger() { return klogger; }\n\nvoid LogIdReset(const void *id) {\n  if (id == kLogID) {\n    kLogID = nullptr;\n  }\n}\n\nextern std::shared_ptr<const void> LogSetLogID(const char *id) {\n  kLogID = id;\n  std::shared_ptr<const void> ret(id, LogIdReset);\n  return ret;\n}\n\nconst char *kLogLevelString[] = {\n    \"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERROR\", \"FATAL\", \"OFF\",\n};\n\nconst char *LogLevelToString(LogLevel level) {\n  if (level >= LOG_OFF) {\n    return \"\";\n  }\n\n  return kLogLevelString[level];\n}\n\nLogLevel LogLevelStrToLevel(const std::string &level) {\n  StatusError = STATUS_OK;\n  auto uppercase_level = level;\n  std::transform(uppercase_level.begin(), uppercase_level.end(),\n                 uppercase_level.begin(), ::toupper);\n  static int level_num = sizeof(kLogLevelString) / 
sizeof(char *);\n  for (int i = 0; i < level_num; i++) {\n    if (uppercase_level == kLogLevelString[i]) {\n      return LogLevel(i);\n    }\n  }\n\n  StatusError = {STATUS_BADCONF, \"config level is invalid.\"};\n  return LOG_OFF;\n}\n\nLogger::Logger() = default;\nLogger::~Logger() = default;\n\nvoid Logger::Vprint(LogLevel level, const char *file, int lineno,\n                    const char *func, const char *format, va_list ap) {\n  char buff[LOG_BUFF_SIZE];\n\n  va_list tmp;\n  va_copy(tmp, ap);\n  Defer { va_end(tmp); };\n\n  auto ret = vsnprintf_s(buff, sizeof(buff), sizeof(buff) - 1, format, ap);\n  if (ret < 0) {\n    int huge_buff_size = LOG_BUFF_SIZE * 8;\n    auto *huge_buff = (char *)malloc(huge_buff_size);\n    if (huge_buff == nullptr) {\n      return;\n    }\n\n    Defer {\n      free(huge_buff);\n      huge_buff = nullptr;\n    };\n\n    ret =\n        vsnprintf_s(huge_buff, huge_buff_size, huge_buff_size - 1, format, tmp);\n    if (ret < 0) {\n      return;\n    }\n\n    huge_buff[huge_buff_size - 1] = '\\0';\n    Print(level, file, lineno, func, huge_buff);\n    return;\n  }\n\n  buff[LOG_BUFF_SIZE - 1] = '\\0';\n  Print(level, file, lineno, func, buff);\n}\n\nvoid Logger::Print(LogLevel level, const char *file, int lineno,\n                   const char *func, const char *msg) {}\n\nvoid Logger::SetLogLevel(LogLevel level) { UNUSED_VAR(level); };\n\nLoggerCallback::LoggerCallback() = default;\n\nLoggerCallback::~LoggerCallback() = default;\n\nvoid LoggerCallback::SetLogLevel(LogLevel level) { level_ = level; }\n\nLogLevel LoggerCallback::GetLogLevel() { return level_; };\n\nvoid LoggerCallback::RegVprint(const LoggerVprint &func) { vprint_ = func; }\n\nvoid LoggerCallback::RegPrint(const LoggerPrint &func) { print_ = func; };\n\nvoid RegLogVprint(const LoggerVprint &func) {\n  ModelBoxLogger.SetLogger(kloggercallback);\n  kloggercallback->RegVprint(func);\n}\n\nvoid RegLogPrint(const LoggerPrint &func) {\n  
ModelBoxLogger.SetLogger(kloggercallback);\n  kloggercallback->RegPrint(func);\n}\n\nvoid LoggerCallback::Vprint(LogLevel level, const char *file, int lineno,\n                            const char *func, const char *format, va_list ap) {\n  if (vprint_) {\n    vprint_(level, file, lineno, func, format, ap);\n    return;\n  }\n\n  Logger::Vprint(level, file, lineno, func, format, ap);\n}\n\nvoid LoggerCallback::Print(LogLevel level, const char *file, int lineno,\n                           const char *func, const char *msg) {\n  if (print_) {\n    print_(level, file, lineno, func, msg);\n    return;\n  }\n\n  Logger::Print(level, file, lineno, func, msg);\n}\n\nLoggerConsole::LoggerConsole() {\n  neeed_flush_ = !isatty(STDOUT_FILENO);\n  SetLogLevelFromEnv();\n}\n\nvoid LoggerConsole::SetLogLevelFromEnv() {\n  const char *log_level = getenv(\"MODELBOX_CONSOLE_LOGLEVEL\");\n  if (log_level == nullptr) {\n    return;\n  }\n\n  level_ = LogLevelStrToLevel(log_level);\n}\n\nLoggerConsole::~LoggerConsole() = default;\n\nvoid LoggerConsole::Print(LogLevel level, const char *file, int lineno,\n                          const char *func, const char *msg) {\n  UNUSED_VAR(func);\n  if (level_ > level) {\n    return;\n  }\n\n  auto now_clock = std::chrono::system_clock::now();\n  std::time_t now = std::chrono::system_clock::to_time_t(now_clock);\n  auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(\n                    now_clock.time_since_epoch()) %\n                1000;\n\n  constexpr int PREFIX_BUFF_LEN = 128;\n  char prefix_msg[PREFIX_BUFF_LEN];\n  char filename[PREFIX_BUFF_LEN];\n\n  struct tm *local_tm = std::localtime(&now);\n  std::string s(30, '\\0');\n  if (local_tm) {\n    std::strftime(&s[0], s.size(), \"%Y-%m-%d %H:%M:%S\", local_tm);\n  }\n\n  int prefix_len =\n      snprintf_s(prefix_msg, sizeof(prefix_msg), sizeof(prefix_msg) - 1,\n                 \"[%s.%.3ld][%5s][%17s:%-4d] \", s.c_str(), millis.count(),\n                 
LogLevelToString(level), file, lineno);\n\n  if (prefix_len >= (int)sizeof(prefix_msg)) {\n    strncpy_s(filename, sizeof(filename), file, PREFIX_BUFF_LEN - 1);\n    prefix_len =\n        snprintf_s(prefix_msg, sizeof(prefix_msg), sizeof(prefix_msg) - 1,\n                   \"[%s.%.3ld][%5s][%17s:%-4d] \", s.c_str(), millis.count(),\n                   LogLevelToString(level), basename(filename), lineno);\n    if (prefix_len >= (int)sizeof(prefix_msg)) {\n      printf(\"[%s.%.3ld][%5s][?] %s\\n\", s.c_str(), millis.count(),\n             LogLevelToString(level), msg);\n      return;\n    }\n  }\n\n  printf(\"%s%s\\n\", prefix_msg, msg);\n  if (neeed_flush_) {\n    fflush(stdout);\n  }\n}\n\nvoid LoggerConsole::SetLogLevel(LogLevel level) { level_ = level; }\n\nLogLevel LoggerConsole::GetLogLevel() { return level_; }\n\nLog::Log() = default;\n\nLog::~Log() = default;\n\nvoid Log::Print(LogLevel level, const char *file, int lineno, const char *func,\n                const char *format, ...) 
{\n  if (CanLog(level) == false) {\n    return;\n  }\n\n  va_list ap;\n  va_start(ap, format);\n  logger_->Vprint(level, file, lineno, func, format, ap);\n  va_end(ap);\n}\n\nvoid Log::Vprint(LogLevel level, const char *file, int lineno, const char *func,\n                 const char *format, va_list ap) {\n  logger_->Vprint(level, file, lineno, func, format, ap);\n}\n\nbool Log::CanLog(LogLevel level) {\n  if (level < logger_->GetLogLevel()) {\n    return false;\n  }\n\n  return true;\n}\n\nLog::Buffer_p Log::LogStream(LogLevel level, const char *file, int lineno,\n                             const char *func) {\n  return Buffer_p(new Stream, [=](Stream *st) {\n    Print(level, file, lineno, func, \"%s\", st->str().c_str());\n    delete st;\n  });\n}\n\nvoid Log::SetLogger(const std::shared_ptr<Logger> &logger) {\n  if (logger == nullptr) {\n    logger_ = std::make_shared<LoggerConsole>();\n    return;\n  }\n\n  logger_ = logger;\n}\n\nstd::shared_ptr<Logger> Log::GetLogger() { return logger_; }\n\nLogMessage::LogMessage(Log *log, LogLevel level, const char *file, int lineno,\n                       const char *func) {\n  log_ = log;\n  level_ = level;\n  file_ = file;\n  lineno_ = lineno;\n  func_ = func;\n}\n\nLogMessage::~LogMessage() {\n  if (kLogID) {\n    log_->Print(level_, file_, lineno_, func_, \"[%s] %s\", kLogID,\n                msg_.str().c_str());\n  } else {\n    log_->Print(level_, file_, lineno_, func_, \"%s\", msg_.str().c_str());\n  }\n}\n\nstd::ostream &LogMessage::Stream() { return msg_; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/mem/memory_pool.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/memory_pool.h\"\n\n#include <memory>\n\n#include \"modelbox/base/collector.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nconst size_t kMaxSlabCacheSize = 128 * 1024 * 1024;\n\nvoid *MemoryPoolBase::MemAlloc(size_t size) { return malloc(size); }\n\nvoid MemoryPoolBase::MemFree(void *ptr) { free(ptr); }\n\nstd::map<MemoryPoolBase *, std::weak_ptr<MemoryPoolBase>>\n    MemoryPoolBase::pool_list_;\n\nstd::mutex MemoryPoolBase::pool_list_lock_;\n\nstd::shared_ptr<void> MemoryPoolBase::AllocSharedPtr(size_t size) {\n  std::shared_ptr<void> ret = nullptr;\n\n  if (size <= 0) {\n    return nullptr;\n  }\n\n  auto alloc_sharedptr = [&]() -> std::shared_ptr<void> {\n    for (auto &cache : slab_caches_) {\n      if (size > cache->ObjectSize()) {\n        continue;\n      }\n\n      ret = cache->AllocSharedPtr();\n      if (ret == nullptr) {\n        continue;\n      }\n\n      break;\n    }\n\n    if (ret == nullptr) {\n      auto *ptr = MemAlloc(size);\n      if (ptr == nullptr) {\n        return nullptr;\n      }\n      ret.reset(ptr, [this](void *ptr) { this->MemFree(ptr); });\n    }\n\n    return ret;\n  };\n\n  ret = alloc_sharedptr();\n  if (ret == nullptr) {\n    ShrinkSlabCache(0, 0, 0);\n    ret = alloc_sharedptr();\n  }\n\n  return ret;\n}\n\nStatus 
MemoryPoolBase::ShrinkSlabCache(int each_keep, time_t before,\n                                       time_t expire) {\n  for (auto &cache : slab_caches_) {\n    cache->Shrink(each_keep, before);\n    if (expire > before) {\n      cache->Shrink(0, expire);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nuint32_t MemoryPoolBase::GetAllObjectNum() {\n  uint32_t total_number = 0;\n  for (auto &cache : slab_caches_) {\n    total_number += cache->GetObjNumber();\n  }\n\n  return total_number;\n}\n\nuint32_t MemoryPoolBase::GetAllActiveObjectNum() {\n  uint32_t total_number = 0;\n  for (auto &cache : slab_caches_) {\n    total_number += cache->GetActiveObjNumber();\n  }\n\n  return total_number;\n}\n\nstd::vector<std::shared_ptr<SlabCache>> MemoryPoolBase::GetSlabCaches() {\n  return slab_caches_;\n}\n\nvoid MemoryPoolBase::DestroySlabCache() { slab_caches_.clear(); }\n\nMemoryPoolBase::MemoryPoolBase(std::string name)\n    : pool_name_(std::move(name)) {}\n\nMemoryPoolBase::MemoryPoolBase() = default;\n\nMemoryPoolBase::~MemoryPoolBase() {\n  std::lock_guard<std::mutex> lock(pool_list_lock_);\n  pool_list_.erase(this);\n}\n\nvoid MemoryPoolBase::SetName(std::string name) { pool_name_ = std::move(name); }\n\nstd::string MemoryPoolBase::GetName() { return pool_name_; }\n\nstd::vector<std::shared_ptr<MemoryPoolBase>> MemoryPoolBase::GetAllPools() {\n  std::vector<std::shared_ptr<MemoryPoolBase>> result;\n  std::lock_guard<std::mutex> lock(pool_list_lock_);\n  for (const auto &pool : pool_list_) {\n    const auto &p = pool.second.lock();\n    if (p == nullptr) {\n      continue;\n    }\n    result.emplace_back(p);\n  }\n\n  return result;\n}\n\nstd::shared_ptr<SlabCache> MemoryPoolBase::MakeSlabCache(size_t obj_size,\n                                                         size_t slab_size) {\n  return std::make_shared<SlabCache>(obj_size, slab_size, this);\n}\n\nvoid MemoryPoolBase::AddSlabCache(\n    const std::shared_ptr<SlabCache> &slab_cache) {\n  
slab_caches_.push_back(slab_cache);\n  std::sort(slab_caches_.begin(), slab_caches_.end(),\n            [](const std::shared_ptr<SlabCache> &a,\n               const std::shared_ptr<SlabCache> &b) {\n              return a->ObjectSize() < b->ObjectSize();\n            });\n}\n\nvoid MemoryPoolBase::ClearAllSlabs() { slab_caches_.clear(); }\n\nsize_t MemoryPoolBase::CalSlabSize(size_t object_size) {\n  const size_t size_1K = 1024;\n  const size_t size_1M = 1024 * 1024;\n\n  if (object_size <= size_1K) {\n    return size_1M;\n  }\n\n  if (object_size <= 16 * size_1K) {\n    return 8 * size_1M;\n  }\n\n  if (object_size <= 512 * size_1K) {\n    return 16 * size_1M;\n  }\n\n  if (object_size <= size_1M) {\n    return 8 * size_1M;\n  }\n\n  if (object_size <= 2 * size_1M) {\n    return 16 * size_1M;\n  }\n\n  if (object_size <= 4 * size_1M) {\n    return 32 * size_1M;\n  }\n\n  if (object_size <= 8 * size_1M) {\n    return 32 * size_1M;\n  }\n\n  if (object_size <= 16 * size_1M) {\n    return 64 * size_1M;\n  }\n\n  if (object_size <= 32 * size_1M) {\n    return 64 * size_1M;\n  }\n\n  if (object_size <= 64 * size_1M) {\n    return 128 * size_1M;\n  }\n\n  if (object_size <= 128 * size_1M) {\n    return 128 * size_1M;\n  }\n\n  return 0;\n}\n\nStatus MemoryPoolBase::InitSlabCache(int low, int high) {\n  std::shared_ptr<SlabCache> slab;\n  const unsigned long shift_low = low;\n  const unsigned long shift_high = high;\n  for (unsigned long i = shift_low; i <= shift_high; i++) {\n    size_t obj_size = 1 << i;\n    if (obj_size > kMaxSlabCacheSize) {\n      MBLOG_WARN << \"Unsupport cache size, max is \"\n                 << GetBytesReadable(kMaxSlabCacheSize);\n      break;\n    }\n    size_t slab_size;\n    slab_size = CalSlabSize(obj_size);\n    if (slab_size == 0) {\n      break;\n    }\n    slab = MakeSlabCache(obj_size, slab_size);\n    AddSlabCache(slab);\n  }\n\n  try {\n    auto shared_this = shared_from_this();\n    pool_list_[this] = shared_this;\n  } catch 
(const std::exception &e) {\n    MBLOG_INFO << \"Skip add memory pool.\";\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/mem/slab.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/slab.h\"\n\n#include <stdarg.h>\n#include <stdio.h>\n\n#include <chrono>\n#include <functional>\n#include <iostream>\n#include <sstream>\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nSlab::Slab(SlabCache *cache, size_t obj_size, size_t mem_size) {\n  if (obj_size <= 0) {\n    Abort(\"object size is invalid.\");\n  }\n\n  ListInit(&free_obj_head_);\n  ListInit(&list);\n  mem_size_ = mem_size;\n  obj_size_ = obj_size;\n  cache_ = cache;\n\n  obj_num_ = mem_size_ / obj_size;\n  if (obj_num_ == 0) {\n    Abort(\"object number is invalid.\");\n  }\n\n  active_obj_num_ = 0;\n  last_alive_ = time(nullptr);\n}\n\nSlab::~Slab() {\n  if (active_obj_num_ > 0) {\n    MBLOG_ERROR << \"active obj number: \" << active_obj_num_;\n    Abort(\"Still exist active object.\");\n  }\n\n  if (objs_) {\n    free(objs_);\n    objs_ = nullptr;\n  }\n\n  if (mem_) {\n    _Free(mem_);\n    mem_ = nullptr;\n  }\n}\n\nbool Slab::Init() {\n  objs_ = (struct SlabObject *)malloc(sizeof(struct SlabObject) * obj_num_);\n  if (objs_ == nullptr) {\n    return false;\n  }\n\n  for (size_t i = 0; i < obj_num_; i++) {\n    objs_[i].index = i;\n    ListAddTail(&objs_[i].list, &free_obj_head_);\n  }\n\n  mem_ = _Alloc(mem_size_);\n  if (mem_ == nullptr) {\n    free(objs_);\n    objs_ = nullptr;\n    return 
false;\n  }\n\n  return true;\n}\n\ntime_t Slab::AliveTime() {\n  if (active_obj_num_ > 0) {\n    return time(nullptr);\n  }\n\n  return last_alive_;\n}\n\nvoid *Slab::Alloc() {\n  if (ListEmpty(&free_obj_head_) || mem_ == nullptr) {\n    return nullptr;\n  }\n\n  struct SlabObject *sobj =\n      ListFirstEntry(&free_obj_head_, struct SlabObject, list);\n  if (sobj == nullptr) {\n    return nullptr;\n  }\n\n  ListDel(&sobj->list);\n  active_obj_num_++;\n  return (char *)mem_ + obj_size_ * sobj->index;\n}\n\nvoid Slab::Free(void *ptr) {\n  size_t offset = (char *)ptr - (char *)mem_;\n  if (offset % obj_size_ != 0) {\n    Abort(\"Memory address is invalid, not point to object.\");\n  }\n\n  size_t index = offset / obj_size_;\n  if (index >= obj_num_) {\n    Abort(\"Memory address is out of range.\");\n  }\n\n  struct SlabObject *sobj = &objs_[index];\n  if (!ListEntryNotInList(&sobj->list) || sobj->index != index) {\n    MBLOG_ERROR << \"object in list: \" << ListEntryNotInList(&sobj->list);\n    MBLOG_ERROR << \"object index:\" << sobj->index << \", free index:\" << index;\n    Abort(\"Memory is corrupted or double free.\");\n  }\n\n  ListAdd(&sobj->list, &free_obj_head_);\n  active_obj_num_--;\n\n  if (active_obj_num_ == 0) {\n    last_alive_ = time(nullptr);\n  }\n}\n\nbool Slab::IsFull() {\n  if (ListEmpty(&free_obj_head_)) {\n    return true;\n  }\n\n  return false;\n}\n\nbool Slab::IsInSlab(const void *ptr) {\n  if ((char *)ptr >= (char *)mem_ && (char *)ptr <= (char *)mem_ + mem_size_) {\n    return true;\n  }\n\n  return false;\n}\n\nbool Slab::IsEmpty() { return active_obj_num_ == 0; }\n\nsize_t Slab::ObjectSize() { return obj_size_; }\n\nint Slab::ActiveObjects() { return active_obj_num_; }\n\nint Slab::ObjectNumber() { return obj_num_; }\n\nvoid *Slab::_Alloc(size_t size) {\n  if (cache_) {\n    return cache_->_Alloc(size);\n  }\n  return malloc(size);\n}\n\nvoid Slab::_Free(void *ptr) {\n  if (cache_) {\n    return cache_->_Free(ptr);\n  }\n  return 
free(ptr);\n}\n\nSlabCache::SlabCache(size_t obj_size, size_t slab_size,\n                     MemoryAllocFree *mem_allocator) {\n  obj_size_ = obj_size;\n  slab_size_ = slab_size;\n\n  obj_num_ = 0;\n  active_obj_num_ = 0;\n  slab_empty_num_ = 0;\n  slab_num_ = 0;\n\n  if (obj_size <= 0) {\n    Abort(\"object size is invalid.\");\n  }\n\n  batch_object_num_ = slab_size / obj_size;\n  if (batch_object_num_ == 0) {\n    Abort(\"slab size or object size is invalid.\");\n  }\n\n  ListInit(&full_);\n  ListInit(&partial_);\n  ListInit(&empty_);\n\n  mem_allocator_ = mem_allocator;\n  SlabCacheReclaimer::Instance().AddSlabCache(this);\n}\n\nSlabCache::~SlabCache() {\n  SlabCacheReclaimer::Instance().RmvSlabCache(this);\n  RemoveSlabs(&full_);\n  RemoveSlabs(&partial_);\n  RemoveSlabs(&empty_);\n  slab_empty_num_ = 0;\n  mem_allocator_ = nullptr;\n}\n\nstd::shared_ptr<void> SlabCache::AllocSharedPtr() {\n  void *ptr = nullptr;\n  Slab *s = nullptr;\n\n  AllocObject(&ptr, &s);\n  if (ptr == nullptr) {\n    return nullptr;\n  }\n\n  std::shared_ptr<void> ret(ptr, [=](void *ptr) { this->FreeObject(ptr, s); });\n  return ret;\n}\n\nvoid SlabCache::AllocObject(void **obj, Slab **slab) {\n  Slab *s = nullptr;\n  bool is_stop = false;\n  void *ret = nullptr;\n  ListHead *from_list = nullptr;\n\n  std::unique_lock<std::mutex> lock(lock_);\n  while (ret == nullptr && is_stop == false) {\n    if (!ListEmpty(&partial_)) {\n      from_list = &partial_;\n    } else if (!ListEmpty(&empty_)) {\n      from_list = &empty_;\n    } else {\n      is_stop = !GrowLocked(&lock);\n      continue;\n    }\n\n    s = ListFirstEntry(from_list, Slab, list);\n    if (s == nullptr) {\n      continue;\n    }\n\n    ret = s->Alloc();\n    if (ret == nullptr) {\n      is_stop = !GrowLocked(&lock);\n      continue;\n    }\n\n    if (from_list == &empty_) {\n      if (slab_empty_num_ == 0) {\n        Abort(\"slab number is invalid.\");\n      }\n\n      slab_empty_num_--;\n    }\n\n    if (s->IsFull()) {\n  
    ListDel(&s->list);\n      ListAdd(&s->list, &full_);\n    } else if (from_list != &partial_) {\n      ListDel(&s->list);\n      ListAdd(&s->list, &partial_);\n    }\n  }\n\n  if (ret == nullptr) {\n    *obj = nullptr;\n    *slab = nullptr;\n    return;\n  }\n\n  active_obj_num_++;\n  *obj = ret;\n  *slab = s;\n}\n\nvoid SlabCache::FreeObject(void *obj, Slab *slab) {\n  std::unique_lock<std::mutex> lock(lock_);\n\n  active_obj_num_--;\n  slab->Free(obj);\n  if (slab->IsEmpty()) {\n    ListDel(&slab->list);\n    ListAdd(&slab->list, &empty_);\n    slab_empty_num_++;\n  } else if (!slab->IsFull()) {\n    ListDel(&slab->list);\n    ListAdd(&slab->list, &partial_);\n  }\n}\n\nvoid SlabCache::Shrink(int keep, time_t before) {\n  size_t shrink_num = 0;\n  int empty_number = slab_empty_num_;\n\n  if (empty_number <= 0) {\n    return;\n  }\n\n  if (keep == 0) {\n    shrink_num = ~0;\n  } else if (empty_number > keep) {\n    shrink_num = slab_empty_num_ - keep;\n  }\n\n  RemoveSlabs(&empty_, shrink_num, before);\n}\n\nvoid SlabCache::Reclaim(time_t before) {\n  if (obj_num_ == 0 || batch_object_num_ == 0) {\n    return;\n  }\n\n  const int free_percent_threshold = 10;\n  const int idle_free_time_before = 60 * 10;\n  auto free_obj_percent =\n      (slab_empty_num_ * batch_object_num_ * 100) / (obj_num_ * 100);\n  auto shrink_percent = free_obj_percent * 100 - free_percent_threshold;\n  if (shrink_percent <= 0) {\n    /* shrink unused slabs */\n    RemoveSlabs(&empty_, slab_empty_num_, idle_free_time_before);\n    return;\n  }\n\n  int shrink_obj_num = obj_num_ * shrink_percent / 100;\n  int shrink_num = shrink_obj_num / batch_object_num_;\n  if (shrink_num <= 0) {\n    RemoveSlabs(&empty_, slab_empty_num_, idle_free_time_before);\n    return;\n  }\n\n  RemoveSlabs(&empty_, shrink_num, before);\n}\n\nuint32_t SlabCache::SlabNumber() {\n  std::unique_lock<std::mutex> lock(lock_);\n  return slab_num_;\n}\n\nint SlabCache::GetEmptySlabNumber() { return slab_empty_num_; 
}\n\nsize_t SlabCache::ObjectSize() { return obj_size_; };\n\nuint32_t SlabCache::GetObjNumber() { return obj_num_; }\n\nuint32_t SlabCache::GetFreeObjNumber() { return obj_num_ - active_obj_num_; }\n\nuint32_t SlabCache::GetActiveObjNumber() { return active_obj_num_; }\n\nvoid SlabCache::RemoveSlabs(ListHead *head) { RemoveSlabs(head, -1, 0); }\n\nvoid SlabCache::RemoveSlabLocked(Slab *s) {\n  ListDel(&s->list);\n  slab_num_--;\n  obj_num_ -= s->ObjectNumber();\n}\n\nvoid SlabCache::RemoveSlabs(ListHead *head, size_t count, time_t time_before) {\n  Slab *s = nullptr;\n  Slab *tmp = nullptr;\n  size_t loop_count = count;\n  time_t now = time(nullptr);\n\n  ListHead list_free;\n  ListInit(&list_free);\n\n  if (count <= 0) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  ListForEachEntrySafe(s, tmp, head, list) {\n    if (s->AliveTime() > (now - time_before)) {\n      continue;\n    }\n\n    RemoveSlabLocked(s);\n\n    ListAdd(&s->list, &list_free);\n    if (head == &empty_) {\n      slab_empty_num_--;\n    }\n\n    loop_count--;\n    if (loop_count <= 0) {\n      break;\n    }\n  }\n  lock.unlock();\n\n  ListForEachEntrySafe(s, tmp, &list_free, list) {\n    ListDel(&s->list);\n    delete s;\n  }\n}\n\nbool SlabCache::GrowLocked(std::unique_lock<std::mutex> *lock) {\n  try {\n    lock->unlock();\n    auto *s = new Slab(this, obj_size_, slab_size_);\n    if (s->Init() == false) {\n      lock->lock();\n      delete s;\n      return false;\n    }\n    lock->lock();\n\n    ListAddTail(&s->list, &empty_);\n    slab_empty_num_++;\n    slab_num_++;\n    obj_num_ += s->ObjectNumber();\n  } catch (...) 
{\n    return false;\n  }\n  return true;\n}\n\nvoid *SlabCache::_Alloc(size_t size) {\n  if (mem_allocator_) {\n    return mem_allocator_->MemAlloc(size);\n  }\n\n  return malloc(size);\n}\n\nvoid SlabCache::_Free(void *ptr) {\n  if (mem_allocator_) {\n    return mem_allocator_->MemFree(ptr);\n  }\n\n  return free(ptr);\n}\n\nSlabCacheReclaimer &SlabCacheReclaimer::Instance() {\n  static SlabCacheReclaimer reclaimer;\n  return reclaimer;\n}\n\nvoid SlabCacheReclaimer::AddSlabCache(SlabCache *slabcache) {\n  std::unique_lock<std::mutex> lock(cache_lock_);\n  if (slab_cache_list_.find(slabcache) != slab_cache_list_.end()) {\n    return;\n  }\n\n  slab_cache_list_[slabcache] = slabcache;\n  lock.unlock();\n  StartReclaimWorker();\n}\n\nvoid SlabCacheReclaimer::RmvSlabCache(SlabCache *slabcache) {\n  std::unique_lock<std::mutex> lock(cache_lock_);\n  auto itr = slab_cache_list_.find(slabcache);\n  if (itr == slab_cache_list_.end()) {\n    return;\n  }\n  slab_cache_list_.erase(itr);\n  lock.unlock();\n  StopReclaimWoker();\n}\n\nvoid SlabCacheReclaimer::DoReclaim() {\n  std::unique_lock<std::mutex> lock(cache_lock_);\n  for (auto itr : slab_cache_list_) {\n    auto *slabcache = itr.first;\n    slabcache->Reclaim();\n  }\n}\n\nSlabCacheReclaimer::SlabCacheReclaimer() = default;\n\nSlabCacheReclaimer::~SlabCacheReclaimer() {\n  if (reclaimer_timer_ == nullptr) {\n    return;\n  }\n  StopReclaimWoker();\n}\n\nvoid SlabCacheReclaimer::StartReclaimWorker() {\n  if (slab_cache_num_++ > 0) {\n    return;\n  }\n\n  timer_.SetName(\"Slab-Reclaim\");\n  timer_.Start();\n  reclaimer_timer_ = std::make_shared<TimerTask>();\n  reclaimer_timer_->Callback(&SlabCacheReclaimer::DoReclaim, this);\n  timer_.Schedule(reclaimer_timer_, 0, 10 * 1000);\n}\n\nvoid SlabCacheReclaimer::StopReclaimWoker() {\n  if (--slab_cache_num_ > 0) {\n    return;\n  }\n\n  reclaimer_timer_->Stop();\n  reclaimer_timer_ = nullptr;\n  timer_.Stop();\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/status/status.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/status.h\"\n\n#include <stdio.h>\n\n#include <sstream>\n\nnamespace modelbox {\n\nconst Status STATUS_OK = STATUS_SUCCESS;\nthread_local Status StatusError;\n\nconst char* kStatusCodeString[] = {\n    \"Success\",\n    \"Fault\",\n    \"Not found\",\n    \"Invalid argument\",\n    \"Try again\",\n    \"Bad config\",\n    \"Out of memory\",\n    \"Out of range\",\n    \"Already exists\",\n    \"Internal error\",\n    \"Device or resource busy\",\n    \"Operation not permitted\",\n    \"Not supported\",\n    \"No data available\",\n    \"No space left\",\n    \"No buffer space available\",\n    \"Value too large for defined data type\",\n    \"Operation now in progress\",\n    \"Operation already in progress\",\n    \"Operation timed out\",\n    \"Out of streams resources\",\n    \"Request reset\",\n    \"Continue operation\",\n    \"Quota exceeded\",\n    \"Stop operation\",\n    \"Shutdown operation\",\n    \"End of file\",\n    \"No such file or directory\",\n    \"Resource deadlock\",\n    \"No response\",\n    \"Input/output error\",\n    \"End flag\",\n};\n\nconst char* kStatusCodeRawString[] = {\n    \"STATUS_SUCCESS\",    \"STATUS_FAULT\",    \"STATUS_NOTFOUND\",\n    \"STATUS_INVALID\",    \"STATUS_AGAIN\",    \"STATUS_BADCONF\",\n    \"STATUS_NOMEM\",      \"STATUS_RANGE\",    
\"STATUS_EXIST\",\n    \"STATUS_INTERNAL\",   \"STATUS_BUSY\",     \"STATUS_PERMIT\",\n    \"STATUS_NOTSUPPORT\", \"STATUS_NODATA\",   \"STATUS_NOSPACE\",\n    \"STATUS_NOBUFS\",     \"STATUS_OVERFLOW\", \"STATUS_INPROGRESS\",\n    \"STATUS_ALREADY\",    \"STATUS_TIMEDOUT\", \"STATUS_NOSTREAM\",\n    \"STATUS_RESET\",      \"STATUS_CONTINUE\", \"STATUS_EDQUOT\",\n    \"STATUS_STOP\",       \"STATUS_SHUTDOWN\", \"STATUS_EOF\",\n    \"STATUS_NOENT\",      \"STATUS_DEADLOCK\", \"STATUS_NORESPONSE\",\n    \"STATUS_IO\",\n};\n\nStatus::Status() = default;\n\nStatus::~Status() = default;\n\nStatus::Status(const StatusCode& code) { code_ = code; }\n\nStatus::Status(const bool& success) {\n  if (success) {\n    code_ = STATUS_SUCCESS;\n  } else {\n    code_ = STATUS_FAULT;\n  }\n}\n\nStatus::Status(const Status& status) {\n  code_ = status.code_;\n  operator=(status);\n}\n\nStatus::Status(const StatusCode& code, const std::string& errmsg) {\n  code_ = code;\n  errmsg_ = errmsg;\n}\n\nStatus::Status(const Status& status, const std::string& errmsg) {\n  Wrap(status, status.code_, errmsg);\n}\n\nvoid Status::Wrap(const Status& status, const StatusCode& code,\n                  const std::string& errmsg) {\n  if (code >= STATUS_LASTFLAG) {\n    return;\n  }\n\n  code_ = code;\n  errmsg_ = errmsg;\n  wrap_status_ = std::make_shared<Status>(status);\n}\n\nstd::shared_ptr<Status> Status::Unwrap() { return wrap_status_; }\n\nStatusCode Status::Code() { return code_; }\n\nbool Status::operator==(const StatusCode& code) const { return code_ == code; }\n\nbool Status::operator==(const Status& s) const { return code_ == s.code_; }\n\nbool Status::operator==(const bool& success) const {\n  if ((success && code_ == STATUS_SUCCESS) ||\n      (!success && code_ != STATUS_SUCCESS)) {\n    return true;\n  }\n\n  return false;\n}\n\nbool Status::operator!=(const StatusCode& code) const { return code_ != code; }\n\nbool Status::operator!=(const Status& s) const { return code_ != s.code_; 
}\n\nStatus::operator bool() const { return code_ == STATUS_SUCCESS; }\n\nStatus::operator enum StatusCode() const { return code_; }\n\nstd::string Status::ToString() const {\n  if (errmsg_.length() > 0) {\n    std::ostringstream oss;\n    oss << \"code: \" << StrCode() << \", errmsg: \" << errmsg_;\n    return oss.str();\n  }\n\n  return StrCode();\n}\n\nstd::string Status::StrCode() const {\n  if ((size_t)code_ >= sizeof(kStatusCodeString) / sizeof(char*)) {\n    return \"\";\n  }\n\n  return kStatusCodeString[code_];\n}\n\nstd::string Status::StrStatusCode() const {\n  if ((size_t)code_ >= sizeof(kStatusCodeString) / sizeof(char*)) {\n    return \"\";\n  }\n\n  return kStatusCodeRawString[code_];\n}\n\nvoid Status::SetErrormsg(const std::string& errmsg) { errmsg_ = errmsg; }\n\nconst std::string& Status::Errormsg() const { return errmsg_; }\n\nstd::string Status::ErrorCodeMsgs(bool with_code) const {\n  if (with_code) {\n    if (Errormsg().length() > 0) {\n      return StrCode() + \", \" + Errormsg();\n    }\n\n    return StrCode();\n  }\n\n  return Errormsg();\n}\n\nstd::string Status::WrapOnlyErrormsgs(bool with_code) const {\n  if (wrap_status_ == nullptr) {\n    return ErrorCodeMsgs(false);\n  }\n\n  if (Errormsg().length() == 0 && with_code == false) {\n    return wrap_status_->WrapOnlyErrormsgs(with_code);\n  }\n\n  const auto& msg = wrap_status_->WrapOnlyErrormsgs(with_code);\n  if (msg.length() > 0) {\n    return ErrorCodeMsgs(with_code) + \" -> \" + msg;\n  }\n\n  return ErrorCodeMsgs(with_code);\n}\n\nstd::string Status::WrapErrormsgs() const {\n  if (wrap_status_ != nullptr) {\n    auto msg = wrap_status_->WrapOnlyErrormsgs(false);\n    if (msg.length() > 0) {\n      return ErrorCodeMsgs(true) + \" -> \" + msg;\n    }\n\n    return ErrorCodeMsgs(true);\n  }\n\n  return ErrorCodeMsgs(true);\n}\n\nstd::ostream& operator<<(std::ostream& os, const Status& s) {\n  os << s.ToString();\n  return os;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/thread_pool/thread_pool.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/os.h>\n#include <modelbox/base/thread_pool.h>\n\n#include <algorithm>\n#include <cstring>\n\nnamespace modelbox {\n\nconstexpr int MIN_KEEP_ALIVE_TIME = 100;\n\nThreadWorker::ThreadWorker(ThreadPool *pool, int thread_id, bool core_worker) {\n  pool_ = pool;\n  is_core_worker_ = core_worker;\n  is_joining_ = false;\n  thread_id_ = thread_id;\n}\n\nThreadWorker::~ThreadWorker() { Join(); }\n\nvoid ThreadWorker::SetName(const std::string &name) {\n  name_ = name;\n  name_changed_.exchange(true);\n}\n\nvoid ThreadWorker::ChangeNameNow() {\n  if (name_changed_.exchange(false) == false) {\n    return;\n  }\n\n  if (name_.length() > 0) {\n    os->Thread->SetName(name_);\n  }\n}\n\nbool ThreadWorker::IsCore() { return is_core_worker_; }\n\nvoid ThreadWorker::SetCore(bool is_core) { is_core_worker_ = is_core; }\n\nvoid ThreadWorker::Run(ThreadWorker *worker) {\n  while (worker->running_) {\n    worker->ChangeNameNow();\n    worker->pool_->RunWorker(worker);\n  }\n\n  auto *pool = worker->pool_;\n  worker->pool_ = nullptr;\n  auto thread = worker->thread_;\n  std::unique_lock<std::mutex> lock(worker->lock_);\n  if (!worker->is_joining_ && worker->thread_) {\n    worker->thread_->detach();\n    worker->thread_ = nullptr;\n  }\n\n  lock.unlock();\n  pool->ExitWorker(worker);\n  // thread may be 
detached, leave nothing here.\n}\n\nvoid ThreadWorker::Start() {\n  if (thread_) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  running_ = true;\n  thread_ = std::make_shared<std::thread>(&ThreadWorker::Run, this);\n}\n\nvoid ThreadWorker::Stop() {\n  std::unique_lock<std::mutex> lock(lock_);\n  running_ = false;\n}\n\nint ThreadWorker::Id() { return thread_id_; }\n\nvoid ThreadWorker::Join() {\n  Stop();\n  std::shared_ptr<std::thread> thread = thread_;\n  std::unique_lock<std::mutex> lock(lock_);\n  if (thread_) {\n    thread_ = nullptr;\n    is_joining_ = true;\n    lock.unlock();\n    thread->join();\n    thread = nullptr;\n    lock.lock();\n    is_joining_ = false;\n  }\n}\n\nThreadPool::ThreadPool(int thread_size, int max_thread_size, int queue_size,\n                       int keep_alive) {\n  if (thread_size < 0) {\n    thread_size = std::thread::hardware_concurrency();\n  }\n\n  thread_size_ = thread_size;\n  max_thread_size_ = max_thread_size;\n  if (max_thread_size_ < thread_size_) {\n    max_thread_size_ = thread_size_;\n  }\n\n  if (max_thread_size_ == 0) {\n    max_thread_size_ = std::thread::hardware_concurrency();\n  }\n\n  if (queue_size < 0) {\n    queue_size = thread_size;\n    if (queue_size == 0) {\n      queue_size = 1;\n    }\n  }\n\n  if (keep_alive <= MIN_KEEP_ALIVE_TIME) {\n    keep_alive = MIN_KEEP_ALIVE_TIME;\n  }\n\n  keep_alive_ = keep_alive;\n  worker_num_ = 0;\n  quit_ = false;\n  work_queue_ = std::make_shared<BlockingQueue<ThreadFunction>>(queue_size);\n}\n\nThreadPool::~ThreadPool() { Shutdown(); };\n\nvoid ThreadPool::SetName(const std::string &name) { name_ = name; }\n\nvoid ThreadPool::Shutdown(bool force) {\n  work_queue_->Shutdown();\n  if (force) {\n    work_queue_->Close();\n    StopWokers();\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  thread_size_ = 0;\n  exit_cond_.wait(lock, [&]() { return workers_.size() <= 0; });\n}\n\nvoid ThreadPool::ExitWorker(ThreadWorker *worker) {\n  
RmvWorker(worker);\n  // leave nothing here\n}\n\nbool ThreadPool::Park(ThreadWorker *worker, ThreadFunction &task) {\n  auto wait_time = 0;\n  if (worker->IsCore() == false) {\n    int extend_thread_size = worker_num_ - thread_size_;\n    if (extend_thread_size <= 0) {\n      extend_thread_size = 1;\n    }\n\n    int wait_time_step = keep_alive_ / extend_thread_size;\n    wait_time = wait_time_step * (worker_num_ - worker->Id() + 1);\n    if (wait_time < MIN_KEEP_ALIVE_TIME) {\n      wait_time = MIN_KEEP_ALIVE_TIME;\n    } else if (wait_time > keep_alive_) {\n      wait_time = keep_alive_;\n    }\n  }\n\n  available_num_++;\n  auto ret = work_queue_->Pop(&task, wait_time);\n  available_num_--;\n  if (ret == false) {\n    if (errno == EINTR) {\n      return false;\n    }\n\n    worker->Stop();\n    return false;\n  }\n\n  return true;\n}\n\nvoid ThreadPool::RunWorker(ThreadWorker *worker) {\n  ThreadFunction task;\n  bool is_set_name = false;\n  DeferCond { return is_set_name; };\n\n  if (Park(worker, task) == false) {\n    return;\n  }\n\n  if (task.name.length() > 0) {\n    worker->SetName(task.name);\n    worker->ChangeNameNow();\n    is_set_name = true;\n    DeferCondAdd { worker->SetName(name_); };\n  }\n\n  try {\n    task.func();\n  } catch (const std::exception &ex) {\n    MBLOG_FATAL << \"thread:  \" << pthread_self() << \" throw exception, \"\n                << ex.what();\n  }\n}\n\nvoid ThreadPool::RmvWorker(ThreadWorker *worker) {\n  worker_num_--;\n  lock_.lock();\n  for (auto iter = workers_.begin(); iter != workers_.end(); ++iter) {\n    if ((*iter).get() == worker) {\n      workers_.erase(iter);\n      break;\n    }\n  }\n\n  if (workers_.size() == 0) {\n    exit_cond_.notify_one();\n  }\n  lock_.unlock();\n}\n\nStatus ThreadPool::AddWorker(bool core_worker) {\n  int id = worker_num_++;\n  std::shared_ptr<ThreadWorker> worker =\n      std::make_shared<ThreadWorker>(this, id, core_worker);\n  worker->SetName(name_);\n  lock_.lock();\n  
workers_.emplace_back(worker);\n  lock_.unlock();\n  worker->Start();\n  worker->SetCore(core_worker);\n  return STATUS_OK;\n}\n\nbool ThreadPool::SubmitTask(ThreadFunction &task) {\n  bool is_queued = false;\n\n  if (worker_num_++ < thread_size_) {\n    AddWorker(true);\n  }\n  worker_num_--;\n\n  auto ret = work_queue_->Push(task, -1);\n  if (ret == true) {\n    is_queued = true;\n    if ((!work_queue_->Full() && thread_size_ > 0) || available_num_ > 0) {\n      return ret;\n    }\n  }\n\n  if (work_queue_->IsShutdown()) {\n    return false;\n  }\n\n  // expand extend thread pool\n  auto num = worker_num_++;\n  if (num < max_thread_size_) {\n    bool create_core = false;\n    if (num < thread_size_) {\n      create_core = true;\n    }\n    AddWorker(create_core);\n  }\n  worker_num_--;\n\n  if (is_queued) {\n    return ret;\n  }\n\n  do {\n    ret = work_queue_->Push(task, 0);\n    if (ret == false) {\n      std::this_thread::yield();\n      if (errno == EINTR || errno == ETIMEDOUT) {\n        continue;\n      }\n      break;\n    }\n  } while (ret == false);\n\n  return ret;\n}\n\nvoid ThreadPool::StopWokers() {\n  work_queue_->Shutdown();\n  std::unique_lock<std::mutex> lock(lock_);\n  for (auto &workder : workers_) {\n    workder->Stop();\n  }\n}\n\nvoid ThreadPool::SetThreadSize(size_t size) {\n  thread_size_ = size;\n  if (max_thread_size_ < thread_size_) {\n    max_thread_size_ = thread_size_;\n  }\n\n  int thread_num = 0;\n  lock_.lock();\n  for (auto &worker : workers_) {\n    if (worker->IsCore() == false) {\n      continue;\n    }\n\n    thread_num++;\n    if (thread_num <= thread_size_) {\n      continue;\n    }\n\n    worker->SetCore(false);\n  }\n  lock_.unlock();\n  work_queue_->Wakeup();\n}\n\nvoid ThreadPool::SetMaxThreadSize(size_t size) {\n  max_thread_size_ = size;\n  if (max_thread_size_ < thread_size_) {\n    max_thread_size_ = thread_size_;\n  }\n}\n\nvoid ThreadPool::SetTaskQueueSize(size_t size) {\n  
work_queue_->SetCapacity(size);\n}\n\nvoid ThreadPool::SetKeepAlive(uint32_t timeout) {\n  if (timeout <= MIN_KEEP_ALIVE_TIME) {\n    keep_alive_ = MIN_KEEP_ALIVE_TIME;\n  } else {\n    keep_alive_ = timeout;\n  }\n\n  work_queue_->Wakeup();\n}\n\nint ThreadPool::GetThreadsNum() { return worker_num_; }\n\nint ThreadPool::GetMaxThreadsNum() { return max_thread_size_; }\n\nint ThreadPool::GetWaitingWorkCount() {\n  return work_queue_ ? work_queue_->Size() : 0;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/timer/timer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/os.h>\n#include <modelbox/base/timer.h>\n#include <modelbox/base/utils.h>\n\nnamespace modelbox {\n\nconstexpr int TIMER_MAX_RUNNING_TIME = 50;\n\nTimer TimerGlobal::timer_;\nint TimerGlobal::refcnt_;\nstd::mutex TimerGlobal::lock_;\n\nTimerTask::~TimerTask() { Stop(); }\n\nTimerTask::TimerTask() { task_name_ = GetCaller(); }\n\nvoid TimerTask::Stop() {\n  if (sched_timer_ != nullptr) {\n    sched_timer_->Stop();\n  }\n\n  is_running_ = false;\n  weak_timer_.reset();\n}\n\nvoid TimerTask::Run() { task_func_(); }\n\nvoid TimerTask::SetName(const std::string &name) {\n  if (sched_timer_ != nullptr) {\n    sched_timer_->SetName(name);\n  }\n\n  task_name_ = name;\n}\n\nstd::string TimerTask::GetName() { return task_name_; }\n\nstd::string TimerTask::GetCaller() {\n  std::stringstream str;\n\n  str << \"@\" << std::hex << __builtin_return_address(0);\n  return str.str();\n}\n\nstd::shared_ptr<TimerTask> TimerTask::MakeSchedWeakTimer() {\n  auto sched_timer = std::make_shared<TimerTask>();\n  sched_timer->weak_timer_ = shared_from_this();\n  sched_timer->is_weaktimer_ = true;\n  sched_timer->task_name_ = task_name_;\n  sched_timer->SetPeriod(period_);\n  sched_timer->SetDelay(delay_);\n  this->sched_timer_ = sched_timer;\n  return sched_timer;\n}\n\nvoid 
TimerTask::SetHitTime(uint64_t time) { hit_time_ = time; }\n\nuint64_t TimerTask::GetHitTime() { return hit_time_; }\n\nvoid TimerTask::SetPeriod(uint64_t period) { period_ = period; }\n\nuint64_t TimerTask::GetPeriod() { return period_; }\n\nvoid TimerTask::SetDelay(uint64_t delay) { delay_ = delay; }\n\nuint64_t TimerTask::GetDelay() { return delay_; }\n\nvoid TimerTask::SetTimerRunning(bool running) {\n  is_running_ = running;\n  auto timer = weak_timer_.lock();\n  if (timer) {\n    timer->SetTimerRunning(running);\n  }\n}\n\nbool TimerTask::IsWeakPtrTimerTask() { return is_weaktimer_; }\n\nbool TimerTask::IsRunning() { return is_running_; }\n\nTimer::Timer() {\n  // make sure tick may not overflow for a long long time.\n  start_tick_ = GetTickCount();\n}\n\nTimer::~Timer() { Stop(); };\n\nthread_local std::shared_ptr<TimerTask> Timer::current_timer_task_ = nullptr;\n\nstd::shared_ptr<TimerTask> Timer::CurrentTimerTask() {\n  return current_timer_task_;\n}\n\nvoid Timer::SetName(const std::string &name) {\n  if (timer_running_) {\n    return;\n  }\n\n  name_ = name;\n}\n\nvoid Timer::Start(bool lazy) {\n  if (timer_running_) {\n    return;\n  }\n\n  timer_running_ = true;\n  is_shutdown_ = false;\n  if (lazy == false) {\n    StartTimerThread();\n  }\n}\n\nvoid Timer::Shutdown() {\n  if (timer_running_ == false) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  is_shutdown_ = true;\n  lock.unlock();\n\n  cond_.notify_one();\n  if (thread_.joinable()) {\n    thread_.join();\n  }\n}\n\nvoid Timer::Run() {\n  if (name_.length() > 0) {\n    os->Thread->SetName(name_);\n  }\n\n  if (thread_running_ == false) {\n    return;\n  }\n\n  while (timer_running_) {\n    RunTimer();\n  }\n\n  thread_running_ = false;\n}\n\nvoid Timer::StartAsync() {\n  std::unique_lock<std::mutex> lock(lock_);\n  timer_running_ = true;\n  is_shutdown_ = false;\n  thread_running_ = true;\n}\n\nvoid Timer::StopAsync() {\n  std::unique_lock<std::mutex> lock(lock_);\n  
timer_running_ = false;\n  cond_.notify_one();\n}\n\nvoid Timer::Stop() {\n  if (timer_running_ == false) {\n    return;\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  timer_running_ = false;\n  cond_.notify_all();\n  lock.unlock();\n\n  if (thread_.joinable()) {\n    thread_.join();\n  }\n\n  lock.lock();\n  while (!timer_queue_.empty()) {\n    auto timer = timer_queue_.top();\n    timer->Stop();\n    timer_queue_.pop();\n  }\n}\n\nvoid Timer::StartTimerThread() {\n  if (thread_running_ == true) {\n    return;\n  }\n\n  thread_running_ = true;\n  thread_ = std::thread(&Timer::Run, this);\n}\n\nvoid Timer::Schedule(const std::shared_ptr<TimerTask> &timer_task,\n                     uint64_t delay, uint64_t period, bool take_owner_ship) {\n  if (timer_running_ == false) {\n    MBLOG_WARN << \"Schedule timer failed, timer is not running.\";\n    return;\n  }\n\n  timer_task->SetPeriod(period);\n  timer_task->SetDelay(delay);\n\n  auto timer_task_sched = timer_task;\n  if (take_owner_ship == false) {\n    timer_task_sched = timer_task->MakeSchedWeakTimer();\n  }\n\n  std::unique_lock<std::mutex> lock(lock_);\n  uint64_t now = GetCurrentTick();\n  if (thread_running_ == false) {\n    StartTimerThread();\n  }\n\n  InsertTimerTask(timer_task_sched, now);\n  timer_task_sched->SetTimerRunning(true);\n  auto top = timer_queue_.top();\n  if (timer_task_sched.get() == top.get()) {\n    cond_.notify_one();\n  }\n}\n\nbool Timer::SetPriority(int priority) {\n  MBLOG_WARN << \"not support now\";\n  return false;\n}\n\nuint64_t Timer::GetCurrentTick() {\n  return GetTickDiff(GetTickCount(), start_tick_);\n}\n\nvoid Timer::InsertTimerTask(const std::shared_ptr<TimerTask> &timer_task,\n                            uint64_t now) {\n  timer_task->SetHitTime(now + timer_task->GetPeriod() +\n                         timer_task->GetDelay());\n  timer_queue_.push(timer_task);\n}\n\nvoid Timer::RemoveTopTimerTask() {\n  auto top = timer_queue_.top();\n  
timer_queue_.pop();\n}\n\nvoid Timer::StopTimerTask(TimerTask *timer_task) {\n  std::unique_lock<std::mutex> lock(lock_);\n  auto timer = timer_queue_.top();\n  if (timer->IsRunning() == false) {\n    cond_.notify_one();\n  }\n}\n\nbool Timer::RemoveStoppedTimer() {\n  bool removed = false;\n  while (timer_queue_.size() > 0) {\n    auto timer = timer_queue_.top();\n    if (timer->IsRunning()) {\n      return removed;\n    }\n\n    RemoveTopTimerTask();\n    removed = true;\n  }\n\n  if (timer_queue_.size() == 0) {\n    return true;\n  }\n\n  return removed;\n}\n\nvoid Timer::WaitTimerTask(std::unique_lock<std::mutex> &lock,\n                          std::shared_ptr<TimerTask> &timer) {\n  // wait for first timer task timeout\n  timer = timer_queue_.top();\n  uint64_t now = GetCurrentTick();\n  uint64_t time_diff = GetTickDiff(timer->GetHitTime(), now);\n  if (time_diff <= timer->GetPeriod() + timer->GetDelay()) {\n    auto wait_time = std::chrono::milliseconds(time_diff);\n    cond_.wait_for(lock, wait_time, [this, timer]() {\n      // return true when timer stop, top timer stop, top timer changed.\n      return timer_running_ == false || timer_queue_.size() == 0 ||\n             (timer_queue_.top()->IsRunning() == false) ||\n             timer_queue_.top().get() != timer.get();\n    });\n\n    if (timer_queue_.size() == 0 || timer_running_ == false) {\n      timer = nullptr;\n      return;\n    }\n\n    timer = timer_queue_.top();\n  } else if (time_diff != 0) {\n    // timer stall, force reset hit time\n    timer = timer_queue_.top();\n    time_diff = GetTickDiff(now, timer->GetHitTime());\n    if (time_diff > (timer->GetPeriod() + timer->GetDelay()) * 5 &&\n        timer->GetPeriod() + timer->GetDelay() > 0) {\n      MBLOG_WARN << \"timer stall too long, update timer task\";\n      MBLOG_WARN << \"timer name: \" << timer->GetName();\n      MBLOG_WARN << \"timer period: \" << timer->GetPeriod();\n      timer->SetHitTime(now);\n    } else if (time_diff > 
timer->GetPeriod() + timer->GetDelay()) {\n      MBLOG_DEBUG << \"timer [\" << timer->GetName() << \"] stall for \" << time_diff\n                 << \"ms\";\n    }\n  }\n}\n\nbool Timer::GetTimerTask(std::unique_lock<std::mutex> &lock,\n                         std::shared_ptr<TimerTask> &timer) {\n  // wait for timer task\n  if (timer_queue_.size() <= 0) {\n    cond_.wait(lock, [this]() {\n      return timer_queue_.size() > 0 || timer_running_ == false ||\n             is_shutdown_ == true;\n    });\n    if (timer_running_ == false || is_shutdown_ == true) {\n      return false;\n    }\n  }\n\n  // skip timer task already stopped\n  if (RemoveStoppedTimer() == true) {\n    return false;\n  }\n\n  // wait timer task timeout\n  WaitTimerTask(lock, timer);\n\n  // stop, return.\n  if (timer_running_ == false || timer_queue_.size() <= 0) {\n    return false;\n  }\n\n  if (timer->IsRunning() == false) {\n    RemoveTopTimerTask();\n    return false;\n  }\n\n  // get a timer task\n  uint64_t now = GetCurrentTick();\n  uint64_t time_diff = GetTickDiff(timer->GetHitTime(), now);\n  if ((time_diff <= timer->GetPeriod() + timer->GetDelay() && time_diff != 0) ||\n      timer_running_ == false) {\n    return false;\n  }\n\n  RemoveTopTimerTask();\n  return true;\n}\n\nvoid Timer::RunTimerTask(const std::shared_ptr<TimerTask> &timer,\n                         const std::shared_ptr<TimerTask> &timer_call) {\n  try {\n    uint64_t start = GetCurrentTick();\n    current_timer_task_ = timer_call;\n    timer_call->Run();\n    current_timer_task_ = nullptr;\n    uint64_t end = GetCurrentTick();\n\n    auto elapsed = end - start;\n    if (elapsed > TIMER_MAX_RUNNING_TIME) {\n      std::string msg;\n      if (name_.length() > 0) {\n        MBLOG_WARN << name_ << \": timer '\" << timer->GetName()\n                   << \"' run too long, take \" << elapsed << \"ms\";\n      } else {\n        MBLOG_WARN << \"timer '\" << timer->GetName() << \"' run too long, take \"\n                   
<< elapsed << \"ms\";\n      }\n    }\n  } catch (const std::bad_function_call &ex) {\n    MBLOG_WARN << \"timer '\" << timer->GetName()\n               << \"' is invalid, function is not set, disable\";\n    timer->SetTimerRunning(false);\n  } catch (const std::exception &ex) {\n    MBLOG_WARN << \"timer '\" << timer->GetName()\n               << \"'caght exception: \" << ex.what();\n  }\n}\n\nvoid Timer::RunTimer() {\n  // get a timer\n  std::shared_ptr<TimerTask> timer;\n  std::shared_ptr<TimerTask> timer_call;\n\n  std::unique_lock<std::mutex> lock(lock_);\n\n  if (timer_queue_.size() == 0) {\n    // reset tick\n    start_tick_ = GetTickCount();\n  }\n\n  if (GetTimerTask(lock, timer) == false) {\n    if (is_shutdown_ == true && timer_queue_.size() <= 0) {\n      timer_running_ = false;\n    }\n    return;\n  }\n\n  lock.unlock();\n\n  if (timer->IsWeakPtrTimerTask()) {\n    timer_call = timer->weak_timer_.lock();\n    if (timer_call == nullptr) {\n      timer->SetTimerRunning(false);\n      return;\n    }\n  } else {\n    timer_call = timer;\n  }\n\n  // run timer\n  RunTimerTask(timer, timer_call);\n\n  if (timer->GetPeriod() == 0 || timer->IsRunning() == false) {\n    timer->SetTimerRunning(false);\n    return;\n  }\n\n  // reset delay time.\n  if (timer->GetDelay() > 0) {\n    timer->SetDelay(0);\n  }\n\n  // reschedue task\n  lock.lock();\n  InsertTimerTask(timer, timer->GetHitTime());\n}\n\nvoid TimerGlobal::Stop() {\n  std::unique_lock<std::mutex> lock(lock_);\n  refcnt_--;\n  if (refcnt_ > 0) {\n    return;\n  }\n\n  timer_.Stop();\n}\n\nvoid TimerGlobal::Start() {\n  std::unique_lock<std::mutex> lock(lock_);\n  refcnt_++;\n  if (refcnt_ > 1) {\n    return;\n  }\n\n  timer_.SetName(\"Global-Timer\");\n  timer_.Start();\n}\n\nvoid TimerGlobal::Schedule(const std::shared_ptr<TimerTask> &timer_task,\n                           uint64_t delay, uint64_t period,\n                           bool take_owner_ship) {\n  timer_.Schedule(timer_task, delay, period, 
take_owner_ship);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/utils/any.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/any.h>\n\nnamespace modelbox {\n\nCollection::Collection() = default;\n\nCollection::~Collection() = default;\n\nvoid Collection::Set(const std::string& key, const char* value) {\n  entrys_[key] = Any(std::string(value));\n}\n\nstd::tuple<Any*, bool> Collection::Get(const std::string& key) {\n  auto iter = entrys_.find(key);\n  if (iter != entrys_.end()) {\n    return std::make_tuple(&(iter->second), true);\n  }\n\n  return std::make_tuple(nullptr, false);\n}\n\nvoid Collection::Merge(const Collection& other, bool is_override) {\n  if (!is_override) {\n    entrys_.insert(other.entrys_.begin(), other.entrys_.end());\n    return;\n  }\n\n  for (auto& iter : entrys_) {\n    entrys_[iter.first] = iter.second;\n  }\n}\n\nbool Collection::CanConvert(size_t cast_code, size_t origin_code) {\n  if (cast_code == origin_code) {\n    return true;\n  }\n\n  auto iter = type_hash_code_map.find(origin_code);\n  if (iter == type_hash_code_map.end()) {\n    return false;\n  }\n\n  if (type_hash_code_map[origin_code] == cast_code) {\n    MBLOG_DEBUG\n        << \"origin type is not match cast type, maybe loss the accuracy.\";\n    return true;\n  }\n\n  return false;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/utils/crypto.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <errno.h>\n#include <fcntl.h>\n#include <libgen.h>\n#include <modelbox/base/base64_simd.h>\n#include <modelbox/base/crypto.h>\n#include <modelbox/base/os.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <openssl/err.h>\n#include <openssl/evp.h>\n#include <openssl/hmac.h>\n#include <openssl/rand.h>\n#include <stdint.h>\n#include <stdlib.h>  // for endian type\n#include <string.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <iomanip>\n#include <sstream>\n#include <vector>\n\n#include \"modelbox/base/log.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\n#define MODELBOX_SIGN_LEN 4096\n#define KEY_LEN 48\n#define SALT_LEN 32\n#define ITERATION_NUM 10000\n#define KEY_PATH_MAX (1024)\n#define KEY_BUFF_LEN (4096)\n#define SEED_LEN 55\n#define RANDOM_SOURCE \"/dev/random\"\n\nstruct key_gen_info {\n  unsigned char sysrelated;\n  unsigned char rootKey[KEY_LEN];\n  unsigned char salt[SALT_LEN];\n} __attribute__((packed, aligned(1)));\n\nstruct cipher_context {\n  unsigned char iv[IV_LEN];\n  unsigned char ciph[MAX_PASSWORD_LEN];\n} __attribute__((packed, aligned(1)));\n\nStatus HmacEncode(const std::string &algorithm, const void *input,\n                  size_t input_len, std::vector<unsigned char> *output) {\n  EVP_MD_CTX *mdctx = 
nullptr;\n  const EVP_MD *md = nullptr;\n  unsigned char md_value[EVP_MAX_MD_SIZE];\n  unsigned int md_len;\n\n  md = EVP_get_digestbyname(algorithm.c_str());\n\n  if (!md) {\n    return {STATUS_NOTSUPPORT, \"unknown digest \" + algorithm};\n  }\n\n#if OPENSSL_VERSION_NUMBER >= 0x10100000L\n  mdctx = EVP_MD_CTX_new();\n#else\n  mdctx = EVP_MD_CTX_create();\n#endif\n  if (mdctx == nullptr) {\n    return {STATUS_NOMEM, \"create md ctx failed.\"};\n  }\n\n  EVP_DigestInit_ex(mdctx, md, nullptr);\n  EVP_DigestUpdate(mdctx, input, input_len);\n  EVP_DigestFinal_ex(mdctx, md_value, &md_len);\n#if OPENSSL_VERSION_NUMBER >= 0x10100000L\n  EVP_MD_CTX_free(mdctx);\n#else\n  EVP_MD_CTX_destroy(mdctx);\n#endif\n\n  output->insert(output->end(), &md_value[0], &md_value[md_len]);\n\n  return STATUS_OK;\n}\n\nStatus HmacEncode(const std::string &algorithm,\n                  const std::vector<unsigned char> &input,\n                  std::vector<unsigned char> *output) {\n  return HmacEncode(algorithm, input.data(), input.size(), output);\n}\n\nstd::string HmacToString(const void *input, size_t input_len) {\n  std::stringstream ss;\n  const unsigned char *data = (unsigned char *)input;\n  for (size_t i = 0; i < input_len; ++i) {\n    ss << std::hex << std::setw(2) << std::setfill('0') << (int)data[i];\n  }\n  return ss.str();\n}\n\nStatus Base64Encode(const unsigned char *input, size_t input_len,\n                    std::string *output) {\n  auto ret = Base64EncodeSIMD(input, input_len, output);\n  if (ret == STATUS_NOTFOUND) {\n    std::vector<unsigned char> out;\n    int base64_len = 0;\n    int output_len = (((input_len + 2) / 3) * 4) + 1;\n    out.resize(output_len);\n\n    base64_len = EVP_EncodeBlock(out.data(), input, input_len);\n    if (base64_len <= 0) {\n      return {STATUS_FAULT, \"base64 encode failed.\"};\n    }\n\n    output->assign(out.begin(), out.begin() + base64_len);\n  } else {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Base64Encode(const 
std::vector<unsigned char> &input,\n                    std::string *output) {\n  return Base64Encode(input.data(), input.size(), output);\n}\n\nStatus Base64Decode(const std::string &input,\n                    std::vector<unsigned char> *output) {\n  int base64_len = 0;\n  int out_max_len = (input.length() * 6 + 7) / 8;\n\n  output->resize(out_max_len);\n  base64_len = EVP_DecodeBlock(output->data(), (unsigned char *)input.c_str(),\n                               input.length());\n  if (base64_len < 0) {\n    return {STATUS_FAULT, \"Decode base64 failed: \" + input};\n  }\n\n  for (int i = input.length() - 1; i >= 0; i--) {\n    if (input.c_str()[i] != '=') {\n      break;\n    }\n\n    base64_len--;\n  }\n\n  output->resize(base64_len);\n\n  return STATUS_OK;\n}\n\nstatic const signed char ROOT_MATERIAL_INIT[] = {\n    -58,  -85,  80,   55,   -26,  -5,   110,  -63,  71,   37,   104,  -9,\n    -45,  58,   32,   33,   6,    -22,  23,   121,  79,   -62,  -96,  0,\n    125,  -45,  -68,  53,   116,  95,   108,  12,   20,   -105, -102, -11,\n    -62,  -8,   121,  -90,  80,   55,   35,   -27,  -91,  -51,  29,   74,\n    -88,  -51,  -115, 94,   3,    -85,  -72,  -99,  -65,  93,   44,   86,\n    58,   -55,  48,   -52,  -33,  -60,  -77,  -74,  107,  -8,   -2,   -6,\n    115,  -27,  -84,  -11,  -39,  43,   -34,  11,   3,    3,    -95,  28,\n    98,   -59,  -96,  -88,  -89,  14,   104,  -104, 99,   63,   12,   61,\n    68,   -121, 122,  27,   -68,  -71,  113,  -112, -34,  63,   -51,  119,\n    109,  -81,  1,    20,   -103, -6,   -28,  12,   25,   13,   25,   -97,\n    -51,  -72,  -71,  -112, -29,  41,   -12,  -52,  95,   -96,  73,   41,\n    42,   115,  98,   -82,  -112, -92,  19,   -16,  -72,  -15,  -69,  62,\n    -60,  21,   116,  -54,  -11,  -110, -2,   73,   -20,  70,   56,   94,\n    35,   -49,  50,   -88,  -76,  70,   -121, 52,   -58,  -43,  98,   -45,\n    113,  -94,  -97,  -95,  -96,  9,    -88,  -25,  -26,  97,   123,  -83,\n    -48,  -5,   -22,  -79,  87,   40,   
18,   -57,  -86,  12,   -107, 101,\n    118,  53,   -97,  64,   -13,  125,  -27,  58,   -85,  -49,  -23,  77,\n    -100, -88,  -28,  65,   -92,  100,  -9,   -49,  -128, -28,  -64,  43,\n    -35,  33,   -103, -62,  31,   59,   115,  63,   2,    21,   102,  117,\n    -66,  -71,  -115, -90,  37,   -53,  -125, -48,  -89,  -45,  1,    36,\n    102,  91,   -125, -123, 114,  63,   -92,  81,   115,  66,   -42,  -78,\n    81,   -94,  -91,  -51,  54,   -40,  62,   19,   -31,  107,  34,   45,\n    110,  -8,   -75,  104,  58,   97,   65,   83,   -33,  117,  -80,  125,\n    -103, -87,  -37,  50,   31,   -32,  -9,   -54,  76,   -108, 38,   116,\n    41,   18,   115,  -15,  -110, 54,   90,   87,   28,   118,  -90,  -127,\n    -59,  4,    -33,  31,   68,   11,   -116, -48,  64,   -25,  -25,  -31,\n    -32,  17,   -92,  103,  17,   -5,   61,   -125, -105, 36,   15,   0,\n    65,   -3,   97,   -71,  114,  -103, -81,  -28,  39,   55,   119,  69,\n    88,   -59,  -96,  -102, -61,  123,  -105, 20,   -40,  -45,  114,  33,\n    -57,  3,    -57,  115,  -80,  -39,  108,  -79,  114,  45,   -5,   114,\n    -50,  81,   105,  15,   51,   99,   -37,  -105, 27,   124,  -20,  -68,\n    7,    -20,  110,  -119, -63,  51,   -67,  -85,  109,  24,   79,   -123,\n    -121, -6,   -35,  -69,  62,   76,   21,   48,   -109, -128, -9,   127,\n    -106, 9,    -42,  -85,  110,  113,  50,   -46,  29,   -3,   -17,  -84,\n    82,   -122, -27,  3,    67,   -83,  -30,  50,   100,  -99,  -92,  -68,\n    -59,  -72,  39,   0,    -54,  -107, -83,  31,   86,   -123, 9,    -69,\n    -23,  121,  -65,  70,   -64,  -16,  31,   20,   123,  -88,  0,    -125,\n    18,   -87,  64,   96,   -67,  17,   -119, 34,   -19,  -36,  37,   -25,\n    105,  -69,  -30,  -12,  -72,  -104, -52,  63,   -69,  29,   -117, -17,\n    122,  -124, -52,  23,   -72,  -106, 119,  -82,  -102, 115,  -71,  -71,\n    -105, -111, -42,  -71,  -8,   81,   4,    -64,  -90,  37,   66,   10,\n    76,   -14,  -8,   -63,  72,   74,   -14,  -3,   -114, -63,  12,   
-106,\n    -18,  5,    -19,  44,   -93,  -66,  -33,  -94};\n\nstatic bool seed_set = false;\n\nstatic Status ReadBytes(int fd, void *buffer, int len) {\n  int bytesRead = 0;\n  int result;\n  while (bytesRead < len) {\n    result = read(fd, (char *)(buffer) + bytesRead, len - bytesRead);\n    if (result == -1) {\n      if (errno == EINTR || errno == EAGAIN) {\n        continue;\n      }\n      MBLOG_ERROR << \"errno is \" << StrError(errno);\n      return {STATUS_FAULT, \"Generate Seed Failed.\"};\n    }\n    bytesRead += result;\n  }\n  return STATUS_OK;\n}\n\nStatus GetTrueRandom(void *random, int len) {\n  int fd;\n  fd = open(RANDOM_SOURCE, O_RDONLY);\n  if (fd <= 0) {\n    return {STATUS_FAULT, \"Open /dev/random failed\"};\n  }\n  Defer { close(fd); };\n  auto status = ReadBytes(fd, random, len);\n  return status;\n}\n\nStatus HmacGenRootKey(int sysrelated, std::string *en_key) {\n  struct key_gen_info keyGenInfo;\n  unsigned char sysrelate_num = 0;\n\n  memset_s(&keyGenInfo, sizeof(keyGenInfo), 0, sizeof(keyGenInfo));\n\n  if (!seed_set) {\n    unsigned char seed[SEED_LEN];\n    Status status = GetTrueRandom(seed, SEED_LEN);\n    if (status != STATUS_SUCCESS) {\n      return status;\n    }\n    RAND_seed(&seed, SEED_LEN);\n    seed_set = true;\n  }\n\n#if OPENSSL_VERSION_NUMBER >= 0x10100000L\n  RAND_priv_bytes((unsigned char *)keyGenInfo.rootKey, KEY_LEN);\n  RAND_priv_bytes(keyGenInfo.salt, SALT_LEN);\n  RAND_priv_bytes(&sysrelate_num, sizeof(sysrelate_num));\n#else\n  RAND_bytes((unsigned char *)keyGenInfo.rootKey, KEY_LEN);\n  RAND_bytes(keyGenInfo.salt, SALT_LEN);\n  RAND_bytes(&sysrelate_num, sizeof(sysrelate_num));\n#endif\n\n  if (sysrelated) {\n    if ((sysrelate_num % 2) != 0) {\n      sysrelate_num++;\n    }\n  } else {\n    if ((sysrelate_num % 2) == 0) {\n      sysrelate_num++;\n    }\n  }\n\n  keyGenInfo.sysrelated = sysrelate_num;\n  std::vector<unsigned char> keyinfo(\n      (unsigned char *)&keyGenInfo,\n      (unsigned char *)&keyGenInfo + 
sizeof(keyGenInfo));\n\n  return Base64Encode(keyinfo, en_key);\n}\n\nStatus HmacGetRootKey(const std::string &en_key,\n                      std::vector<unsigned char> *outkey) {\n  unsigned int i;\n  int iRet;\n\n  int MATERIAL_LEN = sizeof(ROOT_MATERIAL_INIT);\n  struct key_gen_info *keyGenInfo;\n  std::vector<unsigned char> raw_key;\n\n  auto ret = Base64Decode(en_key, &raw_key);\n  if (raw_key.size() < sizeof(struct key_gen_info)) {\n    return {STATUS_INVALID, \"enkey is invalid.\"};\n  }\n\n  keyGenInfo = (struct key_gen_info *)raw_key.data();\n\n  for (i = 0; i < sizeof(keyGenInfo->rootKey); ++i) {\n    keyGenInfo->rootKey[i] =\n        keyGenInfo->rootKey[i] ^ ROOT_MATERIAL_INIT[i % MATERIAL_LEN];\n  }\n\n  if (keyGenInfo->sysrelated % 2 == 0) {\n    std::string sysID = os->GetSystemID();\n    std::string mac_addr = os->GetMacAddress();\n    std::vector<unsigned char> syskey(KEY_LEN);\n#if OPENSSL_VERSION_NUMBER >= 0x1000100fL\n    iRet = PKCS5_PBKDF2_HMAC(sysID.c_str(), sysID.length(), keyGenInfo->salt,\n                             SALT_LEN, ITERATION_NUM, EVP_sha256(),\n                             syskey.size(), syskey.data());\n    if (mac_addr.length() > 0) {\n      iRet = PKCS5_PBKDF2_HMAC(mac_addr.c_str(), mac_addr.length(),\n                               keyGenInfo->salt, SALT_LEN, ITERATION_NUM,\n                               EVP_sha256(), syskey.size(), syskey.data());\n    }\n#else\n    iRet = PKCS5_PBKDF2_HMAC_SHA1(sysID.c_str(), sysID.length(),\n                                  keyGenInfo->salt, SALT_LEN, ITERATION_NUM,\n                                  syskey.size(), syskey.data());\n    if (mac_addr.length() > 0) {\n      iRet = PKCS5_PBKDF2_HMAC_SHA1(mac_addr.c_str(), mac_addr.length(),\n                                    keyGenInfo->salt, SALT_LEN, ITERATION_NUM,\n                                    syskey.size(), syskey.data());\n    }\n#endif\n    for (i = 0; i < sizeof(keyGenInfo->rootKey); ++i) {\n      keyGenInfo->rootKey[i] = 
keyGenInfo->rootKey[i] ^ syskey[i % KEY_LEN];\n    }\n  }\n\n  outkey->resize(KEY_LEN);\n#if OPENSSL_VERSION_NUMBER >= 0x1000100fL\n  iRet = PKCS5_PBKDF2_HMAC((const char *)keyGenInfo->rootKey, KEY_LEN,\n                           keyGenInfo->salt, SALT_LEN, ITERATION_NUM,\n                           EVP_sha256(), outkey->size(), outkey->data());\n#else\n  iRet = PKCS5_PBKDF2_HMAC_SHA1(keyGenInfo->rootKey, KEY_LEN, keyGenInfo->salt,\n                                SALT_LEN, ITERATION_NUM, outkey->size(),\n                                outkey->data());\n#endif\n\n  if (iRet == 0) {\n    return {STATUS_FAULT, \"Create HMAC failed.\"};\n  }\n\n  return STATUS_OK;\n}\n\nStatus Encrypt(const std::string &ciphername, unsigned char *input,\n               int input_len, unsigned char *output, int *output_len,\n               int max_output, unsigned char *key, unsigned char *iv) {\n  std::shared_ptr<EVP_CIPHER_CTX> ctx;\n  const EVP_CIPHER *cipher = nullptr;\n  EVP_CIPHER_CTX *ctx_new = nullptr;\n  int len;\n  *output_len = 0;\n\n  if (input_len + EVP_MAX_BLOCK_LENGTH >= max_output) {\n    return {STATUS_NOSPACE, \"output buffer is not enough.\"};\n  }\n\n  cipher = EVP_get_cipherbyname(ciphername.c_str());\n  if (cipher == nullptr) {\n    return {STATUS_NOTSUPPORT, \"cipher not support, \" + ciphername};\n  }\n\n  /* Create and initialise the context */\n  ctx_new = EVP_CIPHER_CTX_new();\n  if (ctx_new == nullptr) {\n    return {STATUS_NOMEM, \"create cipher failed.\"};\n  }\n\n  ctx.reset(ctx_new, [](EVP_CIPHER_CTX *ctx) { EVP_CIPHER_CTX_free(ctx); });\n\n  /* Initialise the encryption operation. IMPORTANT - ensure you use a key\n   * and IV size appropriate for your cipher\n   * In this example we are using 256 bit AES (i.e. a 256 bit key). The\n   * IV size for *most* modes is the same as the block size. 
For AES this\n   * is 128 bits */\n  if (1 != EVP_EncryptInit_ex(ctx.get(), cipher, nullptr, key, iv)) {\n    return {STATUS_FAULT, \"encrypt init failed.\"};\n  }\n\n  /* Provide the message to be encrypted, and obtain the encrypted output.\n   * EVP_EncryptUpdate can be called multiple times if necessary\n   */\n  if (1 != EVP_EncryptUpdate(ctx.get(), output, &len, input, input_len)) {\n    return {STATUS_FAULT, \"encrypt update failed.\"};\n  }\n  *output_len += len;\n\n  /* Finalise the encryption. Further ciphertext bytes may be written at\n   * this stage.\n   */\n  if (1 != EVP_EncryptFinal_ex(ctx.get(), output + *output_len, &len)) {\n    return {STATUS_FAULT, \"encrypt final failed.\"};\n  }\n  *output_len += len;\n\n  return STATUS_OK;\n}\n\nstd::string EvpGetErrorMsg() {\n  const auto *errmsg = ERR_reason_error_string(ERR_get_error());\n  if (errmsg == nullptr) {\n    return \"\";\n  }\n\n  return errmsg;\n}\n\nStatus Decrypt(const std::string &ciphername, unsigned char *input,\n               int input_len, unsigned char *output, int *output_len,\n               int max_output, unsigned char *key, unsigned char *iv) {\n  std::shared_ptr<EVP_CIPHER_CTX> ctx;\n  const EVP_CIPHER *cipher = nullptr;\n  EVP_CIPHER_CTX *ctx_new = nullptr;\n  int len = 0;\n\n  if (input_len + EVP_MAX_BLOCK_LENGTH >= max_output) {\n    return {STATUS_NOSPACE, \"output buffer is not enough.\"};\n  }\n\n  *output_len = 0;\n\n  cipher = EVP_get_cipherbyname(ciphername.c_str());\n  if (cipher == nullptr) {\n    return {STATUS_NOTSUPPORT, \"cipher not support, \" + ciphername};\n  }\n\n  /* Create and initialise the context */\n  ctx_new = EVP_CIPHER_CTX_new();\n  if (ctx_new == nullptr) {\n    return {STATUS_NOMEM, \"create cipher failed.\"};\n  }\n\n  ctx.reset(ctx_new, [](EVP_CIPHER_CTX *ctx) { EVP_CIPHER_CTX_free(ctx); });\n\n  /* Initialise the decryption operation. 
IMPORTANT - ensure you use a key\n   * and IV size appropriate for your cipher\n   * In this example we are using 256 bit AES (i.e. a 256 bit key). The\n   * IV size for *most* modes is the same as the block size. For AES this\n   * is 128 bits */\n  if (1 != EVP_DecryptInit_ex(ctx.get(), cipher, nullptr, key, iv)) {\n    std::string msg = \"decrypt failed, \" + EvpGetErrorMsg();\n    return {STATUS_FAULT, msg};\n  }\n\n  /* Provide the message to be decrypted, and obtain the plaintext output.\n   * EVP_DecryptUpdate can be called multiple times if necessary\n   */\n  if (1 != EVP_DecryptUpdate(ctx.get(), output, &len, input, input_len)) {\n    std::string msg = \"decrypt update failed, \" + EvpGetErrorMsg();\n    return {STATUS_FAULT, msg};\n  }\n\n  *output_len += len;\n\n  /* Finalise the decryption. Further plaintext bytes may be written at\n   * this stage.\n   */\n  if (1 != EVP_DecryptFinal_ex(ctx.get(), output + *output_len, &len)) {\n    std::string msg = \"decrypt final failed, \" + EvpGetErrorMsg();\n    return {STATUS_FAULT, msg};\n  }\n\n  *output_len += len;\n\n  return STATUS_OK;\n}\n\nStatus PassEncrypt(const std::vector<char> &pass, bool sysrelated,\n                   std::string *rootkey, std::string *en_pass,\n                   const std::string &ciphername) {\n  Status ret;\n  struct cipher_context *contex = nullptr;\n  int cipher_len = 0;\n  std::vector<unsigned char> key;\n\n  if (en_pass == nullptr || rootkey == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  std::vector<unsigned char> encrypt_raw_pass(sizeof(struct cipher_context));\n  contex = (struct cipher_context *)encrypt_raw_pass.data();\n  memset_s(contex, sizeof(*contex), 0, sizeof(*contex));\n\n  if (rootkey->length() == 0) {\n    /* Generate root key */\n    ret = HmacGenRootKey(sysrelated, rootkey);\n    if (ret != STATUS_OK) {\n      return ret;\n    }\n  }\n\n  /* Get root key */\n  ret = HmacGetRootKey(*rootkey, &key);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  
RAND_bytes(contex->iv, IV_LEN);\n  ret = Encrypt(ciphername, (unsigned char *)pass.data(), pass.size(),\n                contex->ciph, &cipher_len, MAX_PASSWORD_LEN, key.data(),\n                contex->iv);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  encrypt_raw_pass.resize(IV_LEN + cipher_len);\n  memset_s(key.data(), key.size(), 0, key.size());\n  ret = Base64Encode(encrypt_raw_pass, en_pass);\n  return ret;\n}\n\nStatus PassDecrypt(const std::string &en_pass, const std::string &rootkey,\n                   std::vector<char> *pass, const std::string &ciphername) {\n  Status ret;\n  struct cipher_context *contex;\n  std::vector<unsigned char> key;\n  std::vector<unsigned char> raw_pass(sizeof(struct cipher_context));\n\n  ret = HmacGetRootKey(rootkey, &key);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  ret = Base64Decode(en_pass, &raw_pass);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  int en_pass_len = raw_pass.size();\n  contex = (struct cipher_context *)raw_pass.data();\n  std::vector<unsigned char> encrypt_raw_pass(contex->ciph,\n                                              contex->ciph + en_pass_len);\n  // fill key with \"0\"\n  pass->resize(en_pass.length() + EVP_MAX_BLOCK_LENGTH, '\\0');\n  int passwordlen = 0;\n  ret = Decrypt(ciphername, contex->ciph, en_pass_len - IV_LEN,\n                (unsigned char *)pass->data(), &passwordlen, MAX_PASSWORD_LEN,\n                key.data(), contex->iv);\n  if (!ret) {\n    return ret;\n  }\n\n  pass->resize(passwordlen);\n  return ret;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/utils/json_toml_convert.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n\n#include <fstream>\n#include <nlohmann/json.hpp>\n#include <toml.hpp>\n\n#include \"modelbox/base/utils.h\"\n\nnamespace modelbox {\n\nStatus JsonToTomlProcess(\n    std::list<std::tuple<std::shared_ptr<toml::value>, std::string,\n                         nlohmann::json>> &json_obj_list,\n    const std::shared_ptr<toml::value> &toml_root) {\n  while (json_obj_list.size() > 0) {\n    std::string key;\n    nlohmann::json cur_value;\n    std::shared_ptr<toml::value> toml_key;\n    std::tie(toml_key, key, cur_value) = json_obj_list.back();\n    json_obj_list.pop_back();\n\n    switch (cur_value.type()) {\n      case nlohmann::json::value_t::null:\n        break;\n      case nlohmann::json::value_t::number_integer:\n        (*toml_key) = cur_value.get<int>();\n        break;\n      case nlohmann::json::value_t::number_unsigned:\n        (*toml_key) = cur_value.get<unsigned int>();\n        break;\n      case nlohmann::json::value_t::number_float:\n        (*toml_key) = cur_value.get<double>();\n        break;\n      case nlohmann::json::value_t::boolean:\n        (*toml_key) = cur_value.get<bool>();\n        break;\n      case nlohmann::json::value_t::string:\n        (*toml_key) = cur_value.get<std::string>();\n        break;\n      case nlohmann::json::value_t::object: {\n        
for (nlohmann::json::iterator obj = cur_value.begin();\n             obj != cur_value.end(); obj++) {\n          auto *value = new toml::value;\n          const std::string &key = obj.key();\n          std::shared_ptr<toml::value> toml_new(value, [=](toml::value *value) {\n            if (value->is_uninitialized()) {\n              delete value;\n              return;\n            }\n            (*toml_key)[key] = *value;\n            delete value;\n          });\n          json_obj_list.push_front(\n              std::make_tuple(toml_new, obj.key(), obj.value()));\n        }\n        break;\n      }\n      case nlohmann::json::value_t::array: {\n        auto *array = new toml::array;\n        std::shared_ptr<toml::array> array_new(array, [=](toml::array *array) {\n          (*toml_key) = *array;\n          delete array;\n        });\n\n        for (auto &item : cur_value) {\n          auto *value = new toml::value;\n          std::shared_ptr<toml::value> toml_new(value, [=](toml::value *value) {\n            (*array_new).push_back(*value);\n            delete value;\n          });\n          json_obj_list.push_front(std::make_tuple(toml_new, key, item));\n        }\n        break;\n      }\n      default:\n        MBLOG_ERROR << \"Process json to toml failed, \" << key << \":\"\n                    << cur_value;\n        return {STATUS_BADCONF};\n        break;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus JsonToToml(const std::string &json_data, std::string *toml_data) {\n  nlohmann::json root;\n  auto toml_root = std::make_shared<toml::value>();\n\n  if (toml_data == nullptr) {\n    return {STATUS_INVALID};\n  }\n\n  try {\n    root = nlohmann::json::parse(json_data);\n\n    std::list<\n        std::tuple<std::shared_ptr<toml::value>, std::string, nlohmann::json>>\n        json_obj_list;\n    json_obj_list.emplace_back(std::make_tuple(toml_root, \"\", root));\n\n    auto ret = JsonToTomlProcess(json_obj_list, toml_root);\n    if (!ret) {\n      return ret;\n   
 }\n  } catch (std::exception &e) {\n    MBLOG_ERROR << \"parse json failed, \" << e.what();\n    return {STATUS_BADCONF, e.what()};\n  }\n\n  std::ostringstream os;\n  os << *toml_root;\n  *toml_data = os.str();\n  return STATUS_OK;\n}\n\nstruct JsonSerializer {\n  JsonSerializer(bool indent) : indent_(indent) {}\n\n  void operator()(toml::boolean v) { oss_ << toml::value(v); }\n  void operator()(toml::integer v) { oss_ << toml::value(v); }\n  void operator()(toml::floating v) { oss_ << toml::value(v); }\n  void operator()(const toml::string &v) {\n    // since toml11 automatically convert string to multiline string that is\n    // valid only in TOML, we need to format the string to make it valid in\n    // JSON.\n    oss_ << \"\\\"\" << this->escape_string(v.str) << \"\\\"\";\n  }\n  void operator()(const toml::local_time &v) { oss_ << toml::value(v); }\n  void operator()(const toml::local_date &v) { oss_ << toml::value(v); }\n  void operator()(const toml::local_datetime &v) { oss_ << toml::value(v); }\n  void operator()(const toml::offset_datetime &v) { oss_ << toml::value(v); }\n  void operator()(const toml::array &v) {\n    bool has_data = false;\n    if (!v.empty() && v.front().is_table()) {\n      oss_ << '[';\n      IndentIn();\n      bool is_first = true;\n      for (const auto &elem : v) {\n        if (!is_first) {\n          oss_ << \",\";\n        }\n        is_first = false;\n        has_data = true;\n        toml::visit(*this, elem);\n      }\n      IndentOut(has_data);\n      oss_ << ']';\n    } else {\n      oss_ << \"[\";\n      IndentIn();\n      bool is_first = true;\n      for (const auto &elem : v) {\n        if (!is_first) {\n          oss_ << \",\";\n        }\n        IndentSpace();\n        is_first = false;\n        has_data = true;\n        toml::visit(*this, elem);\n      }\n      IndentOut(has_data);\n      oss_ << \"]\";\n    }\n  }\n  void operator()(const toml::table &v) {\n    oss_ << '{';\n    bool has_data = false;\n    
IndentIn();\n    bool is_first = true;\n    for (const auto &elem : v) {\n      if (!is_first) {\n        oss_ << \",\";\n      }\n      has_data = true;\n      is_first = false;\n      IndentSpace();\n      oss_ << this->format_key(elem.first) << \": \";\n      toml::visit(*this, elem.second);\n    }\n    IndentOut(has_data);\n    oss_ << '}';\n  }\n\n  std::string escape_string(const std::string &s) const {\n    std::string retval;\n    for (const char c : s) {\n      switch (c) {\n        case '\\\\': {\n          retval += \"\\\\\\\\\";\n          break;\n        }\n        case '\\\"': {\n          retval += \"\\\\\\\"\";\n          break;\n        }\n        case '\\b': {\n          retval += \"\\\\b\";\n          break;\n        }\n        case '\\t': {\n          retval += \"\\\\t\";\n          break;\n        }\n        case '\\f': {\n          retval += \"\\\\f\";\n          break;\n        }\n        case '\\n': {\n          retval += \"\\\\n\";\n          break;\n        }\n        case '\\r': {\n          retval += \"\\\\r\";\n          break;\n        }\n        default: {\n          retval += c;\n          break;\n        }\n      }\n    }\n    return retval;\n  }\n\n  std::string format_key(const std::string &s) const {\n    const auto *quote = \"\\\"\";\n    return quote + escape_string(s) + quote;\n  }\n\n  std::string GetJsonData() { return oss_.str(); }\n\n private:\n  void IndentIn() {\n    if (indent_ == false) {\n      return;\n    }\n\n    space_.append(\"    \");\n  }\n\n  void IndentSpace() {\n    if (indent_ == false) {\n      return;\n    }\n\n    oss_ << std::endl << space_;\n  }\n\n  void IndentOut(bool need_newline) {\n    if (indent_ == false) {\n      return;\n    }\n\n    if (need_newline == true) {\n      oss_ << std::endl;\n    }\n    space_.pop_back();\n    space_.pop_back();\n    space_.pop_back();\n    space_.pop_back();\n\n    if (need_newline == true) {\n      oss_ << space_;\n    }\n  }\n  std::ostringstream oss_;\n  
std::string space_;\n  bool indent_{false};\n};\n\nStatus TomlToJson(const std::string &toml_data, std::string *json_data,\n                  bool readable) {\n  if (json_data == nullptr) {\n    return {STATUS_INVALID};\n  }\n\n  try {\n    std::istringstream instring(toml_data);\n    auto json_serialize = JsonSerializer(readable);\n    auto toml = toml::parse(instring);\n    toml::visit(json_serialize, toml);\n    *json_data = json_serialize.GetJsonData();\n  } catch (const std::exception &e) {\n    MBLOG_ERROR << \"parse toml failed, \" << e.what();\n    return {STATUS_BADCONF, e.what()};\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/utils/popen.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <fcntl.h>\n#include <modelbox/base/popen.h>\n#include <modelbox/base/utils.h>\n#include <sys/poll.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\n#include <chrono>\n#include <iostream>\n\nnamespace modelbox {\n\nconstexpr int POPEN_ERROR = -1;\nconstexpr int POPEN_EOF = -2;\nconstexpr int POPEN_STOP_READ = -3;\n\nint ParserArg(const std::string &cmd, std::vector<std::string> &args) {\n  std::string arg;\n  char quoteChar = 0;\n\n  for (char ch : cmd) {\n    if (quoteChar == '\\\\') {\n      arg.push_back(ch);\n      quoteChar = 0;\n      continue;\n    }\n\n    if (quoteChar && ch != quoteChar) {\n      arg.push_back(ch);\n      continue;\n    }\n\n    switch (ch) {\n      case '\\'':\n      case '\\\"':\n      case '\\\\':\n        quoteChar = quoteChar ? 
0 : ch;\n        break;\n      case ' ':\n      case '\\t':\n      case '\\n':\n        if (!arg.empty()) {\n          args.push_back(arg);\n          arg.clear();\n        }\n        break;\n      default:\n        arg.push_back(ch);\n        break;\n    }\n  }\n\n  if (!arg.empty()) {\n    args.push_back(arg);\n  }\n\n  return 0;\n}\n\nPopen::Popen() {\n  start_tm_ = std::chrono::high_resolution_clock::now();\n  fdout_.buffer_.reserve(1024 * 16);\n  fderr_.buffer_.reserve(1024 * 16);\n}\n\nPopen::~Popen() { Close(); }\n\nvoid Popen::SetupMode(const char *mode) {\n  if (strstr(mode, \"r\")) {\n    fdout_.enable_ = true;\n  }\n\n  if (strstr(mode, \"e\")) {\n    fderr_.enable_ = true;\n  }\n\n  if (strstr(mode, \"w\")) {\n    fdin_.enable_ = true;\n  }\n}\n\nStatus Popen::Open(const std::string &cmdline, int timeout, const char *mode,\n                   const PopenEnv &env) {\n  std::vector<std::string> args;\n  std::vector<std::string> envs;\n\n  if (ParserArg(cmdline, args) != 0) {\n    return {STATUS_INVALID, \"command line is invalid\"};\n  }\n\n  return Open(args, timeout, mode, env);\n}\n\nStatus Popen::Open(std::vector<std::string> args, int timeout, const char *mode,\n                   const PopenEnv &env) {\n  pid_t child_pid;\n  int fd_out[2] = {-1, -1};\n  int fd_err[2] = {-1, -1};\n  int fd_in[2] = {-1, -1};\n\n  SetupMode(mode);\n\n  if (child_pid_ > 0) {\n    return STATUS_ALREADY;\n  }\n\n  if (fdin_.enable_ == true && pipe(fd_in) != 0) {\n    goto errout;\n  }\n\n  if (pipe(fd_out) != 0) {\n    goto errout;\n  }\n\n  if (fdout_.enable_ == true && pipe(fd_err) != 0) {\n    goto errout;\n  }\n\n  child_pid = vfork();\n  if (child_pid < 0) {\n    return {STATUS_FAULT, StrError(errno)};\n  }\n\n  if (child_pid == 0) {\n    size_t i = 0;\n    int fd_out_keep = fd_out[1];\n    setsid();\n\n    if (fdin_.enable_) {\n      close(0);\n      dup2(fd_in[0], 0);\n      close(fd_in[1]);\n    }\n\n    if (fdout_.enable_) {\n      close(1);\n      
dup2(fd_out_keep, 1);\n      close(fd_out_keep);\n      fd_out_keep = -1;\n    }\n\n    if (fderr_.enable_) {\n      close(2);\n      dup2(fd_err[1], 2);\n      close(fd_err[1]);\n    }\n\n    // call readdir after vfork is not safe, for glibc only\n    CloseAllParentFds(fd_out_keep);\n\n    // args\n    char *argv[args.size() + 1];\n    for (i = 0; i < args.size(); i++) {\n      argv[i] = (char *)args[i].c_str();\n    }\n    argv[args.size()] = nullptr;\n\n    // env\n    auto envs = env.GetEnvs();\n    char *envp[envs.size() + 1];\n    for (i = 0; i < envs.size(); i++) {\n      envp[i] = (char *)envs[i].c_str();\n    }\n    envp[envs.size()] = nullptr;\n\n    // exec command\n    if (env.Changed()) {\n      execvpe(argv[0], argv, envp);\n    } else {\n      execvp(argv[0], argv);\n    }\n    fprintf(stderr, \"exec failed for %s, %s\\n\", argv[0],\n            StrError(errno).c_str());\n    _exit(1);\n  }\n\n  timeout_ = timeout;\n  child_pid_ = child_pid;\n  start_tm_ = std::chrono::high_resolution_clock::now();\n\n  close(fd_out[1]);\n  fdout_.fd_ = fd_out[0];\n  fcntl(fdout_.fd_, F_SETFL, fcntl(fdout_.fd_, F_GETFL) | O_NONBLOCK);\n\n  if (fdin_.enable_) {\n    close(fd_in[0]);\n    fdin_.fd_ = fd_in[1];\n  }\n\n  if (fderr_.enable_) {\n    close(fd_err[1]);\n    fderr_.fd_ = fd_err[0];\n    fcntl(fderr_.fd_, F_SETFL, fcntl(fderr_.fd_, F_GETFL) | O_NONBLOCK);\n  }\n\n  return STATUS_OK;\n\nerrout:\n  auto close_fd = [](int fd[2]) {\n    if (fd[0] > 0) {\n      close(fd[0]);\n    }\n\n    if (fd[1] > 0) {\n      close(fd[1]);\n    }\n  };\n\n  close_fd(fd_in);\n  close_fd(fd_out);\n  close_fd(fd_err);\n\n  return {STATUS_FAULT, StrError(errno)};\n}\n\nvoid Popen::CloseAllParentFds(int keep_fd) {\n  std::vector<std::string> files;\n  ListFiles(\"/proc/self/fd\", \"*\", &files);\n  for (auto &file : files) {\n    int port = std::stoi(GetBaseName(file));\n    if (port == STDIN_FILENO || port == STDOUT_FILENO ||\n        port == STDERR_FILENO || port == keep_fd) {\n  
    continue;\n    }\n    close(port);\n  }\n}\n\nbool Popen::DataReady(std::vector<struct stdfd *> *fds) {\n  for (auto const &stdfd : *fds) {\n    if (stdfd->newline_pos_ > 0) {\n      return true;\n    }\n\n    if (stdfd->iseof_ == 1 && stdfd->buffer_.size() > 0) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nint Popen::ReadLineData(struct stdfd *stdfd) {\n  char tmp[4096];\n\n  stdfd->newline_pos_ = 0;\n  while (true) {\n    int len = read(stdfd->fd_, tmp, sizeof(tmp));\n    if (len < 0) {\n      if (errno == EAGAIN || errno == EWOULDBLOCK) {\n        UpdateNewLinePos(stdfd);\n        return 0;\n      }\n      return POPEN_ERROR;\n    }\n\n    if (len == 0) {\n      stdfd->iseof_ = 1;\n      UpdateNewLinePos(stdfd);\n      return POPEN_STOP_READ;\n    }\n\n    if (stdfd->buffer_.size() + len >= stdfd->buffer_.capacity()) {\n      stdfd->buffer_.erase(stdfd->buffer_.begin(),\n                           stdfd->buffer_.begin() + len);\n    }\n\n    stdfd->buffer_.insert(stdfd->buffer_.end(), tmp, tmp + len);\n\n    UpdateNewLinePos(stdfd);\n    if (stdfd->newline_pos_ > 0) {\n      return POPEN_STOP_READ;\n    }\n  }\n\n  return 0;\n}\n\nint Popen::WaitForFdsLineRead(std::vector<struct stdfd *> *fds, int timeout) {\n  if (DataReady(fds)) {\n    return 1;\n  }\n\n  int ret = WaitForFds(*fds, timeout, [this](struct stdfd *stdfd, int revent) {\n    return ReadLineData(stdfd);\n  });\n\n  if (DataReady(fds)) {\n    return 1;\n  }\n\n  return ret;\n}\n\nint Popen::WaitForLineRead(int timeout) {\n  std::vector<struct stdfd *> fds;\n  if (fdout_.enable_) {\n    fds.push_back(&fdout_);\n  }\n\n  if (fderr_.enable_) {\n    fds.push_back(&fderr_);\n  }\n\n  return WaitForFdsLineRead(&fds, timeout);\n}\n\nvoid Popen::UpdateNewLinePos(struct stdfd *stdfd) {\n  int len = stdfd->buffer_.size();\n\n  for (int i = 0; i < len; i++) {\n    if (stdfd->buffer_[i] == '\\n' || stdfd->buffer_[i] == '\\0') {\n      stdfd->newline_pos_ = i + 1;\n      return;\n    }\n  }\n\n  
stdfd->newline_pos_ = 0;\n}\n\nint Popen::GetStringLine(struct stdfd *stdfd, std::string &line) {\n  if (stdfd->enable_ == false) {\n    return -1;\n  }\n\n  if (stdfd->newline_pos_ <= 0) {\n    std::vector<struct stdfd *> fds;\n    fds.push_back(stdfd);\n    WaitForFdsLineRead(&fds, -1);\n  }\n\n  if (stdfd->newline_pos_ <= 0) {\n    if (stdfd->iseof_ && stdfd->buffer_.size() > 0) {\n      line.assign(stdfd->buffer_.begin(),\n                  stdfd->buffer_.begin() + stdfd->buffer_.size());\n      stdfd->buffer_.clear();\n      return 0;\n    }\n\n    return -1;\n  }\n\n  line.assign(stdfd->buffer_.begin(),\n              stdfd->buffer_.begin() + stdfd->newline_pos_);\n  stdfd->buffer_.erase(stdfd->buffer_.begin(),\n                       stdfd->buffer_.begin() + stdfd->newline_pos_);\n\n  UpdateNewLinePos(stdfd);\n\n  return 0;\n}\n\nint Popen::ReadErrLine(std::string &line) {\n  return GetStringLine(&fderr_, line);\n}\n\nint Popen::ReadOutLine(std::string &line) {\n  return GetStringLine(&fdout_, line);\n}\n\nint Popen::ReadAll(std::string *out, std::string *err) {\n  std::vector<struct stdfd *> fds;\n  int ret = 0;\n\n  if (fdout_.enable_) {\n    fds.push_back(&fdout_);\n  }\n\n  if (fderr_.enable_) {\n    fds.push_back(&fderr_);\n  }\n\n  while (true) {\n    ret = WaitForFds(fds, -1, [this](struct stdfd *stdfd, int revent) {\n      return ReadLineData(stdfd);\n    });\n\n    if (ret < 0) {\n      break;\n    }\n  }\n\n  if (out && fdout_.enable_) {\n    out->assign(fdout_.buffer_.begin(),\n                fdout_.buffer_.begin() + fdout_.buffer_.size());\n    fdout_.buffer_.clear();\n  }\n\n  if (err && fderr_.enable_) {\n    err->assign(fderr_.buffer_.begin(),\n                fderr_.buffer_.begin() + fderr_.buffer_.size());\n    fderr_.buffer_.clear();\n  }\n\n  return 0;\n}\n\nint Popen::WriteString(const std::string &in) {\n  if (fdin_.enable_ == false) {\n    return -1;\n  }\n\n  int len = 0;\n  int total_len = in.size();\n\n  struct sigaction act;\n  
struct sigaction old;\n  Defer { sigaction(SIGPIPE, &old, nullptr); };\n\n  act.sa_handler = SIG_IGN;\n  act.sa_flags = SA_RESTART;\n  sigaction(SIGPIPE, &act, &old);\n\n  do {\n    int written_len = in.size() - total_len;\n    len = write(fdin_.fd_, in.data() + written_len, in.size() - written_len);\n    if (len < 0) {\n      return -1;\n    }\n\n    total_len -= len;\n  } while (total_len > 0);\n\n  return 0;\n}\n\nint Popen::TimeOutLeft() {\n  auto t1 = std::chrono::high_resolution_clock::now();\n  std::chrono::duration<float> fs = t1 - start_tm_;\n  std::chrono::milliseconds diff =\n      std::chrono::duration_cast<std::chrono::milliseconds>(fs);\n\n  int ret = timeout_ - diff.count();\n  if (ret <= 0) {\n    return -1;\n  }\n\n  return ret;\n}\n\nint Popen::WaitForFds(std::vector<struct stdfd *> fds, int timeout,\n                      const std::function<int(struct stdfd *, int)> &func) {\n  struct pollfd fdset[fds.size()];\n  int fds_count = fds.size();\n  int eof_count = fds.size();\n  int i = 0;\n\n  for (i = 0; i < fds_count; i++) {\n    fdset[i].fd = fds[i]->fd_;\n    fdset[i].events = POLLIN | POLLHUP;\n  }\n\n  while (true && fds_count > 0) {\n    int polltimeout = TimeOutLeft();\n    if (polltimeout < 0 && timeout_ != -1) {\n      return POPEN_ERROR;\n    }\n\n    if ((timeout > polltimeout && timeout_ != -1) || timeout < 0) {\n      timeout = polltimeout;\n    }\n\n    int ret = poll(fdset, fds_count, timeout);\n    if (ret <= 0) {\n      return ret;\n    }\n\n    timeout = 0;\n    for (i = 0; i < fds_count; i++) {\n      struct stdfd *stdfd = fds[i];\n      if (stdfd->fd_ != fdset[i].fd) {\n        raise(SIGSEGV);\n        return POPEN_ERROR;\n      }\n\n      int func_ret = func(stdfd, fdset[i].revents);\n      if (func_ret != 0 && func_ret != POPEN_STOP_READ) {\n        return func_ret;\n      }\n\n      if (fdset[i].revents & POLLHUP || func_ret == POPEN_STOP_READ) {\n        int j;\n        for (j = i + 1; j < fds_count; j++) {\n          
fdset[i].fd = fdset[j].fd;\n          fdset[i].events = fdset[j].events;\n          fds.erase(fds.begin() + i);\n        }\n\n        if (fdset[i].revents & POLLHUP) {\n          eof_count--;\n        }\n        fds_count--;\n      }\n    }\n  }\n\n  if (eof_count == 0) {\n    return POPEN_EOF;\n  }\n\n  return 0;\n}\n\nint Popen::WaitChildTimeOut() {\n  char buff[4096];\n  std::vector<struct stdfd *> fds;\n  fds.push_back(&fdout_);\n  if (fderr_.enable_) {\n    fds.push_back(&fderr_);\n  }\n\n  int ret = WaitForFds(fds, -1, [&buff](struct stdfd *stdfd, int revent) {\n    int unused __attribute__((unused));\n    if (!(revent & POLLIN)) {\n      return 0;\n    }\n\n    unused = read(stdfd->fd_, buff, sizeof(buff));\n\n    return 0;\n  });\n\n  return ret;\n}\n\nStatus Popen::ForceStop() {\n  if (child_pid_ <= 0) {\n    return STATUS_NOTFOUND;\n  }\n\n  killpg(child_pid_, SIGKILL);\n\n  return STATUS_OK;\n}\n\nvoid Popen::KeepAlive() {\n  start_tm_ = std::chrono::high_resolution_clock::now();\n}\n\nvoid Popen::CloseStdFd() {\n  auto closefd = [](struct stdfd *stdfd) {\n    if (stdfd->fd_ > 0) {\n      close(stdfd->fd_);\n      stdfd->fd_ = -1;\n    }\n\n    stdfd->iseof_ = 0;\n    stdfd->enable_ = false;\n    stdfd->newline_pos_ = 0;\n    stdfd->buffer_.clear();\n  };\n\n  closefd(&fdout_);\n  closefd(&fderr_);\n  closefd(&fdin_);\n}\n\nint Popen::Close() {\n  int wstatus = 0;\n  if (child_pid_ <= 0) {\n    return 0;\n  }\n\n  if (timeout_ > 0) {\n    int ret = WaitChildTimeOut();\n    if (ret == 0 || ret == POPEN_ERROR) {\n      killpg(child_pid_, SIGTERM);\n      usleep(2000);\n      killpg(child_pid_, SIGKILL);\n    }\n  }\n\n  CloseStdFd();\n\n  if (waitpid(child_pid_, &wstatus, 0) == -1) {\n    return -1;\n  }\n\n  child_pid_ = 0;\n  return wstatus;\n}\n\nPopenEnv::PopenEnv() = default;\n\nPopenEnv::~PopenEnv() = default;\n\nPopenEnv::PopenEnv(const std::string &item_list) { LoadEnvFromList(item_list); }\n\nPopenEnv::PopenEnv(const char *item_list) { 
LoadEnvFromList(item_list); }\n\nvoid PopenEnv::LoadEnvFromList(const std::string &item_list) {\n  inherit_ = true;\n  std::vector<std::string> envs;\n  ParserArg(item_list, envs);\n  if (envs.size() <= 0) {\n    return;\n  }\n\n  LoadInherit();\n\n  for (auto const &env : envs) {\n    const char *envp = env.c_str();\n    const auto *field = strstr(envp, \"=\");\n    if (field == nullptr) {\n      continue;\n    }\n\n    std::string item(envp, field - envp);\n    std::string value = field + 1;\n    Add(item, value);\n  }\n}\n\nPopenEnv::PopenEnv(const std::string &item, const std::string &value) {\n  inherit_ = true;\n  Add(item, value);\n}\n\nvoid PopenEnv::LoadInherit() {\n  char **ep;\n  if (load_inherit_) {\n    return;\n  }\n\n  load_inherit_ = true;\n  for (ep = environ; *ep != nullptr; ep++) {\n    auto *field = strstr(*ep, \"=\");\n    if (field == nullptr) {\n      continue;\n    }\n\n    std::string item(*ep, field);\n    std::string value = field + 1;\n    Add(item, value);\n  }\n}\n\nPopenEnv &PopenEnv::Add(const std::string &item, const std::string &value) {\n  LoadInherit();\n  env_[item] = value;\n  return *this;\n}\n\nPopenEnv &PopenEnv::Rmv(const std::string &item) {\n  LoadInherit();\n  env_.erase(item);\n  return *this;\n}\n\nPopenEnv &PopenEnv::Clear() {\n  env_.clear();\n  load_inherit_ = true;\n  return *this;\n}\n\nbool PopenEnv::Changed() const { return load_inherit_; }\n\nstd::vector<std::string> PopenEnv::GetEnvs() const {\n  std::vector<std::string> envs;\n\n  for (auto const &it : env_) {\n    std::string value;\n    value = it.first + \"=\" + it.second;\n    envs.emplace_back(value);\n  }\n\n  return envs;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/base/utils/refcache.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/refcache.h>\n\nnamespace modelbox {\n\nRefCacheData::RefCacheData() = default;\n\nRefCacheData::~RefCacheData() = default;\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/utils/utils.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/utils.h\"\n\n#include <cxxabi.h>\n#include <dirent.h>\n#include <dlfcn.h>\n#include <errno.h>\n#include <execinfo.h>\n#include <ftw.h>\n#include <glob.h>\n#include <libgen.h>\n#include <modelbox/base/log.h>\n#include <openssl/rand.h>\n#include <stdio.h>\n#include <string.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include <algorithm>\n#include <fstream>\n#include <iomanip>\n#include <iostream>\n\n#include \"modelbox/base/config.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nconstexpr int MAX_STACK_DEPTH = 100;\n#ifdef _WIN32\nconst char Separator = '\\\\';\n#else\nconst char Separator = '/';\n#endif\n\nDeferGuard::DeferGuard(DeferGuard &&other) noexcept\n    : fn_(std::move(other.fn_)) {\n  other.fn_ = nullptr;\n}\n\nDeferGuard::~DeferGuard() {\n  if (fn_) {\n    fn_();\n  }\n}\n\nDeferGuardChain::DeferGuardChain(DeferGuardChain &&other) noexcept\n    : fn_cond_(std::move(other.fn_cond_)) {\n  other.fn_cond_ = nullptr;\n}\n\nDeferGuardChain &DeferGuardChain::operator+=(std::function<void()> &&fn) {\n  fn_list_.push_front(std::move(fn));\n  return *this;\n}\n\nDeferGuardChain::~DeferGuardChain() {\n  if (!fn_cond_()) {\n    return;\n  }\n\n  for (const auto &fn : fn_list_) {\n    fn();\n  }\n}\n\nStatus ListFiles(const std::string &path, const std::string &filter,\n        
         std::vector<std::string> *listfiles,\n                 enum LIST_FILE_TYPE type) {\n  struct stat buffer;\n  if (stat(path.c_str(), &buffer) == -1) {\n    std::string msg = path + \" does not exist, \";\n    return {STATUS_NOTFOUND, msg + StrError(errno)};\n  }\n\n  if (S_ISDIR(buffer.st_mode) == 0) {\n    std::string msg = path + \" is not a directory, \";\n    return {STATUS_INVALID, msg};\n  }\n\n  glob_t glob_result;\n  std::string path_pattern = path + \"/\" + filter;\n\n  auto ret = glob(path_pattern.c_str(), GLOB_TILDE, nullptr, &glob_result);\n  if (ret != 0) {\n    if (ret == GLOB_NOMATCH) {\n      return STATUS_OK;\n    }\n\n    MBLOG_ERROR << \"glob \" << path_pattern << \" failed, code: \" << ret;\n    return {STATUS_INVALID, \"error code :\" + std::to_string(ret)};\n  }\n\n  for (unsigned int i = 0; i < glob_result.gl_pathc; i++) {\n    if (stat(glob_result.gl_pathv[i], &buffer) == -1) {\n      continue;\n    }\n\n    if (S_ISDIR(buffer.st_mode) && (type & LIST_FILES_DIR)) {\n      listfiles->push_back(glob_result.gl_pathv[i]);\n    }\n\n    if (S_ISDIR(buffer.st_mode) == 0 && (type & LIST_FILES_FILE)) {\n      listfiles->push_back(glob_result.gl_pathv[i]);\n    }\n  }\n\n  globfree(&glob_result);\n  return STATUS_OK;\n}\n\nsize_t FindTheEarliestFileIndex(std::vector<std::string> &listfiles) {\n  struct stat buffer;\n  __time_t min_sec = 0x7fffffff;\n  size_t index = 0;\n  for (size_t i = 0; i < listfiles.size(); ++i) {\n    if (stat(listfiles[i].c_str(), &buffer) == -1) {\n      MBLOG_WARN << \"stat \" << listfiles[i]\n                 << \" failed, errno: \" << StrError(errno);\n      continue;\n    }\n\n    if (buffer.st_mtim.tv_sec < min_sec) {\n      min_sec = buffer.st_mtim.tv_sec;\n      index = i;\n    }\n  }\n\n  return index;\n}\n\nStatus ListSubDirectoryFiles(const std::string &path, const std::string &filter,\n                             std::vector<std::string> *listfiles) {\n  struct stat buffer;\n  DIR *pDir;\n  struct dirent 
*ptr = nullptr;\n\n  auto status = ListFiles(path, filter, listfiles);\n\n  pDir = opendir(path.c_str());\n  if (pDir == nullptr) {\n    return {STATUS_NOTFOUND, StrError(errno)};\n  }\n\n  Defer {\n    if (closedir(pDir) != 0) {\n      MBLOG_WARN << \"Close dir failed.\";\n    }\n  };\n\n  while ((ptr = readdir(pDir)) != nullptr) {\n    std::string temp_path = path + \"/\" + std::string(ptr->d_name);\n    if (stat(temp_path.c_str(), &buffer) == -1) {\n      MBLOG_WARN << \"stat \" << temp_path\n                 << \" failed, errno: \" << StrError(errno);\n      continue;\n    };\n\n    if (S_ISDIR(buffer.st_mode) == 0) {\n      continue;\n    }\n\n    if (strncmp(ptr->d_name, \".\", PATH_MAX - 1) == 0 ||\n        strncmp(ptr->d_name, \"..\", PATH_MAX - 1) == 0) {\n      continue;\n    }\n\n    auto status = ListFiles(temp_path, filter, listfiles);\n  }\n\n  return STATUS_OK;\n}\n\nStatus CreateDirectory(const std::string &path, mode_t mode) {\n  std::string directory_path = path + \"/\";\n  uint32_t dir_path_len = directory_path.length();\n  if (dir_path_len > PATH_MAX) {\n    return STATUS_INVALID;\n  }\n\n  char dir_path[PATH_MAX] = {0};\n  for (uint32_t i = 0; i < dir_path_len; ++i) {\n    dir_path[i] = directory_path[i];\n    if (dir_path[i] != '/') {\n      continue;\n    }\n\n    if (access(dir_path, 0) == 0) {\n      continue;\n    }\n\n    int32_t ret = mkdir(dir_path, mode);\n    if (ret != 0) {\n      return {STATUS_FAULT, StrError(errno)};\n    }\n  }\n\n  return STATUS_OK;\n}\n\nbool IsDirectory(const std::string &path) {\n  struct stat buffer;\n  if (stat(path.c_str(), &buffer) == -1) {\n    return false;\n  }\n\n  if (S_ISDIR(buffer.st_mode) == 0) {\n    return false;\n  }\n\n  return true;\n}\n\nstatic int rmfiles(const char *pathname, const struct stat *sbuf, int type,\n                   struct FTW *ftwb) {\n  remove(pathname);\n  return 0;\n}\n\nvoid RemoveDirectory(const std::string &path) {\n  nftw(path.c_str(), rmfiles, 10, FTW_DEPTH | 
FTW_MOUNT | FTW_PHYS);\n}\n\nStatus CopyFile(const std::string &src, const std::string &dest, int mode,\n                bool overwrite) {\n  if (overwrite == false && access(dest.c_str(), F_OK) == 0) {\n    return STATUS_FAULT;\n  }\n\n  std::ifstream src_file(src, std::ios::binary);\n  std::ofstream dst_file(dest, std::ios::binary | std::ios::trunc);\n  bool copy_fail = false;\n\n  if (src_file.fail() || dst_file.fail()) {\n    return STATUS_FAULT;\n  }\n\n  std::istreambuf_iterator<char> begin_source(src_file);\n  std::istreambuf_iterator<char> end_source;\n  std::ostreambuf_iterator<char> begin_dest(dst_file);\n  std::copy(begin_source, end_source, begin_dest);\n\n  src_file.seekg(0, std::ios::end);\n  if (dst_file.tellp() != src_file.tellg()) {\n    copy_fail = true;\n  }\n\n  src_file.close();\n  if (dst_file.fail() || copy_fail) {\n    dst_file.close();\n    remove(dest.c_str());\n    return false;\n  }\n  dst_file.close();\n\n  if (mode) {\n    chmod(dest.c_str(), mode);\n  }\n\n  return STATUS_OK;\n}\n\nint64_t GetCurrentTime() {\n  static const int64_t SECOND_TO_MICRO = 1000000;\n  struct timeval tv;\n  gettimeofday(&tv, nullptr);\n  return tv.tv_sec * SECOND_TO_MICRO + tv.tv_usec;\n}\n\nstd::vector<std::string> StringSplit(const std::string &s, const char delim) {\n  std::vector<std::string> ret;\n  std::string::size_type lastPos = s.find_first_not_of(delim, 0);\n  std::string::size_type pos = s.find_first_of(delim, lastPos);\n  while (std::string::npos != pos || std::string::npos != lastPos) {\n    ret.push_back(s.substr(lastPos, pos - lastPos));\n    lastPos = s.find_first_not_of(delim, pos);\n    pos = s.find_first_of(delim, lastPos);\n  }\n\n  return ret;\n}\n\nstd::vector<void *> GetStackFrames(int skip, int maxdepth) {\n  std::vector<void *> stack;\n  int size;\n\n  if (maxdepth <= 0) {\n    maxdepth = MAX_STACK_DEPTH;\n  }\n\n  skip++;\n  stack.resize(maxdepth + skip);\n  size = backtrace(&stack[0], maxdepth + skip);\n  size = size - skip;\n\n  if 
(size < 0) {\n    stack.resize(0);\n    return stack;\n  }\n\n  stack.erase(stack.begin(), stack.begin() + skip);\n  stack.resize(size);\n\n  return stack;\n}\n\nstd::string DemangleCPPSymbol(const char *symbol) {\n  int ret;\n  std::string symbolname;\n  char *name;\n\n  name = abi::__cxa_demangle(symbol, nullptr, nullptr, &ret);\n  if (ret == -2 || ret == -3 || name == nullptr) {\n    symbolname = symbol;\n  } else if (ret == 0) {\n    symbolname = name;\n  }\n\n  if (name) {\n    free(name);\n  }\n\n  return symbolname;\n}\n\nstd::tuple<void *, std::string> GetSymbol(void *addr) {\n  Dl_info info;\n  std::ostringstream os;\n\n  if (dladdr(addr, &info)) {\n    std::string symname;\n    if (info.dli_sname) {\n      symname = DemangleCPPSymbol(info.dli_sname);\n    }\n\n    if (symname.length() == 0) {\n      symname = \"?? ()\";\n    }\n\n    os << symname;\n    if (info.dli_fname) {\n      auto *offset = (void *)((char *)(addr) - (char *)(info.dli_fbase));\n      os << \" from \" << info.dli_fname << \"+\" << offset;\n    }\n  } else {\n    os << \"?? 
()\";\n  }\n\n  return std::make_tuple(addr, os.str());\n}\n\nstd::vector<std::tuple<void *, std::string>> GetStacks(int skip, int maxdepth) {\n  std::vector<std::tuple<void *, std::string>> stacks;\n\n  auto frames = GetStackFrames(skip + 1, maxdepth);\n  stacks.reserve(frames.size());\n  for (auto &frame : frames) {\n    stacks.push_back(GetSymbol(frame));\n  }\n\n  return stacks;\n}\n\nstd::string GetStackTrace(int skip, int maxdepth) {\n  std::ostringstream os;\n  const int w = sizeof(char *) * 2;\n  int index = 0;\n  auto frames = GetStackFrames(skip + 1, maxdepth);\n\n  for (auto &frame : frames) {\n    void *addr = nullptr;\n    std::string symbol;\n    std::tie(addr, symbol) = GetSymbol(frame);\n    os << \"#\" << std::dec << index << \" \";\n    os << \"0x\" << std::setfill('0') << std::setw(w) << std::hex\n       << (unsigned long)addr;\n    os << \": \" << symbol << std::endl;\n    index++;\n  }\n\n  return os.str();\n}\n\nstd::string GetBytesReadable(size_t size) {\n  const char *suffix[] = {\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"};\n  char length = sizeof(suffix) / sizeof(suffix[0]);\n\n  int i = 0;\n  double double_size = size;\n\n  if (size >= 1024) {\n    for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {\n      double_size = size / 1024.0;\n    }\n  }\n\n  char output[32];\n  auto ret = snprintf_s(output, sizeof(output), sizeof(output) - 1, \"%g%s\",\n                        double_size, suffix[i]);\n  if (ret < 0 || ret == sizeof(output)) {\n    return \"\";\n  }\n\n  return output;\n}\n\nuint64_t GetBytesFromReadable(const std::string &size) {\n  const char *suffix[] = {\"B\", \"K\", \"M\", \"G\", \"T\", \"P\"};\n  char length = sizeof(suffix) / sizeof(suffix[0]);\n\n  uint64_t double_size = 1;\n  auto ret = std::stod(size);\n\n  auto uppercase_size = size;\n  std::transform(uppercase_size.begin(), uppercase_size.end(),\n                 uppercase_size.begin(), ::toupper);\n\n  for (int i = 1; i < length; i++) {\n    
double_size *= 1024;\n    if (uppercase_size.find(suffix[i]) == std::string::npos) {\n      continue;\n    }\n\n    ret *= double_size;\n  }\n\n  return (uint64_t)ret;\n}\n\nunsigned long GetTickCount() {\n  struct timespec ts;\n  clock_gettime(CLOCK_MONOTONIC, &ts);\n  return (ts.tv_sec * 1000 + ts.tv_nsec / 1000000);\n}\n\nbool IsAbsolutePath(const std::string &path) {\n  for (const char &c : path) {\n    if (c == ' ') {\n      continue;\n    }\n\n    if (c != Separator) {\n      return false;\n    }\n\n    return true;\n  }\n\n  return false;\n}\n\nstd::string GetDirName(const std::string &path) {\n  std::vector<char> path_data(path.begin(), path.end());\n  path_data.push_back(0);\n  auto *result = dirname(path_data.data());\n  return result;\n}\n\nstd::string GetBaseName(const std::string &path) {\n  std::vector<char> path_data(path.begin(), path.end());\n  path_data.push_back(0);\n  auto *result = basename(path_data.data());\n  if (result == nullptr) {\n    return \"\";\n  }\n  return result;\n}\n\nvoid GetRandom(unsigned char *buf, int num) { RAND_bytes(buf, num); }\n\nstd::string PathCanonicalize(const std::string &path,\n                             const std::string &root_path) {\n  int skip_num = 0;\n  size_t i = 0;\n  std::string resolve_path = root_path;\n  std::deque<std::string> str;\n  std::vector<std::string> fields = StringSplit(path, '/');\n\n  for (auto itr = fields.rbegin(); itr != fields.rend(); itr++) {\n    if (itr->empty() || *itr == \".\") {\n      continue;\n    }\n\n    if (*itr == \"..\") {\n      ++skip_num;\n      continue;\n    }\n\n    if (skip_num <= 0) {\n      str.push_front(*itr);\n    } else {\n      --skip_num;\n    }\n  }\n\n  for (i = 0; i < str.size(); i++) {\n    resolve_path += \"/\";\n    resolve_path += str[i];\n  }\n\n  if (resolve_path.length() == 0) {\n    return \"/\";\n  }\n\n  return resolve_path;\n}\n\nvoid StringReplaceAll(std::string &str, const std::string &from,\n                      const std::string &to) 
{\n  size_t start_pos = 0;\n  if (from.empty()) {\n    return;\n  }\n\n  while ((start_pos = str.find(from, start_pos)) != std::string::npos) {\n    str.replace(start_pos, from.length(), to);\n    start_pos += to.length();\n  }\n}\n\nStatus HardeningSSL(SSL_CTX *ctx) {\n  SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2);\n  SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3);\n  SSL_CTX_set_options(ctx, SSL_OP_NO_TLSv1);\n  SSL_CTX_set_options(ctx, SSL_OP_NO_TLSv1_1);\n  std::string tls1_2_safer_ciphers =\n      \"DHE-RSA-AES256-GCM-SHA384:\"\n      \"ECDHE-RSA-AES256-GCM-SHA384:\"\n      \"ECDHE-ECDSA-AES256-GCM-SHA384:\"\n      \"DHE-RSA-AES128-GCM-SHA256:\"\n      \"ECDHE-RSA-AES128-GCM-SHA256:\"\n      \"ECDHE-ECDSA-AES128-GCM-SHA256:\"\n      \"@STRENGTH\";\n  const auto &tls1_2_ciphers = tls1_2_safer_ciphers;\n#if OPENSSL_VERSION_NUMBER < 0x10100000L\n  std::string tls1_2_suppoert_ciphers =\n      \"ECDHE-RSA-AES128-SHA256:\"\n      \"ECDHE-DSS-AES128-SHA256:\"\n      \"ECDHE-RSA-AES256-SHA256:\"\n      \"ECDHE-DSS-AES256-SHA256:\"\n      \"ECDHE-RSA-AES128-SHA:\"\n      \"ECDHE-DSS-AES128-SHA:\"\n      \"ECDHE-RSA-AES256-SHA:\"\n      \"ECDHE-DSS-AES256-SHA:\";\n  tls1_2_ciphers += tls1_2_suppoert_ciphers;\n#endif\n  SSL_CTX_set_cipher_list(ctx, tls1_2_ciphers.data());\n\n  return STATUS_OK;\n}\n\nstd::string StrError(int errnum) {\n  char buf[32];\n  return strerror_r(errnum, buf, sizeof(buf));\n}\n\nvoid GetCompiledTime(struct tm *compiled_time) {\n  char s_month[5];\n  int month;\n  int day;\n  int year;\n  int hour;\n  int min;\n  int sec;\n  static const char *month_names = \"JanFebMarAprMayJunJulAugSepOctNovDec\";\n\n  sscanf_s(__DATE__, \"%4s %d %d\", s_month, 4, &day, &year);\n  month = (strstr(month_names, s_month) - month_names) / 3;\n  sscanf_s(__TIME__, \"%d:%d:%d\", &hour, &min, &sec);\n  compiled_time->tm_year = year - 1900;\n  compiled_time->tm_mon = month;\n  compiled_time->tm_mday = day;\n  compiled_time->tm_isdst = -1;\n  compiled_time->tm_hour = hour;\n  
compiled_time->tm_min = min;\n  compiled_time->tm_sec = sec;\n}\n\nconst char *GetModelBoxVersion() {\n  static char str_ver[64] = {0};\n  struct tm tm;\n  GetCompiledTime(&tm);\n  snprintf_s(str_ver, sizeof(str_ver), sizeof(str_ver),\n             \"%d.%d.%d (Build: %.4d%.2d%.2d-%.2d%.2d%.2d)\",\n             MODELBOX_VERSION_MAJOR, MODELBOX_VERSION_MINOR,\n             MODELBOX_VERSION_PATCH, tm.tm_year + 1900, tm.tm_mon + 1,\n             tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);\n  return str_ver;\n}\n\nstd::string ExpandEnvironmentVariables(const std::string &text) {\n  std::string text_copies = text;\n  static std::regex env(R\"(((?!(\\\\)).|^)(\\$\\{([a-zA-Z0-9_]+)\\}))\");\n  static std::regex escape(R\"((\\\\\\$)\\{([a-zA-Z0-9_]+)\\})\");\n  std::smatch match;\n  while (std::regex_search(text_copies, match, env)) {\n    const char *env_value = std::getenv(match[4].str().c_str());\n    const std::string var(env_value == nullptr ? \"\" : env_value);\n    text_copies.replace(match[3].first, match[3].second, var);\n  }\n\n  while (std::regex_search(text_copies, match, escape)) {\n    text_copies.replace(match[1].first, match[1].second, \"$\");\n  }\n\n  return text_copies;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/base/utils/uuid.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/uuid.h\"\n\n#include \"modelbox/base/utils.h\"\n\n#define UUID_GENERATION_PATH \"/proc/sys/kernel/random/uuid\"\n\nnamespace modelbox {\n\nStatus GetUUID(std::string* uuid) {\n  char tmp[UUID_LENGTH];\n  FILE* fd = fopen(UUID_GENERATION_PATH, \"r\");\n  if (fd == nullptr) {\n    return STATUS_FAULT;\n  }\n  Defer { fclose(fd); };\n\n  size_t result = fread(tmp, 1, UUID_LENGTH - 1, fd);\n  if (result != UUID_LENGTH - 1) {\n    return STATUS_FAULT;\n  }\n\n  tmp[UUID_LENGTH - 1] = '\\0';\n  *uuid = std::string(tmp);\n  return STATUS_OK;\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/config.h.in",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CONFIG_H_\n#define MODELBOX_CONFIG_H_\n\nnamespace modelbox {\n\n// driver default search path\n#define MODELBOX_DEFAULT_DRIVER_PATH \"@CMAKE_INSTALL_FULL_LIBDIR@\"\n\n#define DRIVER_SKIP_DEFAULT \"skip-default\"\n\n#define DRIVER_DIR \"dir\"\n\n#define MODELBOX_VERSION_MAJOR @MODELBOX_VERSION_MAJOR@\n#define MODELBOX_VERSION_MINOR @MODELBOX_VERSION_MINOR@\n#define MODELBOX_VERSION_PATCH @MODELBOX_VERSION_PATCH@\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CONFIG_H_"
  },
  {
    "path": "src/libmodelbox/engine/api/flow_graph_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow_graph_desc.h\"\n\n#include <utility>\n\n#include \"modelbox/base/register_flowunit.h\"\n\nnamespace modelbox {\n\nFlowGraphFunctionInfo::FlowGraphFunctionInfo(\n    std::string name, std::vector<std::string> input_name_list,\n    std::vector<std::string> output_name_list,\n    std::function<Status(std::shared_ptr<DataContext>)> func)\n    : name_(std::move(name)),\n      input_name_list_(std::move(input_name_list)),\n      output_name_list_(std::move(output_name_list)),\n      func_(std::move(func)) {}\n\nstd::string FlowGraphFunctionInfo::GetName() { return name_; }\n\nstd::vector<std::string> FlowGraphFunctionInfo::GetInputNameList() {\n  return input_name_list_;\n}\n\nstd::vector<std::string> FlowGraphFunctionInfo::GetOutputNameList() {\n  return output_name_list_;\n}\n\nstd::function<Status(std::shared_ptr<DataContext>)>\nFlowGraphFunctionInfo::GetFunc() {\n  return func_;\n}\n\nconstexpr const char *CONFIG_KEY_QUEUE_SIZE = \"graph.queue_size\";\nconstexpr const char *CONFIG_KEY_BATCH_SIZE = \"graph.batch_size\";\nconstexpr const char *CONFIG_KEY_DRIVERS_DIR = \"driver.dir\";\nconstexpr const char *CONFIG_KEY_DRIVERS_SKIP_DEFAULT = \"driver.skip-default\";\nconstexpr const char *CONFIG_KEY_PROFILE_DIR = \"profile.dir\";\nconstexpr const char *CONFIG_KEY_PROFILE_TRACE_ENABLE = 
\"profile.trace\";\n\nFlowGraphDesc::FlowGraphDesc() { config_ = ConfigurationBuilder().Build(); }\n\nFlowGraphDesc::~FlowGraphDesc() {\n  for (auto &node_desc : node_desc_list_) {\n    node_desc->Clear();\n  }\n}\n\nvoid FlowGraphDesc::SetQueueSize(size_t queue_size) {\n  config_->SetProperty(CONFIG_KEY_QUEUE_SIZE, queue_size);\n}\n\nvoid FlowGraphDesc::SetBatchSize(size_t batch_size) {\n  config_->SetProperty(CONFIG_KEY_BATCH_SIZE, batch_size);\n}\n\nvoid FlowGraphDesc::SetDriversDir(\n    const std::vector<std::string> &drivers_dir_list) {\n  config_->SetProperty(CONFIG_KEY_DRIVERS_DIR, drivers_dir_list);\n}\n\nvoid FlowGraphDesc::SetSkipDefaultDrivers(bool is_skip) {\n  config_->SetProperty(CONFIG_KEY_DRIVERS_SKIP_DEFAULT, is_skip);\n}\n\nvoid FlowGraphDesc::SetProfileDir(const std::string &profile_dir) {\n  config_->SetProperty(CONFIG_KEY_PROFILE_DIR, profile_dir);\n}\n\nvoid FlowGraphDesc::SetProfileTraceEnable(bool profile_trace_enable) {\n  config_->SetProperty(CONFIG_KEY_PROFILE_TRACE_ENABLE, profile_trace_enable);\n}\n\n// add input\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddInput(\n    const std::string &input_name) {\n  if (node_name_idx_map_[input_name] >= 1) {\n    MBLOG_ERROR << \"Input name \" << input_name << \" has been used\";\n    return nullptr;\n  }\n\n  ++node_name_idx_map_[input_name];\n  auto node = std::make_shared<FlowNodeDesc>(input_name);\n  node->SetNodeType(GRAPH_NODE_INPUT);\n  node_desc_list_.push_back(node);\n  return node;\n}\n\n// add output\n\nvoid FlowGraphDesc::AddOutput(\n    const std::string &output_name,\n    const std::shared_ptr<FlowPortDesc> &source_node_port) {\n  AddOutput(output_name, \"cpu\", source_node_port);\n}\n\nvoid FlowGraphDesc::AddOutput(\n    const std::string &output_name,\n    const std::shared_ptr<FlowNodeDesc> &source_node) {\n  if (source_node == nullptr) {\n    MBLOG_ERROR << \"add output \" << output_name\n                << \" failed, source_node is null\";\n    return;\n  }\n\n  
AddOutput(output_name, \"cpu\", (*source_node)[0]);\n}\n\n// add flowunit\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddNode(\n    const std::string &flowunit_name, const std::string &device,\n    const std::vector<std::string> &config,\n    const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &source_node_ports) {\n  auto node_name = flowunit_name;\n  ++node_name_idx_map_[node_name];\n  auto idx = node_name_idx_map_[node_name];\n  if (idx != 1) {\n    node_name = node_name + std::to_string(idx);\n  }\n\n  auto node = std::make_shared<FlowNodeDesc>(node_name);\n  node->SetNodeType(GRAPH_NODE_FLOWUNIT);\n  node->SetFlowUnitName(flowunit_name);\n  node->SetDevice(device);\n  node->SetConfig(config);\n  node->SetInputLinks(source_node_ports);\n  node_desc_list_.push_back(node);\n  return node;\n}\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddNode(\n    const std::string &flowunit_name, const std::string &device,\n    const std::vector<std::string> &config,\n    const std::shared_ptr<FlowNodeDesc> &source_node) {\n  if (source_node == nullptr) {\n    MBLOG_ERROR << \"source node is nullptr\";\n    return nullptr;\n  }\n\n  // all source node output connect to this node input in order\n  return AddNode(flowunit_name, device, config, {{\"*\", (*source_node)[\"*\"]}});\n}\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddNode(\n    const std::string &flowunit_name, const std::string &device,\n    const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &source_node_ports) {\n  return AddNode(flowunit_name, device, {}, source_node_ports);\n}\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddNode(\n    const std::string &flowunit_name, const std::string &device,\n    const std::shared_ptr<FlowNodeDesc> &source_node) {\n  if (source_node == nullptr) {\n    MBLOG_ERROR << \"source node is nullptr\";\n    return nullptr;\n  }\n\n  return AddNode(flowunit_name, device, {}, 
source_node);\n}\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddNode(\n    const std::string &flowunit_name, const std::string &device,\n    const std::vector<std::string> &config) {\n  return AddNode(\n      flowunit_name, device, config,\n      std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>{});\n}\n\n// add function\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddFunction(\n    const std::function<Status(std::shared_ptr<DataContext>)> &func,\n    const std::vector<std::string> &input_name_list,\n    const std::vector<std::string> &output_name_list,\n    const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &source_node_ports) {\n  std::string flowunit_name =\n      \"register_func_\" + std::to_string(function_node_idx_);\n  ++function_node_idx_;\n  const auto *device = \"cpu\";\n  // register flowunit\n  auto func_info = std::make_shared<FlowGraphFunctionInfo>(\n      flowunit_name, input_name_list, output_name_list, func);\n  function_list_.push_back(func_info);\n  // add node\n  auto node = std::make_shared<FlowNodeDesc>(flowunit_name);\n  node->SetNodeType(GRAPH_NODE_FLOWUNIT);\n  node->SetFlowUnitName(flowunit_name);\n  node->SetDevice(device);\n  node->SetInputLinks(source_node_ports);\n  node_desc_list_.push_back(node);\n  return node;\n}\n\nstd::shared_ptr<FlowNodeDesc> FlowGraphDesc::AddFunction(\n    const std::function<Status(std::shared_ptr<DataContext>)> &func,\n    const std::vector<std::string> &input_name_list,\n    const std::vector<std::string> &output_name_list,\n    const std::shared_ptr<FlowNodeDesc> &source_node) {\n  if (source_node == nullptr) {\n    MBLOG_ERROR << \"function node source_node is null\";\n    return nullptr;\n  }\n\n  return AddFunction(func, input_name_list, output_name_list,\n                     {{\"*\", (*source_node)[\"*\"]}});\n}\n\n// inner interface\n\nstd::shared_ptr<Configuration> FlowGraphDesc::GetConfig() { return config_; }\n\nvoid 
FlowGraphDesc::GetFuncFactoryList(\n    std::list<std::shared_ptr<FlowUnitFactory>> &factory_list) {\n  for (auto &func_info : function_list_) {\n    factory_list.push_back(std::make_shared<RegisterFlowUnitFactory>(\n        func_info->GetName(), func_info->GetInputNameList(),\n        func_info->GetOutputNameList(), func_info->GetFunc()));\n  }\n}\n\nstd::shared_ptr<GCGraph> FlowGraphDesc::GenGCGraph(\n    const std::shared_ptr<modelbox::FlowUnitManager> &flowunit_mgr) {\n  auto gcgraph = std::make_shared<GCGraph>();\n  gcgraph->Init(nullptr);\n  auto graph_config = config_->GetSubConfig(\"graph\");\n  gcgraph->SetConfiguration(graph_config);\n  auto ret = GenGCNodes(gcgraph);\n  if (!ret) {\n    return nullptr;\n  }\n\n  ret = GenGCEdges(gcgraph, flowunit_mgr);\n  if (!ret) {\n    return nullptr;\n  }\n\n  return gcgraph;\n}\n\nStatus FlowGraphDesc::GenGCNodes(const std::shared_ptr<GCGraph> &gcgraph) {\n  for (auto &node_desc : node_desc_list_) {\n    auto gcnode = std::make_shared<GCNode>();\n    gcnode->Init(node_desc->GetNodeName(), gcgraph);\n    const auto &node_config = node_desc->GetNodeConfig();\n    for (const auto &config_item : node_config) {\n      auto split_pos = config_item.find('=');\n      auto key = config_item.substr(0, split_pos);\n      auto value = config_item.substr(split_pos + 1);\n      gcnode->SetConfiguration(key, value);\n    }\n    gcgraph->AddNode(gcnode);\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowGraphDesc::GenGCEdges(\n    const std::shared_ptr<GCGraph> &gcgraph,\n    const std::shared_ptr<FlowUnitManager> &flowunit_mgr) {\n  for (auto &node_desc : node_desc_list_) {\n    auto dest_node_name = node_desc->GetNodeName();\n    std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>> input_links;\n    auto status = GetInputLinks(node_desc, flowunit_mgr, input_links);\n    if (!status) {\n      return status;\n    }\n\n    for (const auto &link_item : input_links) {\n      const auto &dest_port = link_item.first;\n      const 
auto &src_node_port = link_item.second;\n      auto dest_node = gcgraph->GetNode(dest_node_name);\n      auto src_node = gcgraph->GetNode(src_node_port->GetNodeName());\n      dest_node->SetInputPort(dest_port);\n      src_node->SetOutputPort(src_node_port->port_name_);\n\n      auto gcedge = std::make_shared<GCEdge>();\n      gcedge->Init(gcgraph);\n      gcedge->SetHeadNode(src_node);\n      gcedge->SetHeadPort(src_node_port->port_name_);\n      gcedge->SetTailNode(dest_node);\n      gcedge->SetTailPort(dest_port);\n      gcgraph->AddEdge(gcedge);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowGraphDesc::GetInputLinks(\n    const std::shared_ptr<FlowNodeDesc> &dest_node_desc,\n    const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n    std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &input_links) {\n  const auto &origin_input_links = dest_node_desc->GetInputLinks();\n  auto generate_match = origin_input_links.find(\"*\");\n  if (!(origin_input_links.size() == 1 &&\n        generate_match != origin_input_links.end())) {\n    input_links = origin_input_links;\n    return FormatInputLinks(flowunit_mgr, input_links);\n  }\n\n  /**\n   * user set src_node -> dest_node\n   * we need get port to port info\n   **/\n  auto dest_node_fu_desc = GetFlowUnitDesc(dest_node_desc, flowunit_mgr);\n  if (dest_node_fu_desc == nullptr) {\n    return STATUS_NOTFOUND;\n  }\n  const auto &dest_input_port_list = dest_node_fu_desc->GetFlowUnitInput();\n  if (dest_input_port_list.empty()) {\n    MBLOG_ERROR << \"dest node \" << dest_node_desc->GetNodeName()\n                << \" has no input\";\n    return STATUS_FAULT;\n  }\n\n  const auto &src_node_port = generate_match->second;\n  auto src_node_desc = src_node_port->GetNode();\n  if (src_node_desc->type_ == GRAPH_NODE_INPUT) {\n    if (dest_input_port_list.size() != 1) {\n      MBLOG_ERROR << \"node \" << dest_node_desc->GetNodeName()\n                  << \" has multi input port, please specify the 
port that \"\n                     \"input node connect to\";\n      return STATUS_FAULT;\n    }\n\n    input_links[dest_input_port_list[0].GetPortName()] =\n        std::make_shared<FlowPortDesc>(src_node_desc,\n                                       src_node_desc->GetNodeName());\n    return STATUS_OK;\n  }\n\n  auto src_node_fu_desc = GetFlowUnitDesc(src_node_desc, flowunit_mgr);\n  if (src_node_fu_desc == nullptr) {\n    return STATUS_NOTFOUND;\n  }\n  const auto &src_output_port_list = src_node_fu_desc->GetFlowUnitOutput();\n  if (src_output_port_list.size() != dest_input_port_list.size()) {\n    MBLOG_ERROR << \"src node \" << src_node_desc->GetNodeName()\n                << \" input port count and dest node \"\n                << dest_node_desc->GetNodeName()\n                << \" output port count not equal\";\n    return STATUS_FAULT;\n  }\n\n  for (size_t i = 0; i < src_output_port_list.size(); ++i) {\n    auto src_output_port_name = src_output_port_list[i].GetPortName();\n    auto dest_input_port_name = dest_input_port_list[i].GetPortName();\n    input_links[dest_input_port_name] =\n        std::make_shared<FlowPortDesc>(src_node_desc, src_output_port_name);\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowGraphDesc::FormatInputLinks(\n    const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n    std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &input_links) {\n  for (auto &input_link : input_links) {\n    const auto &dest_port_name = input_link.first;\n    auto src_node_port = input_link.second;\n    if (src_node_port == nullptr) {\n      MBLOG_ERROR << \"src port connect to \" << dest_port_name << \" is nullptr\";\n      return STATUS_FAULT;\n    }\n\n    if (src_node_port->IsDescribeInName()) {\n      continue;\n    }\n\n    // need translate port id to port name\n    auto port_idx = src_node_port->GetPortIdx();\n    auto src_node_desc = src_node_port->GetNode();\n    auto flowunit_desc = GetFlowUnitDesc(src_node_desc, 
flowunit_mgr);\n    if (flowunit_desc == nullptr) {\n      return STATUS_NOTFOUND;\n    }\n\n    const auto &outputs = flowunit_desc->GetFlowUnitOutput();\n    if (outputs.size() <= port_idx) {\n      MBLOG_ERROR << \"node \" << src_node_desc->GetNodeName() << \" has \"\n                  << outputs.size() << \" port, idx \" << port_idx\n                  << \" is out of range\";\n      return STATUS_NOTFOUND;\n    }\n\n    auto format_src_node_port = std::make_shared<FlowPortDesc>(\n        src_node_desc, outputs[port_idx].GetPortName());\n    input_link.second = format_src_node_port;\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<FlowUnitDesc> FlowGraphDesc::GetFlowUnitDesc(\n    const std::shared_ptr<FlowNodeDesc> &node_desc,\n    const std::shared_ptr<FlowUnitManager> &flowunit_mgr) {\n  // to support multi device config\n  auto node_fu_name = node_desc->GetFlowUnitName();\n  auto device_info_list = StringSplit(node_desc->device_, ';');\n  if (device_info_list.empty()) {\n    MBLOG_ERROR << \"flowunit: \" << node_fu_name << \", device config error, [\"\n                << node_desc->device_ << \"]\";\n    return nullptr;\n  }\n\n  std::string device_name;\n  for (auto &device_info : device_info_list) {\n    auto device_info_item = StringSplit(device_info, ':');\n    if (device_info_item.empty()) {\n      continue;\n    }\n\n    auto device_name = device_info_item[0];\n    auto fu_desc = flowunit_mgr->GetFlowUnitDesc(device_name, node_fu_name);\n    if (fu_desc != nullptr) {\n      return fu_desc;\n    }\n  }\n\n  MBLOG_ERROR << \"can not find flowunit: \" << node_fu_name\n              << \", device: \" << node_desc->device_;\n  return nullptr;\n}\n\nvoid FlowGraphDesc::AddOutput(\n    const std::string &output_name, const std::string &device,\n    const std::shared_ptr<FlowPortDesc> &source_node_port) {\n  if (source_node_port == nullptr) {\n    MBLOG_ERROR << \"add output \" << output_name\n                << \" failed, source_node_port is null\";\n    
return;\n  }\n\n  if (node_name_idx_map_[output_name] >= 1) {\n    MBLOG_ERROR << \"Output name \" << output_name << \" has been used\";\n    return;\n  }\n\n  ++node_name_idx_map_[output_name];\n  auto node = std::make_shared<FlowNodeDesc>(output_name);\n  node->SetNodeType(GRAPH_NODE_OUTPUT);\n  node->SetDevice(device);\n  node->SetInputLinks({{output_name, source_node_port}});\n  node_desc_list_.push_back(node);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/api/flow_node_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow_node_desc.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nFlowPortDesc::FlowPortDesc(std::shared_ptr<FlowNodeDesc> node,\n                           std::string port_name)\n    : node_(std::move(node)),\n      is_in_name_{true},\n      port_name_(std::move(port_name)) {}\n\nFlowPortDesc::FlowPortDesc(std::shared_ptr<FlowNodeDesc> node, size_t port_idx)\n    : node_(std::move(node)), is_in_name_{false}, port_idx_(port_idx) {}\n\nstd::shared_ptr<FlowNodeDesc> FlowPortDesc::GetNode() { return node_; }\n\nstd::string FlowPortDesc::GetNodeName() { return node_->GetNodeName(); }\n\nbool FlowPortDesc::IsDescribeInName() { return is_in_name_; }\n\nstd::string FlowPortDesc::GetPortName() { return port_name_; }\n\nsize_t FlowPortDesc::GetPortIdx() { return port_idx_; }\n\nFlowNodeDesc::FlowNodeDesc(std::string node_name)\n    : node_name_(std::move(node_name)) {}\n\nFlowNodeDesc::~FlowNodeDesc() = default;\n\nvoid FlowNodeDesc::SetNodeName(const std::string &node_name) {\n  node_name_ = node_name;\n}\n\nstd::string FlowNodeDesc::GetNodeName() { return node_name_; }\n\nstd::shared_ptr<FlowPortDesc> FlowNodeDesc::operator[](\n    const std::string &output_name) {\n  if (type_ == GRAPH_NODE_INPUT) {\n    return std::make_shared<FlowPortDesc>(shared_from_this(), output_name);\n  }\n\n  return 
std::make_shared<FlowPortDesc>(shared_from_this(), output_name);\n}\n\nstd::shared_ptr<FlowPortDesc> FlowNodeDesc::operator[](size_t port_idx) {\n  if (type_ == GRAPH_NODE_INPUT) {\n    return std::make_shared<FlowPortDesc>(shared_from_this(), node_name_);\n  }\n\n  return std::make_shared<FlowPortDesc>(shared_from_this(), port_idx);\n}\n\nvoid FlowNodeDesc::SetNodeType(const std::string &type) { type_ = type; }\n\nvoid FlowNodeDesc::SetFlowUnitName(const std::string &flowunit_name) {\n  flowunit_name_ = flowunit_name;\n}\n\nstd::string FlowNodeDesc::GetFlowUnitName() { return flowunit_name_; }\n\nvoid FlowNodeDesc::SetDevice(const std::string &device) { device_ = device; }\n\nvoid FlowNodeDesc::SetConfig(const std::vector<std::string> &config) {\n  config_ = config;\n}\n\nstd::vector<std::string> FlowNodeDesc::GetNodeConfig() {\n  auto node_config = config_;\n  node_config.push_back(\"type=\" + type_);\n  if (type_ == GRAPH_NODE_FLOWUNIT) {\n    node_config.push_back(\"flowunit=\" + flowunit_name_);\n  }\n  node_config.push_back(\"device=\" + device_);\n  return node_config;\n}\n\nvoid FlowNodeDesc::SetInputLinks(\n    const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n        &source_node_ports) {\n  source_node_ports_ = source_node_ports;\n}\n\nconst std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n    &FlowNodeDesc::GetInputLinks() {\n  return source_node_ports_;\n}\n\nvoid FlowNodeDesc::Clear() { source_node_ports_.clear(); }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/api/flowunit_builder.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/flowunit_builder.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nRegFlowUnitFactory::RegFlowUnitFactory(std::shared_ptr<FlowUnitBuilder> builder)\n    : builder_(std::move(builder)) {\n  auto desc = std::make_shared<FlowUnitDesc>();\n  builder_->Probe(desc);\n  unit_type_ = desc->GetFlowUnitType();\n  unit_name_ = desc->GetFlowUnitName();\n}\n\nstd::map<std::string, std::shared_ptr<FlowUnitDesc>>\nRegFlowUnitFactory::FlowUnitProbe() {\n  std::map<std::string, std::shared_ptr<FlowUnitDesc>> flowunit_desc_map;\n  auto desc = std::make_shared<FlowUnitDesc>();\n  builder_->Probe(desc);\n  flowunit_desc_map[unit_name_] = desc;\n  return flowunit_desc_map;\n}\n\nstd::string RegFlowUnitFactory::GetFlowUnitFactoryType() { return unit_type_; }\n\nstd::string RegFlowUnitFactory::GetFlowUnitFactoryName() { return unit_name_; }\n\nstd::shared_ptr<FlowUnit> RegFlowUnitFactory::CreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type) {\n  return builder_->Build();\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/buffer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer.h\"\n\n#include <utility>\n\n#include \"modelbox/buffer_index_info.h\"\n\nnamespace modelbox {\n\nBufferMeta::BufferMeta() = default;\n\nBufferMeta::~BufferMeta() = default;\n\nStatus BufferMeta::CopyMeta(const std::shared_ptr<BufferMeta>& buf_meta,\n                            bool is_override) {\n  if (!buf_meta) {\n    return STATUS_FAULT;\n  }\n\n  custom_meta_.Merge(buf_meta->custom_meta_, is_override);\n\n  return STATUS_OK;\n}\n\nBufferMeta& BufferMeta::operator=(const BufferMeta& other) = default;\n\nstd::tuple<Any*, bool> BufferMeta::Get(const std::string& key) {\n  return custom_meta_.Get(key);\n}\n\nBufferMeta& BufferMeta::DeepCopy(const BufferMeta& other) {\n  custom_meta_ = other.custom_meta_;\n  return *this;\n}\n\nBuffer::Buffer()\n    : dev_mem_(nullptr),\n      delayed_copy_dest_device_(nullptr),\n      index_info_(std::make_shared<BufferIndexInfo>()) {\n  meta_ = std::make_shared<BufferMeta>();\n}\n\nBuffer::Buffer(const std::shared_ptr<Device>& device, uint32_t dev_mem_flags)\n    : Buffer() {\n  dev_mem_flags_ = dev_mem_flags;\n  if (device) {\n    dev_mem_ = device->MemAlloc(0);\n  }\n}\n\nBuffer::Buffer(const std::shared_ptr<DeviceMemory>& dev_mem) : Buffer() {\n  dev_mem_ = dev_mem;\n}\n\nBuffer::Buffer(const Buffer& other) : Buffer() {\n  
meta_->CopyMeta(other.meta_);\n  dev_mem_ = other.dev_mem_;\n  delayed_copy_dest_device_ = other.delayed_copy_dest_device_;\n  delayed_copy_dest_mem_flags_ = other.delayed_copy_dest_mem_flags_;\n  type_ = other.type_;\n  dev_mem_flags_ = other.dev_mem_flags_;\n  data_error_ = other.data_error_;\n}\n\nBuffer::~Buffer() = default;\n\nStatus Buffer::Build(size_t size) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"Can't get device!\"};\n  }\n\n  auto&& device = dev_mem_->GetDevice();\n  dev_mem_ = device->MemAlloc(size, dev_mem_flags_);\n\n  if (nullptr == dev_mem_) {\n    MBLOG_WARN << device << \" MemAlloc \" << size << \" byte data failed!\";\n    return STATUS_NOMEM;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Buffer::Build(void* data, size_t data_size, const DeleteFunction& func) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  auto device = dev_mem_->GetDevice();\n  std::shared_ptr<DeviceMemory> dev_mem;\n  if (func) {\n    dev_mem = device->MemAcquire(data, data_size, func, dev_mem_flags_);\n  } else {\n    dev_mem = device->MemAcquire(data, data_size, dev_mem_flags_);\n  }\n\n  if (!dev_mem) {\n    return {STATUS_NOMEM, \"device MemAcquire failed.\"};\n  }\n\n  dev_mem_ = dev_mem;\n  return STATUS_OK;\n}\n\nStatus Buffer::BuildFromHost(void* data, size_t data_size,\n                             const DeleteFunction& func) {\n  if (!dev_mem_ && !func) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  auto device = dev_mem_->GetDevice();\n  std::shared_ptr<DeviceMemory> dev_mem = nullptr;\n  if (dev_mem_->IsHost() && func) {\n    dev_mem = device->MemAcquire(data, data_size, func);\n  } else {\n    dev_mem = device->MemWrite(data, data_size);\n    if (!dev_mem) {\n      MBLOG_WARN << \" device MemWrite failed.\";\n      return STATUS_NOMEM;\n    }\n  }\n\n  dev_mem_ = dev_mem;\n\n  return STATUS_OK;\n}\n\nvoid* Buffer::MutableData() {\n  if (!dev_mem_) {\n    MBLOG_WARN << 
\"dev_mem_ is nullptr, may be exception buffer.\";\n    return nullptr;\n  }\n\n  auto&& data = dev_mem_->GetPtr<void>();\n  if (!data) {\n    return nullptr;\n  }\n\n  return data.get();\n}\n\nconst void* Buffer::ConstData() {\n  if (!dev_mem_) {\n    MBLOG_WARN << \"dev_mem_ is nullptr, may be exception buffer.\";\n    return nullptr;\n  }\n\n  auto status = MoveToTargetDevice();\n  if (!status) {\n    MBLOG_WARN << \"buffer move to target device faild.\" << status;\n    return nullptr;\n  }\n\n  auto&& data = dev_mem_->GetConstPtr<void>();\n  if (!data) {\n    return nullptr;\n  }\n\n  return data.get();\n}\n\nStatus Buffer::SetBufferMutable(bool is_mutable) {\n  if (!dev_mem_) {\n    return STATUS_OK;\n  }\n\n  return dev_mem_->SetContentMutable(is_mutable);\n}\n\nvoid Buffer::SetError(const std::string& error_code,\n                      const std::string& error_msg) {\n  data_error_ = std::make_shared<DataError>(error_code, error_msg);\n\n  dev_mem_ = nullptr;\n}\n\nbool Buffer::HasError() const { return data_error_ != nullptr; }\n\nstd::string Buffer::GetErrorCode() const {\n  if (data_error_ == nullptr) {\n    return \"\";\n  }\n  return data_error_->GetErrorCode();\n}\n\nstd::string Buffer::GetErrorMsg() const {\n  if (data_error_ == nullptr) {\n    return \"\";\n  }\n  return data_error_->GetErrorMsg();\n}\n\nsize_t Buffer::GetBytes() const { return dev_mem_ ? dev_mem_->GetSize() : 0; }\n\nstd::shared_ptr<Device> Buffer::GetDevice() const {\n  return dev_mem_ ? 
dev_mem_->GetDevice() : nullptr;\n}\n\nstd::tuple<Any*, bool> Buffer::Get(const std::string& key) {\n  return meta_->Get(key);\n}\n\nStatus Buffer::CopyMeta(const std::shared_ptr<Buffer>& buf, bool is_override) {\n  if (!buf) {\n    return {STATUS_INVALID, \"buffer must not be nullptr.\"};\n  }\n\n  auto status = meta_->CopyMeta(buf->meta_, is_override);\n  if (!status) {\n    MBLOG_WARN << \"buffer meta set meta failed.\" << status;\n  }\n\n  return status;\n}\n\nstd::shared_ptr<Buffer> Buffer::Copy() const {\n  return std::make_shared<Buffer>(*this);\n}\n\nstd::shared_ptr<Buffer> Buffer::DeepCopy() const {\n  auto buffer = std::make_shared<Buffer>();\n  auto status = buffer->DeepCopy(*this);\n  if (!status) {\n    MBLOG_ERROR << \"Buffer DeepCopy failed: \" << status;\n    return nullptr;\n  }\n\n  return buffer;\n}\n\nstd::shared_ptr<Buffer> Buffer::CopyTo(\n    const std::shared_ptr<Device>& dest_device) const {\n  if (dest_device == nullptr) {\n    return nullptr;\n  }\n\n  auto new_buffer = std::make_shared<Buffer>(dest_device);\n  auto status = new_buffer->DeepCopy(*this);\n  if (!status) {\n    MBLOG_ERROR << \"Buffer DeepCopy failed: \" << status;\n    return nullptr;\n  }\n\n  return new_buffer;\n}\n\nBufferEnumType Buffer::GetBufferType() const { return type_; }\n\nvoid Buffer::SetGetBufferType(BufferEnumType type) { type_ = type; }\n\nstd::shared_ptr<DeviceMemory> Buffer::GetDeviceMemory() const {\n  return dev_mem_;\n}\n\nStatus Buffer::DeepCopy(const Buffer& other) {\n  if (other.meta_) {\n    meta_ = std::make_shared<BufferMeta>();\n    meta_->DeepCopy(*(other.meta_));\n  } else {\n    meta_ = nullptr;\n  }\n\n  if (!other.dev_mem_) {\n    dev_mem_ = nullptr;\n    return STATUS_OK;\n  }\n\n  if (dev_mem_ == nullptr) {\n    dev_mem_ = other.dev_mem_->Clone(true);\n  } else {\n    auto&& device = dev_mem_->GetDevice();\n    dev_mem_ = device->MemAlloc(other.dev_mem_->GetSize());\n    dev_mem_->ReadFrom(other.dev_mem_, 0, other.dev_mem_->GetSize());\n  
}\n\n  if (!dev_mem_) {\n    return {STATUS_NOMEM, \"device memory copy failed.\"};\n  }\n\n  return STATUS_OK;\n}\n\nvoid Buffer::SetDelayedCopyDestinationDevice(\n    std::shared_ptr<Device> dest_device) {\n  delayed_copy_dest_device_ = std::move(dest_device);\n}\n\nvoid Buffer::SetDelayedCopyDestinationMemFlags(uint32_t mem_flags) {\n  delayed_copy_dest_mem_flags_ = mem_flags;\n}\n\nvoid Buffer::ClearDelayedCopyDestinationInfo() {\n  delayed_copy_dest_device_ = nullptr;\n  delayed_copy_dest_mem_flags_ = 0;\n}\n\nbool Buffer::GetDelayedCopyFlag(const std::shared_ptr<Device>& dest_device) {\n  // if current buffer device type is \"cuda\"/\"ascend\" and input port device\n  // type is \"cpu\" , the real data will be copied to target device when the\n  // user calls Buffer::ConstData()\n  return dev_mem_ ? (\"cpu\" == dest_device->GetType() &&\n                     \"cpu\" != dev_mem_->GetDevice()->GetType())\n                  : false;\n}\n\nStatus Buffer::MoveToTargetDevice() {\n  // no need move\n  if (delayed_copy_dest_device_ == nullptr) {\n    return STATUS_OK;\n  }\n\n  if (delayed_copy_dest_device_ == dev_mem_->GetDevice() &&\n      delayed_copy_dest_mem_flags_ == dev_mem_->GetMemFlags()) {\n    return STATUS_OK;\n  }\n\n  auto data_size = GetBytes();\n  auto dev_mem = delayed_copy_dest_device_->MemAlloc(\n      data_size, delayed_copy_dest_mem_flags_);\n  if (!dev_mem) {\n    return {STATUS_NOMEM, \"target device memory alloc faied.\"};\n  }\n  if (data_size != 0) {\n    dev_mem->ReadFrom(dev_mem_, 0, data_size);\n  }\n\n  dev_mem_ = dev_mem;\n  delayed_copy_dest_device_ = nullptr;\n  return STATUS_SUCCESS;\n}\n\nvoid Buffer::SetPriority(int priority) { priority_ = priority; }\n\nint Buffer::GetPriority() { return priority_; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/buffer_index_info.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <utility>\n\n#include \"modelbox/buffer_index_info.h\"\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/stream.h\"\n\nnamespace modelbox {\n\nvoid BufferInheritInfo::SetType(BufferProcessType type) { type_ = type; }\n\nBufferProcessType BufferInheritInfo::GetType() { return type_; }\n\nvoid BufferInheritInfo::SetInheritFrom(\n    const std::shared_ptr<BufferIndexInfo> &buffer_index) {\n  inherit_from_buffer_ = buffer_index;\n  auto inherit_info = buffer_index->GetInheritInfo();\n  if (inherit_info == nullptr) {\n    return;\n  }\n\n  inherit_deepth_ = inherit_info->GetDeepth() + 1;\n}\n\nstd::shared_ptr<BufferIndexInfo> BufferInheritInfo::GetInheritFrom() {\n  return inherit_from_buffer_;\n}\n\nsize_t BufferInheritInfo::GetDeepth() { return inherit_deepth_; }\n\nvoid BufferProcessInfo::SetParentBuffers(\n    const std::string &port_name,\n    std::list<std::shared_ptr<BufferIndexInfo>> &&port_buffers) {\n  parent_buffers_[port_name] = port_buffers;\n}\n\nconst std::map<std::string, std::list<std::shared_ptr<BufferIndexInfo>>>\n    &BufferProcessInfo::GetParentBuffers() {\n  return parent_buffers_;\n}\n\nvoid BufferProcessInfo::SetType(BufferProcessType type) { type_ = type; }\n\nBufferProcessType BufferProcessInfo::GetType() { return type_; 
}\n\nBufferIndexInfo::BufferIndexInfo() = default;\n\nBufferIndexInfo::~BufferIndexInfo() = default;\n\nvoid BufferIndexInfo::SetInheritInfo(\n    std::shared_ptr<BufferInheritInfo> inherit_info) {\n  inherit_info_ = std::move(inherit_info);\n}\n\nstd::shared_ptr<BufferInheritInfo> BufferIndexInfo::GetInheritInfo() {\n  return inherit_info_;\n}\n\nvoid BufferIndexInfo::SetStream(std::shared_ptr<Stream> stream_belong_to) {\n  stream_belong_to_ = std::move(stream_belong_to);\n}\n\nstd::shared_ptr<Stream> BufferIndexInfo::GetStream() {\n  return stream_belong_to_;\n}\n\nvoid BufferIndexInfo::SetIndex(size_t index) {\n  index_in_current_stream_ = index;\n}\n\nsize_t BufferIndexInfo::GetIndex() { return index_in_current_stream_; }\n\nbool BufferIndexInfo::IsFirstBufferInStream() {\n  return index_in_current_stream_ == 0;\n}\n\nvoid BufferIndexInfo::MarkAsEndFlag() { is_end_flag_ = true; }\n\nbool BufferIndexInfo::IsEndFlag() { return is_end_flag_; }\n\nvoid BufferIndexInfo::MarkAsPlaceholder() { is_placeholder_ = true; }\n\nbool BufferIndexInfo::IsPlaceholder() { return is_placeholder_; }\n\nvoid BufferIndexInfo::SetProcessInfo(\n    std::shared_ptr<BufferProcessInfo> process_info) {\n  process_info_ = std::move(process_info);\n}\n\nstd::shared_ptr<BufferProcessInfo> BufferIndexInfo::GetProcessInfo() {\n  return process_info_;\n}\n\nstd::shared_ptr<BufferIndexInfo> BufferManageView::GetIndexInfo(\n    const std::shared_ptr<Buffer> &buffer) {\n  return buffer->index_info_;\n}\n\nvoid BufferManageView::SetIndexInfo(\n    const std::shared_ptr<Buffer> &buffer,\n    std::shared_ptr<BufferIndexInfo> buffer_index_info) {\n  buffer->index_info_ = std::move(buffer_index_info);\n}\n\nstd::shared_ptr<BufferIndexInfo> BufferManageView::GetFirstParentBuffer(\n    const std::shared_ptr<Buffer> &buffer) {\n  if (buffer->index_info_ == nullptr) {\n    MBLOG_ERROR << \"buffer index info is null\";\n    return nullptr;\n  }\n\n  auto process_info = 
buffer->index_info_->GetProcessInfo();\n  if (process_info == nullptr) {\n    MBLOG_ERROR << \"buffer process info is null\";\n    return nullptr;\n  }\n\n  const auto &parent_buffers = process_info->GetParentBuffers();\n  if (parent_buffers.empty()) {\n    MBLOG_ERROR << \"buffer parent info is empty\";\n    return nullptr;\n  }\n\n  return parent_buffers.begin()->second.front();\n}\n\nvoid BufferManageView::SetPriority(const std::shared_ptr<Buffer> &buffer,\n                                   int priority) {\n  buffer->SetPriority(priority);\n}\n\nint BufferManageView::GetPriority(const std::shared_ptr<Buffer> &buffer) {\n  return buffer->GetPriority();\n}\n\nvoid BufferManageView::SetError(const std::shared_ptr<Buffer> &buffer,\n              const std::shared_ptr<DataError> &data_error) {\n  buffer->data_error_ = data_error;\n}\n\nstd::shared_ptr<DataError> BufferManageView::GetError(\n    const std::shared_ptr<Buffer> &buffer) {\n  return buffer->data_error_;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/buffer_list.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer_list.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nBufferList::BufferList() : dev_mem_(nullptr) {}\n\nBufferList::BufferList(const std::shared_ptr<Device>& device,\n                       uint32_t device_mem_flags)\n    : dev_mem_flags_(device_mem_flags) {\n  if (device) {\n    dev_mem_ = device->MemAlloc(0, device_mem_flags);\n  }\n}\n\nBufferList::BufferList(const std::shared_ptr<Buffer>& buffer) : BufferList() {\n  buffer_list_.push_back(buffer);\n}\n\nBufferList::BufferList(\n    const std::vector<std::shared_ptr<Buffer>>& buffer_vector)\n    : BufferList() {\n  buffer_list_.assign(buffer_vector.begin(), buffer_vector.end());\n}\n\nBufferList::BufferList(const BufferList& other) {\n  buffer_list_.clear();\n  is_contiguous_ = other.is_contiguous_;\n  dev_mem_ = other.dev_mem_;\n  dev_mem_flags_ = other.dev_mem_flags_;\n  buffer_list_.reserve(other.buffer_list_.size());\n  for (const auto& buffer : other) {\n    if (buffer == nullptr) {\n      buffer_list_.push_back(nullptr);\n      continue;\n    }\n\n    buffer_list_.push_back(buffer->Copy());\n  }\n}\n\nBufferList::~BufferList() = default;\n\nvoid BufferList::Copy(\n    const std::vector<std::shared_ptr<Buffer>>& buffer_vector) {\n  buffer_list_.assign(buffer_vector.begin(), buffer_vector.end());\n}\n\nStatus 
BufferList::Reset() {\n  buffer_list_.clear();\n  is_contiguous_ = false;\n  if (dev_mem_) {\n    auto device = dev_mem_->GetDevice();\n    dev_mem_ = device->MemAlloc(0, dev_mem_flags_);\n    return dev_mem_ ? STATUS_SUCCESS : STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\nsize_t BufferList::Size() const { return buffer_list_.size(); }\n\nsize_t BufferList::GetBytes() const {\n  size_t byte_size = 0;\n  for (const auto& buffer : buffer_list_) {\n    byte_size += buffer->GetBytes();\n  }\n\n  return byte_size;\n}\n\nstd::vector<std::shared_ptr<Buffer>>::iterator BufferList::begin() {\n  return buffer_list_.begin();\n}\n\nstd::vector<std::shared_ptr<Buffer>>::const_iterator BufferList::begin() const {\n  return buffer_list_.begin();\n}\n\nstd::vector<std::shared_ptr<Buffer>>::iterator BufferList::end() {\n  return buffer_list_.end();\n}\n\nstd::vector<std::shared_ptr<Buffer>>::const_iterator BufferList::end() const {\n  return buffer_list_.end();\n}\n\nstd::shared_ptr<Buffer>& BufferList::operator[](size_t pos) {\n  return buffer_list_[pos];\n}\n\nconst std::shared_ptr<Buffer>& BufferList::operator[](size_t pos) const {\n  return buffer_list_[pos];\n}\n\nvoid BufferList::PushBack(const std::shared_ptr<Buffer>& buf) {\n  buffer_list_.push_back(buf);\n  SetNoContiguous();\n}\n\nvoid BufferList::Assign(\n    const std::vector<std::shared_ptr<Buffer>>& buffer_list) {\n  buffer_list_ = buffer_list;\n  SetNoContiguous();\n}\n\nvoid BufferList::Swap(std::vector<std::shared_ptr<Buffer>>& buffer_list) {\n  buffer_list_.swap(buffer_list);\n  SetNoContiguous();\n}\n\nvoid BufferList::SetNoContiguous() {\n  is_contiguous_ = false;\n  if (dev_mem_) {\n    auto device = dev_mem_->GetDevice();\n    dev_mem_ = device->MemAlloc(0, dev_mem_flags_);\n  }\n}\n\nbool BufferList::IsContiguous() const { return is_contiguous_; }\n\nStatus BufferList::SetMutable(bool is_mutable) {\n  for (auto& buff : buffer_list_) {\n    auto status = buff->SetBufferMutable(is_mutable);\n    if (!status) 
{\n      MBLOG_WARN << \"SetBufferMutable failed:\" << status;\n      return status;\n    }\n  }\n\n  if (dev_mem_) {\n    return dev_mem_->SetContentMutable(is_mutable);\n  }\n\n  return STATUS_OK;\n}\n\nvoid* BufferList::MutableBufferData(size_t idx) {\n  if (idx >= buffer_list_.size()) {\n    MBLOG_WARN << \"invalid idx: \" << idx\n               << \" buff_vec_view_.size(): \" << buffer_list_.size();\n    return nullptr;\n  }\n\n  return buffer_list_[idx]->MutableData();\n}\n\nconst void* BufferList::ConstBufferData(size_t idx) const {\n  if (idx >= buffer_list_.size()) {\n    MBLOG_WARN << \"invalid idx: \" << idx\n               << \" buff_vec_view_.size(): \" << buffer_list_.size();\n    return nullptr;\n  }\n\n  return buffer_list_[idx]->ConstData();\n}\n\nvoid* BufferList::MutableData() {\n  auto size = Size();\n  if (size == 1) {\n    return MutableBufferData(0);\n  }\n\n  if (size > 1 && IsContiguous() && dev_mem_) {\n    auto&& ptr = dev_mem_->GetPtr<void>();\n    return !ptr ? nullptr : ptr.get();\n  }\n\n  return nullptr;\n}\n\nconst void* BufferList::ConstData() const {\n  auto size = Size();\n  if (size == 1) {\n    return ConstBufferData(0);\n  }\n\n  if (size > 1 && IsContiguous() && dev_mem_) {\n    auto&& ptr = dev_mem_->GetConstPtr<void>();\n    return !ptr ? 
nullptr : ptr.get();\n  }\n\n  return nullptr;\n}\n\nstd::shared_ptr<Buffer> BufferList::At(size_t idx) {\n  if (idx >= buffer_list_.size()) {\n    return nullptr;\n  }\n\n  return buffer_list_.at(idx);\n}\n\nstd::shared_ptr<Buffer> BufferList::At(size_t idx) const {\n  if (idx >= buffer_list_.size()) {\n    return nullptr;\n  }\n\n  return buffer_list_.at(idx);\n}\n\nStatus BufferList::CopyToNewBufferList(std::shared_ptr<DeviceMemory>& dev_mem) {\n  size_t offset = 0;\n  size_t buff_size = 0;\n  std::vector<std::shared_ptr<Buffer>> new_buffer_list;\n  new_buffer_list.reserve(buffer_list_.size());\n  for (auto& buffer : buffer_list_) {\n    if (!buffer) {\n      new_buffer_list.push_back(buffer);\n      continue;\n    }\n\n    auto new_buffer = buffer->Copy();\n    new_buffer->index_info_ = buffer->index_info_;\n    if (!new_buffer) {\n      MBLOG_ERROR << \"Buffer copy failed.\";\n      return STATUS_FAULT;\n    }\n\n    new_buffer_list.push_back(new_buffer);\n    buff_size = new_buffer->GetBytes();\n    if (0 == buff_size) {\n      continue;\n    }\n\n    new_buffer->dev_mem_ = dev_mem->Cut(offset, buff_size);\n    if (!new_buffer->dev_mem_) {\n      MBLOG_ERROR << \"device memory cut failed.\";\n      return STATUS_NOMEM;\n    }\n\n    offset += buff_size;\n  }\n\n  buffer_list_.swap(new_buffer_list);\n  return STATUS_OK;\n}\n\nStatus BufferList::GenerateDeviceMemory(\n    const std::vector<std::shared_ptr<DeviceMemory>>& buffer_dev_mems) {\n  bool is_contiguous = false;\n  if (dev_mem_ && dev_mem_->IsSameDevice(buffer_dev_mems[0])) {\n    is_contiguous = DeviceMemory::IsContiguous(buffer_dev_mems);\n  }\n\n  if (is_contiguous) {\n    is_contiguous_ = true;\n    auto device =\n        dev_mem_ ? 
dev_mem_->GetDevice() : buffer_dev_mems[0]->GetDevice();\n    auto dev_mem =\n        DeviceMemory::Combine(buffer_dev_mems, device, dev_mem_flags_);\n    if (!dev_mem) {\n      MBLOG_ERROR << \"DeviceMemory Combine failed.\";\n      return STATUS_NOMEM;\n    }\n\n    dev_mem_ = dev_mem;\n    return STATUS_OK;\n  }\n\n  auto device =\n      dev_mem_ ? dev_mem_->GetDevice() : buffer_dev_mems[0]->GetDevice();\n  auto dev_mem = DeviceMemory::Combine(buffer_dev_mems, device, dev_mem_flags_);\n  if (!dev_mem) {\n    MBLOG_ERROR << \"DeviceMemory Combine failed.\";\n    return STATUS_NOMEM;\n  }\n\n  auto ret = CopyToNewBufferList(dev_mem);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  dev_mem_ = dev_mem;\n  is_contiguous_ = true;\n  return STATUS_OK;\n}\n\nStatus BufferList::MakeContiguous() {\n  if (!SupportMemContiguous()) {\n    return {STATUS_NOTSUPPORT, \"not support mem contiguous\"};\n  }\n\n  std::vector<std::shared_ptr<DeviceMemory>> buffer_dev_mems;\n  for (auto& buffer : buffer_list_) {\n    if (buffer->HasError() || nullptr == buffer->dev_mem_) {\n      continue;\n    }\n\n    buffer_dev_mems.push_back(buffer->dev_mem_);\n  }\n\n  if (0 == buffer_dev_mems.size()) {\n    is_contiguous_ = true;\n    return STATUS_OK;\n  }\n\n  auto ret = GenerateDeviceMemory(buffer_dev_mems);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<Device> BufferList::GetDevice() {\n  return dev_mem_ ? 
dev_mem_->GetDevice() : nullptr;\n}\n\nstd::shared_ptr<DeviceMemory> BufferList::GetDeviceMemory() { return dev_mem_; }\n\nStatus BufferList::CopyMeta(const std::shared_ptr<BufferList>& bufferlist,\n                            bool is_override) {\n  if (!bufferlist || Size() != bufferlist->Size()) {\n    return STATUS_FAULT;\n  }\n\n  auto status = STATUS_OK;\n  for (size_t i = 0; i < buffer_list_.size(); ++i) {\n    status = buffer_list_[i]->CopyMeta(bufferlist->At(i), is_override);\n    if (!status) {\n      MBLOG_WARN << \"buffer list copt meta failed:\" << status;\n      return status;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus BufferList::BuildContiguous(const std::shared_ptr<Device>& device,\n                                   const std::vector<size_t>& data_size_list) {\n  size_t size = std::accumulate(data_size_list.begin(), data_size_list.end(),\n                                (size_t)0, std::plus<size_t>());\n  auto mem = device->MemAlloc(size, dev_mem_flags_);\n  if (!mem) {\n    MBLOG_WARN << \" MemAlloc \" << size << \" byte data failed\";\n    return STATUS_NOMEM;\n  }\n\n  dev_mem_ = mem;\n  buffer_list_.resize(data_size_list.size(), nullptr);\n\n  size_t offset = 0;\n  for (size_t i = 0; i < buffer_list_.size(); i++) {\n    auto&& mem = dev_mem_->Cut(offset, data_size_list[i]);\n    buffer_list_[i] = std::make_shared<Buffer>(mem);\n    offset += data_size_list[i];\n  }\n\n  is_contiguous_ = true;\n  return STATUS_OK;\n}\n\nStatus BufferList::BuildSeparate(const std::shared_ptr<Device>& device,\n                                 const std::vector<size_t>& data_size_list) {\n  if (dev_mem_->GetSize() != 0) {\n    dev_mem_ = device->MemAlloc(0, dev_mem_flags_);\n  }\n\n  buffer_list_.resize(data_size_list.size(), nullptr);\n  for (size_t i = 0; i < buffer_list_.size(); ++i) {\n    const auto& size = data_size_list[i];\n    buffer_list_[i] =\n        std::make_shared<Buffer>(device->MemAlloc(size, dev_mem_flags_));\n  }\n\n  is_contiguous_ = 
false;\n  return STATUS_OK;\n}\n\nStatus BufferList::Build(const std::vector<size_t>& data_size_list,\n                         bool contiguous) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  auto device = dev_mem_->GetDevice();\n  if (device == nullptr) {\n    return {STATUS_INVALID, \"device is invalid\"};\n  }\n\n  buffer_list_.clear();\n  return contiguous && SupportMemContiguous()\n             ? BuildContiguous(device, data_size_list)\n             : BuildSeparate(device, data_size_list);\n}\n\nStatus BufferList::BuildFromHost(const std::vector<size_t>& data_size_list,\n                                 void* data, size_t data_size,\n                                 const DeleteFunction& func) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  size_t size = std::accumulate(data_size_list.begin(), data_size_list.end(),\n                                (size_t)0, std::plus<size_t>());\n  if (data_size < size) {\n    MBLOG_WARN << \"invalid data size. 
size: \" << size\n               << \" data_size: \" << data_size;\n    return STATUS_RANGE;\n  }\n\n  auto device = dev_mem_->GetDevice();\n  if (dev_mem_->IsHost() && func) {\n    std::shared_ptr<void> data_ptr(data, func);\n    dev_mem_ = device->MemAcquire(data, data_size, func);\n  } else {\n    dev_mem_ = device->MemWrite(data, data_size);\n    if (!dev_mem_) {\n      MBLOG_WARN << \" device MemWrite failed.\";\n      return STATUS_NOMEM;\n    }\n  }\n\n  buffer_list_.resize(data_size_list.size(), nullptr);\n\n  size_t offset = 0;\n  for (size_t i = 0; i < buffer_list_.size(); i++) {\n    auto&& mem = dev_mem_->Cut(offset, data_size_list[i]);\n    buffer_list_[i] = std::make_shared<Buffer>(mem);\n    offset += data_size_list[i];\n  }\n\n  is_contiguous_ = true;\n\n  return STATUS_OK;\n}\n\nstd::vector<std::shared_ptr<DeviceMemory>>\nBufferList::GetAllBufferDeviceMemory() {\n  std::vector<std::shared_ptr<DeviceMemory>> buffer_dev_mems;\n  for (auto& buffer : buffer_list_) {\n    if (buffer->HasError() || nullptr == buffer->dev_mem_) {\n      continue;\n    }\n\n    buffer_dev_mems.push_back(buffer->dev_mem_);\n  }\n  return buffer_dev_mems;\n}\n\nStatus BufferList::MoveAllBufferToTargetDevice() {\n  if (buffer_list_.empty()) {\n    return STATUS_OK;\n  }\n\n  if (dev_mem_ == nullptr) {\n    MBLOG_ERROR << \"dev_mem of buffer_list should not be null\";\n    return STATUS_FAULT;\n  }\n\n  std::vector<std::shared_ptr<Buffer>> new_buffer_list;\n  auto buffer_count = buffer_list_.size();\n  new_buffer_list.reserve(buffer_count);\n  auto target_device = dev_mem_->GetDevice();\n  if (target_device == nullptr) {\n    MBLOG_ERROR << \"target device is nullptr\";\n    return STATUS_FAULT;\n  }\n\n  for (auto& buffer : buffer_list_) {\n    if (buffer == nullptr) {\n      MBLOG_ERROR << \"buffer in buffer list is nullptr\";\n      return STATUS_FAULT;\n    }\n\n    if (buffer->HasError()) {\n      new_buffer_list.push_back(buffer);\n      continue;\n    }\n\n    auto 
same_device_flag = dev_mem_->IsSameDevice(buffer->dev_mem_);\n    auto delayed_copy_flag = buffer->GetDelayedCopyFlag(target_device);\n    // No need to copy real data or need delayed copy .\n    if (same_device_flag || delayed_copy_flag) {\n      auto new_mem = buffer->dev_mem_->Clone();\n      auto new_buffer = std::make_shared<Buffer>(new_mem);\n      new_buffer->CopyMeta(buffer);\n      if (delayed_copy_flag) {\n        new_buffer->SetDelayedCopyDestinationDevice(target_device);\n        new_buffer->SetDelayedCopyDestinationMemFlags(dev_mem_flags_);\n      }\n      new_buffer->index_info_ = buffer->index_info_;\n      new_buffer_list.push_back(new_buffer);\n      continue;\n    }\n    auto data_size = buffer->GetBytes();\n    auto dev_mem = target_device->MemAlloc(data_size, dev_mem_flags_);\n    if (!dev_mem) {\n      return STATUS_NOMEM;\n    }\n    auto new_buffer = std::make_shared<Buffer>(dev_mem);\n    new_buffer_list.push_back(new_buffer);\n    new_buffer->CopyMeta(buffer);\n    new_buffer->index_info_ = buffer->index_info_;\n    if (data_size == 0) {\n      continue;\n    }\n    new_buffer->dev_mem_->ReadFrom(buffer->dev_mem_, 0, data_size);\n  }\n\n  buffer_list_.swap(new_buffer_list);\n  return STATUS_OK;\n}\n\nvoid BufferList::SetError(const std::string& error_code,\n                          const std::string& error_msg) {\n  if (buffer_list_.empty()) {\n    return;\n  }\n\n  for (auto& buffer : buffer_list_) {\n    buffer->SetError(error_code, error_msg);\n  }\n}\n\nStatus BufferList::EmplaceBack(void* device_data, size_t data_size,\n                               const DeleteFunction& func) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  auto device = dev_mem_->GetDevice();\n  auto buffer = std::make_shared<Buffer>(device, dev_mem_flags_);\n  auto ret = buffer->Build(device_data, data_size, func);\n  if (!ret) {\n    return ret;\n  }\n\n  PushBack(buffer);\n  return STATUS_OK;\n}\n\nStatus 
BufferList::EmplaceBack(const std::shared_ptr<void>& device_data,\n                               size_t data_size) {\n  auto delete_func = [device_data](void* /*unused*/) { /* hold device data */ };\n  return EmplaceBack(device_data.get(), data_size, delete_func);\n}\n\nStatus BufferList::EmplaceBackFromHost(void* host_data, size_t data_size) {\n  if (!dev_mem_) {\n    return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n  }\n\n  auto device = dev_mem_->GetDevice();\n  auto buffer = std::make_shared<Buffer>(device, dev_mem_flags_);\n  auto ret = buffer->BuildFromHost(host_data, data_size);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  PushBack(buffer);\n  return STATUS_OK;\n}\n\nstd::shared_ptr<Buffer> BufferList::Front() {\n  if (buffer_list_.empty()) {\n    return nullptr;\n  }\n\n  return buffer_list_.front();\n}\n\nstd::shared_ptr<Buffer> BufferList::Back() {\n  if (buffer_list_.empty()) {\n    return nullptr;\n  }\n\n  return buffer_list_.back();\n}\n\nbool BufferList::SupportMemContiguous() {\n  auto dev = GetDevice();\n  if (dev == nullptr) {\n    return false;\n  }\n\n  return dev->SupportMemContiguous();\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/buffer_type.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer_type.h\"\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\nnamespace modelbox {\nBufferType::BufferType() = default;\nBufferType::BufferType(std::string type) : type_(std::move(type)) {}\nBufferType::~BufferType() = default;\n\nstd::shared_ptr<BufferTypeTree> BufferTypeTree::instance_(nullptr);\n\nvoid BufferType::SetType(std::string type) { type_ = std::move(type); }\n\nconst std::string& BufferType::GetType() const { return type_; }\n\nbool BufferType::AddParentType(const std::shared_ptr<BufferType>& parent) {\n  if (parent == nullptr) {\n    return false;\n  }\n\n  if (parent_ != nullptr) {\n    return false;\n  }\n\n  parent_ = parent;\n  return true;\n}\n\nbool BufferType::AddChildType(const std::shared_ptr<BufferType>& child) {\n  bool reuslt = false;\n  if (child == nullptr) {\n    return false;\n  }\n\n  bool add_flag = true;\n  auto type = child->GetType();\n  for (const auto& own_child : children_) {\n    if (own_child->GetType() == type) {\n      add_flag = false;\n      break;\n    }\n  }\n\n  if (add_flag) {\n    children_.push_back(child);\n    reuslt = true;\n  }\n\n  return reuslt;\n}\n\nvoid BufferType::ClearChildType() { children_.clear(); }\n\nvoid BufferType::ClearParentType() { parent_ = nullptr; }\n\nvoid BufferType::RemoveType() {\n  if (parent_ != 
nullptr) {\n    auto children = parent_->GetChildrenType();\n    std::vector<std::shared_ptr<BufferType>> keep_children;\n    for (const auto& child : children) {\n      if (child->GetType() != this->GetType()) {\n        keep_children.push_back(child);\n      }\n    }\n\n    parent_->ClearChildType();\n    for (const auto& keep_child : keep_children) {\n      parent_->AddChildType(keep_child);\n    }\n    parent_ = nullptr;\n  }\n  children_.clear();\n}\n\nbool BufferType::IsAncestor(const BufferType& other) {\n  const auto& type = other.GetType();\n\n  for (const auto& child : children_) {\n    if (child->GetType() == type) {\n      return true;\n    }\n\n    if (IsAncestor(*(child.get()))) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nbool BufferType::IsOffspring(const BufferType& other) {\n  const auto& type = other.GetType();\n\n  if (parent_ == nullptr) {\n    return false;\n  }\n\n  if (parent_->GetType() == type) {\n    return true;\n  }\n\n  return parent_->IsOffspring(other);\n}\n\nstd::shared_ptr<BufferType> BufferType::GetParentType() { return parent_; }\n\nstd::vector<std::shared_ptr<BufferType>> BufferType::GetChildrenType() {\n  return children_;\n}\n\nBufferTypeTree::BufferTypeTree() = default;\n\nBufferTypeTree::~BufferTypeTree() = default;\n\nbool BufferTypeTree::AddRootType(const std::string& root_type) {\n  std::shared_ptr<BufferType> root_buffer_type_ptr = GetType(root_);\n\n  if (root_buffer_type_ptr == nullptr) {\n    auto* root_buffer_type = new BufferType();\n    root_buffer_type->SetType(root_type);\n    root_buffer_type_ptr.reset(root_buffer_type);\n    nodes_.insert(std::make_pair(root_type, root_buffer_type_ptr));\n    root_ = root_type;\n    return true;\n  }\n\n  if (root_ == root_type) {\n    return true;\n  }\n\n  return false;\n}\n\nbool BufferTypeTree::AddType(const std::string& type,\n                             const std::string& parent_type) {\n  std::shared_ptr<BufferType> child_buffer_type_ptr = GetType(type);\n 
 std::shared_ptr<BufferType> parent_buffer_type_ptr = GetType(parent_type);\n\n  if (parent_buffer_type_ptr == nullptr) {\n    return false;\n  }\n\n  if (child_buffer_type_ptr != nullptr) {\n    return false;\n  }\n\n  auto* child_buffer_type = new BufferType();\n  child_buffer_type->SetType(type);\n  child_buffer_type_ptr.reset(child_buffer_type);\n  if (!parent_buffer_type_ptr->AddChildType(child_buffer_type_ptr)) {\n    return false;\n  }\n\n  if (!child_buffer_type_ptr->AddParentType(parent_buffer_type_ptr)) {\n    return false;\n  }\n\n  nodes_.insert(std::make_pair(type, child_buffer_type_ptr));\n  return true;\n}\n\nstd::shared_ptr<BufferType> BufferTypeTree::GetType(const std::string& type) {\n  std::shared_ptr<BufferType> buffer_type = nullptr;\n  for (const auto& node : nodes_) {\n    if (node.first == type) {\n      buffer_type = node.second;\n      break;\n    }\n  }\n\n  return buffer_type;\n}\n\nbool BufferTypeTree::RemoveType(const std::string& type) {\n  std::shared_ptr<BufferType> buffer_type_ptr = GetType(type);\n\n  if (buffer_type_ptr == nullptr) {\n    return false;\n  }\n\n  for (const auto& child_type : buffer_type_ptr->GetChildrenType()) {\n    RemoveType(child_type->GetType());\n  }\n  buffer_type_ptr->RemoveType();\n  nodes_.erase(type);\n\n  return true;\n}\n\nbool BufferTypeTree::IsCompatible(const std::string& type,\n                                  const std::string& ancestor_type) {\n  std::shared_ptr<BufferType> buffer_type_ptr = GetType(type);\n  std::shared_ptr<BufferType> ancestor_buffer_type_ptr = GetType(ancestor_type);\n\n  if (buffer_type_ptr == nullptr || ancestor_buffer_type_ptr == nullptr) {\n    return false;\n  }\n\n  if (buffer_type_ptr == ancestor_buffer_type_ptr) {\n    return true;\n  }\n\n  return ancestor_buffer_type_ptr->IsAncestor(*(buffer_type_ptr.get()));\n}\n\nBufferTypeTree* BufferTypeTree::GetInstance() {\n  if (nullptr == instance_) {\n    instance_.reset(new BufferTypeTree());\n  }\n  return 
instance_.get();\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/common/data_hub.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"engine/common/data_hub.h\"\n\n#include <algorithm>\n\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nDataHub::DataHub() = default;\nDataHub::~DataHub() = default;\n\nPriorityPort::PriorityPort(const std::shared_ptr<IPort>& port)\n    : active_time_(GetCurrentTime()), port_(port) {\n  if (port) {\n    priority_ = port->GetPriority();\n  } else {\n    priority_ = std::numeric_limits<int>::min();\n  }\n}\n\nPriorityPort::~PriorityPort() = default;\n\nconst std::shared_ptr<IPort>& PriorityPort::GetPort() const { return port_; }\nstd::shared_ptr<IPort> PriorityPort::GetPort() { return port_; }\n\nvoid PriorityPort::UpdateActiveTime() { active_time_ = GetCurrentTime(); }\n\nint32_t PriorityPort::GetPriority() const { return priority_; }\nvoid PriorityPort::SetPriority(int32_t priority) { priority_ = priority; }\n// TODO port update dynamic priority\nvoid PriorityPort::UpdatePriority() { priority_ = port_->GetPriority(); }\n\nvoid PriorityPort::SetPushEventCallBack(const PushCallBack& func) {\n  port_->SetPushEventCallBack(func);\n}\n\nvoid PriorityPort::SetPopEventCallBack(const PopCallBack& func) {\n  port_->SetPopEventCallBack(func);\n}\n\nbool PriorityPort::HasData() { return !port_->Empty(); }\n\nvoid PriorityPort::SetRuning(bool flag) { is_running_ = flag; }\nbool 
PriorityPort::IsRunning() { return is_running_; }\n\nbool PriorityPort::IsActivated() { return port_->IsActivated(); }\n\nstd::shared_ptr<NodeBase> PriorityPort::GetNode() const {\n  auto port = GetPort();\n  if (!port) {\n    return nullptr;\n  }\n\n  return port->GetNode();\n}\n\nbool PortCompare::operator()(const std::shared_ptr<PriorityPort>& left,\n                             const std::shared_ptr<PriorityPort>& right) {\n  if (left->port_ == right->port_) {\n    return false;\n  }\n\n  if (left->priority_ != right->priority_) {\n    return left->priority_ > right->priority_;\n  }\n\n  if (left->active_time_ != right->active_time_) {\n    return left->active_time_ < right->active_time_;\n  }\n\n  return left->port_ < right->port_;\n}\n\nDefaultDataHub::DefaultDataHub() = default;\n\nDefaultDataHub::~DefaultDataHub() {\n  std::lock_guard<std::mutex> guard(active_mutex_);\n  active_ports_.clear();\n\n  for (const auto& priority_port : priority_ports_) {\n    priority_port->SetPushEventCallBack(nullptr);\n    priority_port->SetPopEventCallBack(nullptr);\n  }\n\n  priority_ports_.clear();\n}\n\nStatus DefaultDataHub::AddPort(const std::shared_ptr<PriorityPort>& port) {\n  std::lock_guard<std::mutex> lock(active_mutex_);\n  if (!port) {\n    MBLOG_WARN << \"port is nullptr\";\n    return STATUS_INVALID;\n  }\n\n  auto iter = std::find(priority_ports_.begin(), priority_ports_.end(), port);\n  if (priority_ports_.end() != iter) {\n    MBLOG_WARN << port << \" port has been added to the data hub.\";\n    return STATUS_OK;\n  }\n\n  priority_ports_.push_back(port);\n\n  auto push_call_back = std::bind(&DefaultDataHub::PortEventCallback, this,\n                                  port, std::placeholders::_1);\n  port->SetPushEventCallBack(push_call_back);\n\n  auto pop_call_back =\n      std::bind(&DefaultDataHub::PortEventCallback, this, port, false);\n  port->SetPopEventCallBack(pop_call_back);\n\n  // If there is data in the port before the port is added, it needs to 
be added\n  // to the active port to solve the problem that the flow unit starts to run\n  // before the scheduler is initialized\n  if (port->HasData() && !port->IsRunning() && port->IsActivated()) {\n    UpdateActivePort(port, true);\n  }\n\n  return STATUS_OK;\n}\n\nStatus DefaultDataHub::AddToActivePort(\n    const std::shared_ptr<PriorityPort>& port) {\n  std::lock_guard<std::mutex> lock(active_mutex_);\n  if (!port) {\n    return {STATUS_INVALID, \"active_port is nullptr\"};\n  }\n\n  port->SetRuning(false);\n  if (port->HasData() && port->IsActivated()) {\n    active_ports_.insert(port);\n    cv_.notify_one();\n  }\n\n  return STATUS_OK;\n}\n\nStatus DefaultDataHub::AddToActivePort(\n    std::vector<std::shared_ptr<PriorityPort>>& ports) {\n  std::lock_guard<std::mutex> lock(active_mutex_);\n  for (auto& port : ports) {\n    if (!port) {\n      MBLOG_WARN << \"active_port is nullptr\";\n      continue;\n    }\n\n    port->SetRuning(false);\n    if (port->HasData() && port->IsActivated()) {\n      active_ports_.insert(port);\n    }\n  }\n\n  cv_.notify_all();\n  return STATUS_OK;\n}\n\nvoid DefaultDataHub::UpdateActivePort(const std::shared_ptr<PriorityPort>& port,\n                                      bool update_active_time) {\n  auto it = active_ports_.find(port);\n  if (active_ports_.end() != it) {\n    active_ports_.erase(it);\n  }\n\n  if (update_active_time) {\n    port->UpdateActiveTime();\n  }\n\n  port->UpdatePriority();\n  active_ports_.insert(port);\n}\n\nvoid DefaultDataHub::PortEventCallback(\n    const std::shared_ptr<PriorityPort>& port, bool update_active_time) {\n  std::lock_guard<std::mutex> lock(active_mutex_);\n  if (!port) {\n    MBLOG_WARN << \"port is nullptr\";\n    return;\n  }\n\n  if (port->HasData() && !port->IsRunning() && port->IsActivated()) {\n    UpdateActivePort(port, update_active_time);\n    cv_.notify_one();\n  }\n}\n\n/**\n * @brief Get the highest priority port that may contain data\n *\n * @param active_port Return 
the port containing the data\n * @param timeout Specify the timeout period, 0 means blocking\n * @return Status\n *  @retval STATUS_TIMEDOUT means timeout\n *  @retval STATUS_NODATA means no active data\n *  @retval STATUS_OK means success\n */\nStatus DefaultDataHub::SelectActivePort(\n    std::shared_ptr<PriorityPort>* active_port, int64_t timeout) {\n  auto pred = [this] { return !active_ports_.empty(); };\n  std::unique_lock<std::mutex> lock(active_mutex_);\n\n  if (timeout > 0) {\n    if (!cv_.wait_for(lock, std::chrono::milliseconds(timeout), pred)) {\n      return STATUS_TIMEDOUT;\n    }\n  } else if (0 == timeout) {\n    cv_.wait(lock, pred);\n  } else {\n    if (active_ports_.empty()) {\n      return STATUS_NODATA;\n    }\n  }\n\n  auto it = active_ports_.begin();\n  *active_port = *it;\n  (*active_port)->SetRuning(true);\n  active_ports_.erase(it);\n\n  return STATUS_OK;\n}\n\nvoid DefaultDataHub::RemoveFromActivePort(\n    std::vector<std::shared_ptr<PriorityPort>>& ports) {\n  std::unique_lock<std::mutex> lock(active_mutex_);\n  auto it = active_ports_.begin();\n  for (auto& port : ports) {\n    port->SetRuning(true);\n    it = active_ports_.find(port);\n    if (it != active_ports_.end()) {\n      active_ports_.erase(it);\n    }\n  }\n}\n\nsize_t DefaultDataHub::GetPortNum() const { return priority_ports_.size(); }\n\nsize_t DefaultDataHub::GetActivePortNum() const { return active_ports_.size(); }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/common/data_hub.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_MULTI_QUEUE_H_\n#define MODELBOX_MULTI_QUEUE_H_\n\n#include <modelbox/base/blocking_queue.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer.h>\n#include <modelbox/port.h>\n\n#include <memory>\n#include <set>\n#include <unordered_map>\n\nnamespace modelbox {\n\nclass PortCompare;\n\nclass PriorityPort {\n public:\n  PriorityPort(const std::shared_ptr<IPort>& port);\n\n  virtual ~PriorityPort();\n\n  const std::shared_ptr<IPort>& GetPort() const;\n  std::shared_ptr<IPort> GetPort();\n\n  std::shared_ptr<NodeBase> GetNode() const;\n\n  void UpdateActiveTime();\n\n  int32_t GetPriority() const;\n  void SetPriority(int32_t priority);\n  // TODO port优先级更新\n  void UpdatePriority();\n\n  void SetPushEventCallBack(const PushCallBack& func);\n  void SetPopEventCallBack(const PopCallBack& func);\n\n  bool HasData();\n\n  void SetRuning(bool flag);\n\n  bool IsRunning();\n\n  bool IsActivated();\n\n  friend PortCompare;\n\n private:\n  int32_t priority_{0};\n  int64_t active_time_{0};\n  bool is_running_{false};\n  std::shared_ptr<IPort> port_;\n};\n\n/**\n * @brief DataHub Class interface\n * Pure virtual class, can not instantiable\n *\n */\nclass DataHub {\n public:\n  DataHub();\n  virtual ~DataHub();\n\n  virtual Status AddPort(const 
std::shared_ptr<PriorityPort>& port) = 0;\n  virtual Status SelectActivePort(std::shared_ptr<PriorityPort>* active_port,\n                                  int64_t timeout = 0) = 0;\n\n  virtual size_t GetPortNum() const = 0;\n  virtual size_t GetActivePortNum() const = 0;\n\n  virtual void RemoveFromActivePort(\n      std::vector<std::shared_ptr<PriorityPort>>& ports) = 0;\n  virtual Status AddToActivePort(\n      std::vector<std::shared_ptr<PriorityPort>>& ports) = 0;\n  virtual Status AddToActivePort(const std::shared_ptr<PriorityPort>& port) = 0;\n};\n\nclass PortCompare {\n public:\n  bool operator()(const std::shared_ptr<PriorityPort>& left,\n                  const std::shared_ptr<PriorityPort>& right);\n};\n\n/**\n * @brief DataHub default implementation class\n *\n */\nclass DefaultDataHub : public DataHub {\n public:\n  DefaultDataHub();\n  ~DefaultDataHub() override;\n\n  Status AddPort(const std::shared_ptr<PriorityPort>& port) override;\n  Status SelectActivePort(std::shared_ptr<PriorityPort>* active_port,\n                          int64_t timeout = 0) override;\n\n  size_t GetPortNum() const override;\n\n  size_t GetActivePortNum() const override;\n\n  void RemoveFromActivePort(\n      std::vector<std::shared_ptr<PriorityPort>>& ports) override;\n  Status AddToActivePort(\n      std::vector<std::shared_ptr<PriorityPort>>& ports) override;\n  Status AddToActivePort(const std::shared_ptr<PriorityPort>& port) override;\n\n private:\n  void PortEventCallback(const std::shared_ptr<PriorityPort>& port,\n                         bool update_active_time);\n  void UpdateActivePort(const std::shared_ptr<PriorityPort>& port,\n                        bool update_active_time = true);\n\n  std::vector<std::shared_ptr<PriorityPort>> priority_ports_;\n  std::set<std::shared_ptr<PriorityPort>, PortCompare> active_ports_;\n\n  std::mutex active_mutex_;\n  std::condition_variable cv_;\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/engine/data_context.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/data_context.h>\n#include <modelbox/match_stream.h>\n#include <modelbox/node.h>\n#include <modelbox/port.h>\n#include <modelbox/session_context.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nExternalData::ExternalData() = default;\n\nExternalData::~ExternalData() = default;\n\nDataContext::DataContext() = default;\n\nDataContext::~DataContext() = default;\n\nExternalDataImpl::ExternalDataImpl(std::shared_ptr<InPort> port,\n                                   std::shared_ptr<Device> device,\n                                   const std::shared_ptr<Stream> &init_stream)\n    : root_buffer_(std::make_shared<BufferIndexInfo>()),\n      ext_port_(std::move(port)),\n      device_(std::move(device)),\n      input_stream_(init_stream),\n      session_(init_stream->GetSession()),\n      session_ctx_(init_stream->GetSession()->GetSessionCtx()) {}\n\nExternalDataImpl::~ExternalDataImpl() { Close(); }\n\nstd::shared_ptr<BufferList> ExternalDataImpl::CreateBufferList() {\n  if (!device_) {\n    MBLOG_ERROR << \"device is null\";\n    return nullptr;\n  }\n\n  return std::make_shared<BufferList>(device_);\n}\n\nStatus ExternalDataImpl::Send(std::shared_ptr<BufferList> buffer_list) {\n  if (!buffer_list) {\n    return {STATUS_INVALID, \"input buffer list is null\"};\n  }\n\n  if (!ext_port_) {\n    
return {STATUS_INVALID, \"external port is null\"};\n  }\n\n  for (auto &buffer : *buffer_list) {\n    auto index_info = BufferManageView::GetIndexInfo(buffer);\n    index_info->SetStream(input_stream_);\n    index_info->SetIndex(input_stream_->GetBufferCount());\n    input_stream_->IncreaseBufferCount();\n    auto inherit_info = std::make_shared<BufferInheritInfo>();\n    inherit_info->SetType(BufferProcessType::EXPAND);\n    inherit_info->SetInheritFrom(root_buffer_);\n    index_info->SetInheritInfo(inherit_info);\n    ext_port_->Send(buffer);\n  }\n\n  ext_port_->NotifyPushEvent();\n  return STATUS_OK;\n}\n\nstd::shared_ptr<SessionContext> ExternalDataImpl::GetSessionContext() {\n  return session_ctx_.lock();\n}\n\nStatus ExternalDataImpl::SetOutputMeta(std::shared_ptr<DataMeta> meta) {\n  input_stream_->SetStreamMeta(meta);\n  return STATUS_OK;\n}\n\n/**\n * @brief close input stream, wait process\n **/\nStatus ExternalDataImpl::Close() {\n  if (input_stream_ == nullptr) {\n    return STATUS_OK;\n  }\n\n  auto buffer = std::make_shared<Buffer>();\n  auto index_info = BufferManageView::GetIndexInfo(buffer);\n  index_info->SetStream(input_stream_);\n  index_info->SetIndex(input_stream_->GetBufferCount());\n  index_info->MarkAsEndFlag();\n  input_stream_->IncreaseBufferCount();\n  auto inherit_info = std::make_shared<BufferInheritInfo>();\n  inherit_info->SetType(BufferProcessType::EXPAND);\n  inherit_info->SetInheritFrom(root_buffer_);\n  index_info->SetInheritInfo(inherit_info);\n  ext_port_->Send(buffer);\n  ext_port_->NotifyPushEvent();\n  input_stream_ = nullptr;\n  root_buffer_ = nullptr;\n  return STATUS_OK;\n}\n\n/**\n * @brief stop task immediately\n **/\nStatus ExternalDataImpl::Shutdown() {\n  auto session = session_.lock();\n  if (session == nullptr) {\n    return STATUS_OK;\n  }\n\n  session->Close();\n  Close();  // make sure data end has been sent\n  return STATUS_OK;\n}\n\nstd::shared_ptr<Configuration> ExternalDataImpl::GetSessionConfig() {\n  
auto ctx = session_ctx_.lock();\n  if (ctx == nullptr) {\n    return nullptr;\n  }\n\n  return ctx->GetConfig();\n}\n\nbool FlowUnitDataContext::HasError() { return input_valid_has_error_buffer_; }\n\nFlowUnitDataContext::~FlowUnitDataContext() {\n  for (auto &callback : destroy_callback_list_) {\n    callback();\n  }\n}\n\nFlowUnitDataContext::FlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session) {\n  user_event_ = nullptr;\n  node_ = node;\n  data_ctx_match_key_ = data_ctx_match_key;\n  session_ = session;\n  if (session != nullptr) {\n    session_context_ = session->GetSessionCtx();\n  }\n  is_exception_visible_ = node->IsExceptionVisible();\n  InitStatistic();\n}\n\nvoid FlowUnitDataContext::WriteInputData(\n    std::shared_ptr<PortDataMap> stream_data_map) {\n  SetCurrentInputData(std::move(stream_data_map));\n}\n\nstd::shared_ptr<FlowUnitInnerEvent> FlowUnitDataContext::GenerateSendEvent() {\n  return nullptr;\n}\n\nbool FlowUnitDataContext::IsDataPre() { return false; }\n\nbool FlowUnitDataContext::IsDataPost() { return false; }\n\nvoid FlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer){};\n\nbool FlowUnitDataContext::SkipInheritInputToMatchNode() { return false; }\n\nvoid FlowUnitDataContext::SetCurrentInputData(\n    std::shared_ptr<PortDataMap> stream_data_map) {\n  cur_input_ = std::move(stream_data_map);\n  std::set<size_t> error_index_set;\n  std::set<size_t> valid_index_set;\n  for (auto &port_data_item : *cur_input_) {\n    const auto &port_name = port_data_item.first;\n    auto &data_list = port_data_item.second;\n    size_t index = 0;\n    for (auto &buffer : data_list) {\n      ++index;\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      if (index_info->IsPlaceholder()) {\n        cur_input_placeholder_[port_name].push_back(buffer);\n        continue;\n      
}\n\n      if (index_info->IsEndFlag()) {\n        end_flag_received_ = true;\n        if (index_info->GetIndex() == 0) {  // empty stream\n          is_empty_stream_ = true;\n        }\n        input_stream_max_buffer_count_ = index_info->GetIndex() + 1;\n        cur_input_end_flag_[port_name].push_back(buffer);\n        continue;\n      }\n\n      // select skip error buffer index\n      auto data_error = BufferManageView::GetError(buffer);\n      if (data_error != nullptr) {\n        if (!IsDataErrorVisible() ||\n            data_error->GetErrorDeepth() <\n                index_info->GetInheritInfo()->GetDeepth()) {\n          error_index_set.insert(index);\n          continue;\n        }\n        input_valid_has_error_buffer_ = true;\n      }\n\n      // select valid buffer index\n      valid_index_set.insert(index);\n    }\n  }\n\n  // push each port error/valid buffer\n  for (auto &port_data_item : *cur_input_) {\n    const auto &port_name = port_data_item.first;\n    auto &data_list = port_data_item.second;\n    size_t index = 0;\n    for (auto &buffer : data_list) {\n      ++index;\n      if (error_index_set.find(index) != error_index_set.end()) {\n        cur_input_error_[port_name].push_back(buffer);\n      } else if (valid_index_set.find(index) != valid_index_set.end()) {\n        cur_input_valid_data_[port_name].push_back(buffer);\n      }\n    }\n  }\n\n  // datapre error skip process\n  if (is_datapre_error_) {\n    cur_input_valid_data_.clear();\n    cur_input_error_.clear();\n    cur_input_placeholder_.clear();\n  }\n\n  // collapse has error skip collapse\n  if (!cur_input_error_.empty() &&\n      node_->GetOutputType() == FlowOutputType::COLLAPSE) {\n    cur_input_valid_data_.clear();\n  }\n\n  bool has_no_data = cur_input_valid_data_.empty() ||\n                     cur_input_valid_data_.begin()->second.empty();\n  if (!has_no_data) {\n    // save data for next event trigger, will not clear\n    last_input_valid_data_.clear();\n    for (auto 
&in_port_data : cur_input_valid_data_) {\n      const auto &port_name = in_port_data.first;\n      auto &port_data_list = in_port_data.second;\n      last_input_valid_data_[port_name].push_back(port_data_list.back());\n    }\n  }\n  SetSkippable(has_no_data);\n  UpdateInputInfo();\n}\n\nstd::shared_ptr<BufferList> FlowUnitDataContext::Input(\n    const std::string &port) const {\n  auto port_iter = cur_input_valid_data_.find(port);\n  if (port_iter == cur_input_valid_data_.end()) {\n    return nullptr;\n  }\n  auto buffer_list = std::make_shared<BufferList>(port_iter->second);\n  return buffer_list;\n}\n\nstd::shared_ptr<BufferList> FlowUnitDataContext::Output(\n    const std::string &port) {\n  auto item = cur_output_valid_data_.find(port);\n  if (item == cur_output_valid_data_.end()) {\n    return nullptr;\n  }\n  return item->second;\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitDataContext::Input() const {\n  return nullptr;\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitDataContext::Output() {\n  return std::shared_ptr<BufferListMap>(&(cur_output_valid_data_),\n                                        [](BufferListMap *buffer_list_map) {});\n}\n\nstd::shared_ptr<BufferList> FlowUnitDataContext::External() { return nullptr; }\n\nvoid FlowUnitDataContext::SetEvent(\n    const std::shared_ptr<FlowUnitEvent> &event) {\n  if (wait_user_events_.find(event) == wait_user_events_.end()) {\n    // not sent by user, should not cause data process\n    SetSkippable(true);\n  }\n  wait_user_events_.erase(event);\n  user_event_ = event;\n}\n\nstd::shared_ptr<FlowUnitEvent> FlowUnitDataContext::Event() {\n  return user_event_;\n}\n\nvoid FlowUnitDataContext::SetPrivate(const std::string &key,\n                                     std::shared_ptr<void> private_content) {\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    private_map_.emplace(key, private_content);\n  } else {\n    private_map_[key] = private_content;\n  }\n}\n\nstd::shared_ptr<void> 
FlowUnitDataContext::GetPrivate(const std::string &key) {\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    return nullptr;\n  }\n  return private_map_[key];\n}\n\n/**\n * @brief might not call in Node::Run, we need use last_input_valid_data as\n *parent\n **/\nvoid FlowUnitDataContext::SendEvent(std::shared_ptr<FlowUnitEvent> event) {\n  {\n    std::lock_guard<std::mutex> lock(wait_user_events_lock_);\n    if (session_ != nullptr && session_->IsClosed()) {\n      // stop event driven\n      return;\n    }\n\n    wait_user_events_.insert(event);\n  }\n  auto inner_event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n  inner_event->SetUserEvent(event);\n  inner_event->SetDataCtxMatchKey(data_ctx_match_key_);\n  if (node_ == nullptr) {\n    return;\n  }\n  node_->SendEvent(inner_event);\n\n  // event expand last valid data\n  if (last_input_valid_data_.empty()) {\n    MBLOG_ERROR << \"node \" << node_->GetName()\n                << \", can not find last input valid data\";\n    return;\n  }\n\n  // driven by data\n  cur_event_input_data_ = last_input_valid_data_;\n}\n\nstd::shared_ptr<DataMeta> FlowUnitDataContext::GetInputMeta(\n    const std::string &port) {\n  if (input_port_meta_.find(port) == input_port_meta_.end()) {\n    return nullptr;\n  }\n  return input_port_meta_.find(port)->second;\n}\n\nstd::shared_ptr<DataMeta> FlowUnitDataContext::GetInputGroupMeta(\n    const std::string &port) {\n  return GetInputMeta(port);\n}\n\nvoid FlowUnitDataContext::SetOutputMeta(const std::string &port,\n                                        std::shared_ptr<DataMeta> data_meta) {\n  if (output_port_meta_.find(port) != output_port_meta_.end()) {\n    return;\n  }\n  output_port_meta_.emplace(port, data_meta);\n}\n\nstd::shared_ptr<SessionContext> FlowUnitDataContext::GetSessionContext() {\n  auto session_context = session_context_.lock();\n  return 
session_context;\n}\n\nstd::shared_ptr<Configuration> FlowUnitDataContext::GetSessionConfig() {\n  auto session_context = session_context_.lock();\n  if (session_context == nullptr) {\n    ConfigurationBuilder config_builder;\n    auto config = config_builder.Build();\n    return config;\n  }\n\n  auto config = session_context->GetConfig();\n  auto node_prop_name = CONFIG_NODE + node_->GetName();\n  auto flowunit_prop_name =\n      CONFIG_FLOWUNIT + node_->GetFlowUnitDesc()->GetFlowUnitName();\n  const auto *all_node_prop_name = CONFIG_NODES;\n\n  auto all_node_config = config->GetSubConfig(all_node_prop_name);\n  auto flowunit_config = config->GetSubConfig(flowunit_prop_name);\n  auto node_config = config->GetSubConfig(node_prop_name);\n\n  flowunit_config->Add(*(node_config.get()));\n  all_node_config->Add(*(flowunit_config.get()));\n\n  return all_node_config;\n}\n\nstd::shared_ptr<StatisticsItem> FlowUnitDataContext::GetStatistics(\n    DataContextStatsType type) {\n  switch (type) {\n    case DataContextStatsType::NODE:\n      return node_stats_;\n\n    case DataContextStatsType::SESSION:\n      return session_stats_;\n\n    case DataContextStatsType::GRAPH:\n      return graph_stats_;\n\n    default:\n      return nullptr;\n  }\n}\n\nconst std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n    &FlowUnitDataContext::GetInputs() const {\n  return cur_input_valid_data_;\n}\n\nconst std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n    &FlowUnitDataContext::GetErrorInputs() const {\n  return cur_input_error_;\n}\n\nconst std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n    &FlowUnitDataContext::GetExternals() const {\n  return cur_input_valid_data_;\n}\n\nconst std::unordered_map<std::string, std::shared_ptr<BufferList>>\n    &FlowUnitDataContext::GetOutputs() const {\n  return cur_output_valid_data_;\n}\n\nvoid FlowUnitDataContext::SetOutput(\n    const std::unordered_map<std::string, 
std::shared_ptr<BufferList>>\n        &data_list) {\n  cur_output_valid_data_ = data_list;\n}\n\nvoid FlowUnitDataContext::SetStatus(const Status &status) {\n  process_status_ = status;\n  last_process_status_ = status;\n}\n\nStatus FlowUnitDataContext::GetStatus() { return process_status_; }\n\nStatus FlowUnitDataContext::GetLastStatus() { return last_process_status_; }\n\nbool FlowUnitDataContext::IsErrorStatus() {\n  auto code = process_status_.Code();\n  if (code != STATUS_SUCCESS && code != STATUS_CONTINUE &&\n      code != STATUS_STOP && code != STATUS_SHUTDOWN) {\n    return true;\n  }\n  return false;\n}\n\nvoid FlowUnitDataContext::UpdateProcessState() {}\n\nvoid FlowUnitDataContext::ClearData() {\n  cur_input_ = nullptr;\n  cur_input_valid_data_.clear();\n  cur_input_placeholder_.clear();\n  cur_input_end_flag_.clear();\n  cur_input_error_.clear();\n\n  cur_output_valid_data_.clear();\n  cur_output_placeholder_.clear();\n  cur_output_error_.clear();\n  cur_output_.clear();\n\n  user_event_ = nullptr;\n\n  input_has_stream_start_ = false;\n  input_has_stream_end_ = false;\n  input_valid_has_error_buffer_ = false;\n\n  is_empty_stream_ = false;\n  is_skippable_ = false;\n}\n\nvoid FlowUnitDataContext::Dispose() {\n  // release ref to session\n  session_ = nullptr;\n  last_input_valid_data_.clear();\n  cur_event_input_data_.clear();\n}\n\nbool FlowUnitDataContext::IsSkippable() { return is_skippable_; }\nvoid FlowUnitDataContext::SetSkippable(bool skippable) {\n  is_skippable_ = skippable;\n}\n\nvoid FlowUnitDataContext::SetDataPreError(bool is_error) {\n  is_datapre_error_ = is_error;\n}\n\n// after flowunit process\nStatus FlowUnitDataContext::PostProcess() {\n  auto ret = GenerateOutputPlaceholder();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = GenerateOutputError();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = GenerateOutput();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = AppendEndFlag();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = 
UpdateOutputIndexInfo();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = CheckOutputData();\n  if (!ret) {\n    return STATUS_STOP;  // fatal error\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataContext::GenerateOutputPlaceholder() {\n  FillPlaceholderOutput();\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataContext::GenerateOutputError() {\n  FillErrorOutput(false, \"\", \"\", true);\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataContext::AppendEndFlag() {\n  if (!NeedStreamEndFlag()) {\n    return STATUS_OK;\n  }\n\n  if (end_flag_generated_) {\n    // protect end flag\n    MBLOG_WARN\n        << \"forbidden append redundant end flag, state for this run, has data:\"\n        << cur_input_ << \", has event:\" << user_event_;\n    return STATUS_OK;\n  }\n\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list;\n  auto *end_flag_parent = &cur_input_end_flag_;\n  if (end_flag_parent->empty()) {\n    // need append a new end flag. inherit input directly. in case expand\n    end_flag_parent = &cur_input_valid_data_;\n  }\n  if (end_flag_parent->empty()) {\n    // event driven expand\n    end_flag_parent = &cur_event_input_data_;\n  }\n  if (end_flag_parent->empty()) {\n    // expand empty buffer\n    end_flag_parent = &cur_input_placeholder_;\n  }\n  if (end_flag_parent->empty()) {\n    // expand error buffer\n    end_flag_parent = &cur_input_error_;\n  }\n  if (end_flag_parent->empty()) {\n    // no available input buffer\n    return STATUS_OK;\n  }\n  BufferManageView::GenProcessInfo<std::vector<std::shared_ptr<Buffer>>>(\n      *end_flag_parent, 1,\n      [](const std::vector<std::shared_ptr<Buffer>> &container, size_t idx) {\n        return container[idx];\n      },\n      process_info_list);\n  for (const auto &port_name : node_->GetOutputNames()) {\n    auto &port_data_list = cur_output_[port_name];\n    auto buffer = std::make_shared<Buffer>();\n    auto index_info = BufferManageView::GetIndexInfo(buffer);\n    index_info->MarkAsEndFlag();\n   
 index_info->SetProcessInfo(process_info_list.front());\n    port_data_list.push_back(buffer);\n  }\n\n  end_flag_generated_ = true;\n  return STATUS_OK;\n}\n\nbool FlowUnitDataContext::NeedStreamEndFlag() { return false; };\n\nvoid FlowUnitDataContext::FillPlaceholderOutput(bool from_valid_input,\n                                                bool same_with_input_num) {\n  PortDataMap *cur_parent = &cur_input_placeholder_;\n  if (from_valid_input) {\n    // in case user has no output, we generate new empty\n    cur_parent = &cur_input_valid_data_;\n  }\n\n  if (cur_parent->empty()) {\n    return;\n  }\n\n  auto first_input_port = cur_parent->begin()->second;\n  if (first_input_port.empty()) {\n    return;\n  }\n  auto input_num = first_input_port.size();\n\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list;\n  BufferManageView::GenProcessInfo<std::vector<std::shared_ptr<Buffer>>>(\n      *cur_parent, input_num,\n      [](const std::vector<std::shared_ptr<Buffer>> &container, size_t idx) {\n        return container[idx];\n      },\n      process_info_list, !same_with_input_num);\n  // generate empty output\n  bool is_condition = node_->GetConditionType() != ConditionType::NONE;\n  bool first_port = true;\n  cur_output_placeholder_.clear();\n  size_t output_num = 1;\n  if (same_with_input_num) {\n    output_num = input_num;\n  }\n\n  for (const auto &port_name : node_->GetOutputNames()) {\n    auto &port_data_list = cur_output_placeholder_[port_name];\n    if (is_condition && !first_port) {\n      port_data_list.resize(output_num, nullptr);\n      continue;\n    }\n\n    port_data_list.reserve(output_num);\n    for (size_t i = 0; i < output_num; ++i) {\n      auto buffer = std::make_shared<Buffer>();\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      index_info->SetProcessInfo(process_info_list[i]);\n      index_info->MarkAsPlaceholder();\n      port_data_list.push_back(buffer);\n    }\n\n    first_port = false;\n  
}\n}\n\nvoid FlowUnitDataContext::FillErrorOutput(bool from_valid,\n                                          const std::string &error_code,\n                                          const std::string &error_msg,\n                                          bool same_with_input_num) {\n  auto *cur_parent = &cur_input_error_;\n  if (from_valid) {\n    cur_parent = &cur_input_valid_data_;\n  }\n  if (cur_parent->empty()) {\n    // input for this process is empty, error might be last process\n    return;\n  }\n\n  auto first_input_port = cur_parent->begin()->second;\n  if (first_input_port.empty()) {\n    return;\n  }\n  auto input_num = first_input_port.size();\n  size_t output_num = 1;\n  if (same_with_input_num) {\n    output_num = input_num;\n  }\n\n  bool is_condition = node_->GetConditionType() != ConditionType::NONE;\n  bool first_port = true;\n\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list;\n  BufferManageView::GenProcessInfo<std::vector<std::shared_ptr<Buffer>>>(\n      *cur_parent, input_num,\n      [](const std::vector<std::shared_ptr<Buffer>> &container, size_t idx) {\n        return container[idx];\n      },\n      process_info_list, !same_with_input_num);\n\n  // get error code, error msg\n  std::vector<std::shared_ptr<DataError>> error_list;\n  error_list.reserve(output_num);\n  for (size_t i = 0; i < output_num; ++i) {\n    if (from_valid) {\n      error_list.push_back(std::make_shared<DataError>(error_code, error_msg));\n      continue;\n    }\n\n    for (const auto &input_port_name : node_->GetInputNames()) {\n      auto &input_port_data_list = (*cur_parent)[input_port_name];\n      if (input_port_data_list[i]->HasError()) {\n        error_list.push_back(\n            BufferManageView::GetError(input_port_data_list[i]));\n        break;\n      }\n    }\n  }\n\n  for (const auto &port_name : node_->GetOutputNames()) {\n    auto &port_data_list = cur_output_error_[port_name];\n    if (is_condition && !first_port) {\n      
port_data_list.resize(port_data_list.size() + output_num, nullptr);\n      continue;\n    }\n\n    port_data_list.reserve(port_data_list.size() + output_num);\n    for (size_t i = 0; i < output_num; ++i) {\n      auto buffer = std::make_shared<Buffer>();\n      BufferManageView::SetError(buffer, error_list[i]);\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      index_info->SetProcessInfo(process_info_list[i]);\n      port_data_list.push_back(buffer);\n    }\n\n    first_port = false;\n  }\n}\n\nbool FlowUnitDataContext::HasValidOutput() {\n  if (cur_output_valid_data_.empty()) {\n    return false;\n  }\n\n  auto &first_output_port = cur_output_valid_data_.begin()->second;\n  return first_output_port->Size() != 0;\n}\n\nbool FlowUnitDataContext::IsContinueProcess() {\n  return (process_status_ == STATUS_CONTINUE && !session_->IsClosed()) ||\n         !wait_user_events_.empty();\n}\n\nsize_t FlowUnitDataContext::GetOutputBufferNum() {\n  if (cur_output_.empty()) {\n    return 0;\n  }\n\n  auto &first_port = cur_output_.begin()->second;\n  return first_port.size();\n}\n\nStatus FlowUnitDataContext::CheckOutputData() { return STATUS_OK; };\n\nStatus FlowUnitDataContext::GenerateOutput() {\n  for (const auto &port_name : node_->GetOutputNames()) {\n    auto &valid_data_list = cur_output_valid_data_[port_name];\n    if (valid_data_list == nullptr) {\n      // no output\n      valid_data_list = std::make_shared<BufferList>();\n    }\n    auto &placeholder_data_list = cur_output_placeholder_[port_name];\n    auto &error_data_list = cur_output_error_[port_name];\n\n    BufferPtrList valid_placholder_data_list;\n    valid_placholder_data_list.resize(valid_data_list->Size() +\n                                      placeholder_data_list.size());\n\n    // merge buffer by input index\n    auto compare = [](const std::shared_ptr<Buffer> &b1,\n                      const std::shared_ptr<Buffer> &b2) {\n      if (b1 == nullptr || b2 == nullptr) {\n        // 
condition output, will be removed in node stream manage\n        return true;\n      }\n      auto parent_index_info1 = BufferManageView::GetFirstParentBuffer(b1);\n      auto parent_index_info2 = BufferManageView::GetFirstParentBuffer(b2);\n      return parent_index_info1->GetIndex() < parent_index_info2->GetIndex();\n    };\n    std::merge(valid_data_list->begin(), valid_data_list->end(),\n               placeholder_data_list.begin(), placeholder_data_list.end(),\n               valid_placholder_data_list.begin(), compare);\n\n    auto &port_data_list = cur_output_[port_name];\n    port_data_list.resize(valid_placholder_data_list.size() +\n                          error_data_list.size());\n    std::merge(valid_placholder_data_list.begin(),\n               valid_placholder_data_list.end(), error_data_list.begin(),\n               error_data_list.end(), port_data_list.begin(), compare);\n  }\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataContext::UpdateOutputIndexInfo() {\n  for (auto &output_item : cur_output_) {\n    auto &output_data_list = output_item.second;\n    for (auto &buffer : output_data_list) {\n      if (buffer == nullptr) {\n        // condition output, will be removed in node stream manage\n        continue;\n      }\n      auto cur_buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n      auto cur_node_process_info = GetCurNodeProcessInfo(cur_buffer_index_info);\n      if (cur_node_process_info == nullptr) {\n        return STATUS_STOP;  // fatal error\n      }\n      auto first_input_port = cur_node_process_info->GetParentBuffers().begin();\n      auto first_buffer_info_in_port = first_input_port->second.front();\n      UpdateBufferIndexInfo(cur_buffer_index_info, first_buffer_info_in_port);\n\n      // new throw error buffer, set current deepth\n      auto data_error = BufferManageView::GetError(buffer);\n      if (data_error != nullptr &&\n          cur_buffer_index_info->GetInheritInfo() != nullptr) {\n        auto deepth = 
cur_buffer_index_info->GetInheritInfo()->GetDeepth();\n        data_error->SetErrorDeepth(deepth);\n      }\n    }\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<BufferProcessInfo> FlowUnitDataContext::GetCurNodeProcessInfo(\n    const std::shared_ptr<BufferIndexInfo> &index_info) {\n  auto cur_node_process_info = index_info->GetProcessInfo();\n  if (cur_node_process_info != nullptr) {\n    return cur_node_process_info;\n  }\n\n  // event driven\n  if (cur_event_input_data_.empty()) {\n    MBLOG_ERROR << \"node \" << node_->GetName()\n                << \", current event expand no data\";\n    return nullptr;\n  }\n  cur_node_process_info = std::make_shared<BufferProcessInfo>();\n  for (auto &in_port_data_item : cur_event_input_data_) {\n    const auto &in_port_name = in_port_data_item.first;\n    auto &in_port_data_list = in_port_data_item.second;\n    std::list<std::shared_ptr<BufferIndexInfo>> index_info_list;\n    for (auto &in_buffer : in_port_data_list) {\n      auto in_buffer_index_info = BufferManageView::GetIndexInfo(in_buffer);\n      index_info_list.push_back(in_buffer_index_info);\n    }\n    cur_node_process_info->SetParentBuffers(in_port_name,\n                                            std::move(index_info_list));\n  }\n\n  index_info->SetProcessInfo(cur_node_process_info);\n  return cur_node_process_info;\n}\n\nstd::shared_ptr<Session> FlowUnitDataContext::GetSession() { return session_; }\n\nvoid FlowUnitDataContext::NotifySessionClose() {\n  std::lock_guard<std::mutex> lock(wait_user_events_lock_);\n  if (process_status_ == STATUS_CONTINUE && wait_user_events_.empty()) {\n    /** append one event to push data ctx end, event not sent by user will not\n     *  cause data process\n     *  case: videodemuxer connect failed, and return continue\n     *  at Node::Run, then user call session close, and demuxer reconnect event\n     *  still wait, session will be stuck\n     **/\n    auto inner_event = std::make_shared<FlowUnitInnerEvent>(\n        
FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n    inner_event->SetUserEvent(std::make_shared<FlowUnitEvent>());\n    inner_event->SetDataCtxMatchKey(data_ctx_match_key_);\n    if (node_ == nullptr) {\n      return;\n    }\n    node_->SendEvent(inner_event);\n  }\n}\n\nvoid FlowUnitDataContext::DealWithDataPreError(const std::string &error_code,\n                                               const std::string &error_msg) {\n  FillErrorOutput(true, error_code, error_msg, false);\n  SetDataPreError(true);\n  SetSkippable(true);\n}\n\nbool FlowUnitDataContext::IsFinished() { return is_finished_; }\n\nvoid FlowUnitDataContext::InitStatistic() {\n  if (node_ == nullptr) {\n    MBLOG_DEBUG << \"Node is null, init statistics ctx failed\";\n    return;\n  }\n  auto session_context = session_context_.lock();\n  if (session_context == nullptr) {\n    MBLOG_DEBUG\n        << \"session_context is null, init statistics ctx failed, node : \"\n        << node_->GetName();\n    return;\n  }\n\n  graph_stats_ = session_context->GetStatistics(SessionContexStatsType::GRAPH);\n  session_stats_ = session_context->GetStatistics();\n  if (session_stats_ == nullptr) {\n    MBLOG_DEBUG << \"Get session statistics ctx failed, node : \"\n                << node_->GetName();\n    return;\n  }\n\n  node_stats_ = session_stats_->AddItem(node_->GetName());\n  if (node_stats_ == nullptr) {\n    MBLOG_WARN << \"Get statistics ctx failed for \" << node_->GetName();\n  }\n}\n\nvoid FlowUnitDataContext::AddDestroyCallback(\n    const std::function<void()> &func) {\n  destroy_callback_list_.push_back(func);\n}\n\nbool FlowUnitDataContext::IsDataErrorVisible() { return is_exception_visible_; }\n\nStatus FlowUnitDataContext::PopOutputData(PortDataMap &stream_data_map) {\n  cur_output_.swap(stream_data_map);\n  return STATUS_OK;\n}\n\nstd::unordered_map<std::string, std::shared_ptr<DataMeta>>\nFlowUnitDataContext::GetOutputPortStreamMeta() {\n  return output_port_meta_;\n}\n\nvoid 
FlowUnitDataContext::UpdateInputInfo() {\n  for (auto &input_item : *cur_input_) {\n    const auto &input_port_name = input_item.first;\n    auto &input_port_data_list = input_item.second;\n    if (input_port_data_list.empty()) {\n      continue;\n    }\n\n    auto input_stream =\n        BufferManageView::GetIndexInfo(input_port_data_list.front())\n            ->GetStream();\n    if (input_stream == nullptr) {\n      continue;\n    }\n\n    input_port_meta_[input_port_name] = input_stream->GetStreamMeta();\n  }\n\n  auto first_port_data_list = cur_input_->begin()->second;\n  if (first_port_data_list.empty()) {\n    return;\n  }\n\n  input_stream_cur_buffer_count_ += first_port_data_list.size();\n\n  auto first_buffer = first_port_data_list.front();\n  auto buffer_index_info = BufferManageView::GetIndexInfo(first_buffer);\n  input_has_stream_start_ = buffer_index_info->IsFirstBufferInStream();\n\n  auto last_buffer = first_port_data_list.back();\n  buffer_index_info = BufferManageView::GetIndexInfo(last_buffer);\n  input_has_stream_end_ = buffer_index_info->IsEndFlag();\n}\n\nExecutorDataContext::ExecutorDataContext(\n    std::shared_ptr<FlowUnitDataContext> origin_ctx,\n    std::shared_ptr<FlowUnitExecData> data)\n    : origin_ctx_(std::move(origin_ctx)), data_(std::move(data)){};\n\nExecutorDataContext::~ExecutorDataContext() = default;\n\nstd::shared_ptr<BufferList> ExecutorDataContext::Input(\n    const std::string &port) const {\n  return data_->GetInDataForUser(port);\n}\n\nstd::shared_ptr<BufferList> ExecutorDataContext::Output(\n    const std::string &port) {\n  return data_->GetOutData(port);\n}\n\nstd::shared_ptr<BufferListMap> ExecutorDataContext::Input() const {\n  return data_->GetInDataForUser();\n}\n\nstd::shared_ptr<BufferListMap> ExecutorDataContext::Output() {\n  return data_->GetOutData();\n}\n\nstd::shared_ptr<BufferList> ExecutorDataContext::External() {\n  return data_->GetExternalDataForUser(EXTERNAL_PORT_NAME);\n}\n\nbool 
ExecutorDataContext::HasError() { return origin_ctx_->HasError(); }\n\nstd::shared_ptr<FlowUnitEvent> ExecutorDataContext::Event() {\n  return origin_ctx_->Event();\n}\n\nvoid ExecutorDataContext::SendEvent(std::shared_ptr<FlowUnitEvent> event) {\n  origin_ctx_->SendEvent(event);\n}\n\nvoid ExecutorDataContext::SetPrivate(const std::string &key,\n                                     std::shared_ptr<void> private_content) {\n  origin_ctx_->SetPrivate(key, private_content);\n}\n\nstd::shared_ptr<void> ExecutorDataContext::GetPrivate(const std::string &key) {\n  return origin_ctx_->GetPrivate(key);\n}\n\nstd::shared_ptr<DataMeta> ExecutorDataContext::GetInputMeta(\n    const std::string &port) {\n  return origin_ctx_->GetInputMeta(port);\n}\n\nstd::shared_ptr<DataMeta> ExecutorDataContext::GetInputGroupMeta(\n    const std::string &port) {\n  return origin_ctx_->GetInputGroupMeta(port);\n}\n\nvoid ExecutorDataContext::SetOutputMeta(const std::string &port,\n                                        std::shared_ptr<DataMeta> data_meta) {\n  origin_ctx_->SetOutputMeta(port, data_meta);\n}\n\nstd::shared_ptr<SessionContext> ExecutorDataContext::GetSessionContext() {\n  return origin_ctx_->GetSessionContext();\n}\n\nvoid ExecutorDataContext::SetStatus(const Status &status) {\n  data_->SetStatus(status);\n}\n\nstd::shared_ptr<Configuration> ExecutorDataContext::GetSessionConfig() {\n  return origin_ctx_->GetSessionConfig();\n}\n\nstd::shared_ptr<StatisticsItem> ExecutorDataContext::GetStatistics(\n    DataContextStatsType type) {\n  return origin_ctx_->GetStatistics(type);\n}\n\nvoid ExecutorDataContext::Clear() { data_ = nullptr; }\n\nNormalFlowUnitDataContext::NormalFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nNormalFlowUnitDataContext::~NormalFlowUnitDataContext() = default;\n\nvoid NormalFlowUnitDataContext::SendEvent(\n    
std::shared_ptr<FlowUnitEvent> event) {\n  // not support user send event\n}\n\nvoid NormalFlowUnitDataContext::UpdateProcessState() {\n  // input process over, normal data ctx is over\n  if (input_stream_max_buffer_count_ == 0) {\n    is_finished_ = false;\n    return;\n  }\n\n  is_finished_ =\n      input_stream_cur_buffer_count_ == input_stream_max_buffer_count_;\n}\n\nbool NormalFlowUnitDataContext::NeedStreamEndFlag() {\n  return input_has_stream_end_;\n}\n\nvoid NormalFlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  if (node_->GetConditionType() == ConditionType::IF_ELSE) {\n    cur_buffer->GetProcessInfo()->SetType(BufferProcessType::CONDITION_START);\n    auto inherit_info = std::make_shared<BufferInheritInfo>();\n    inherit_info->SetType(BufferProcessType::CONDITION_START);\n    inherit_info->SetInheritFrom(parent_buffer);\n    cur_buffer->SetInheritInfo(inherit_info);\n    return;\n  }\n\n  cur_buffer->SetIndex(parent_buffer->GetIndex());\n  cur_buffer->SetInheritInfo(parent_buffer->GetInheritInfo());\n}\n\nLoopNormalFlowUnitDataContext::LoopNormalFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : NormalFlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nLoopNormalFlowUnitDataContext::~LoopNormalFlowUnitDataContext() = default;\n\nStatus LoopNormalFlowUnitDataContext::GenerateOutput() {\n  // need know output port for this loop\n  if (HasValidOutput()) {\n    // has user output\n    for (auto &port_data_item : cur_output_valid_data_) {\n      const auto &port_name = port_data_item.first;\n      auto &port_data_list = port_data_item.second;\n      if (port_data_list->Front() != nullptr) {\n        output_port_for_this_loop_ = port_name;\n        break;\n      }\n    }\n  }\n\n  for (auto &port_data_item : cur_output_placeholder_) {\n    const auto &port_name = 
port_data_item.first;\n    auto &port_data_list = port_data_item.second;\n    auto &cached_port_data_list = cached_output_placeholder_[port_name];\n    cached_port_data_list.insert(cached_port_data_list.end(),\n                                 port_data_list.begin(), port_data_list.end());\n  }\n  if (!cur_input_end_flag_.empty()) {\n    cached_input_end_flag_.swap(cur_input_end_flag_);\n  }\n\n  if (output_port_for_this_loop_.empty() &&\n      input_stream_cur_buffer_count_ < input_stream_max_buffer_count_) {\n    // not decide this loop yet and input stream not over,\n    // then not process cache\n    return STATUS_OK;\n  }\n\n  if (output_port_for_this_loop_.empty() &&\n      input_stream_cur_buffer_count_ == input_stream_max_buffer_count_) {\n    // input stream end, but no user output, then just go through this loop\n    output_port_for_this_loop_ = node_->GetLoopOutPortName();\n  }\n\n  cur_output_placeholder_.swap(cur_output_placeholder_);\n  NormalFlowUnitDataContext::GenerateOutput();\n  return STATUS_OK;\n}\n\nStatus LoopNormalFlowUnitDataContext::AppendEndFlag() {\n  if (output_port_for_this_loop_.empty()) {\n    // not decide this loop\n    return STATUS_OK;\n  }\n\n  if (cached_input_end_flag_.empty()) {\n    // not end\n    return STATUS_OK;\n  }\n\n  cur_input_end_flag_.swap(cached_input_end_flag_);\n  input_has_stream_end_ = true;\n  NormalFlowUnitDataContext::AppendEndFlag();\n  return STATUS_OK;\n}\n\nStatus LoopNormalFlowUnitDataContext::CheckOutputData() {\n  if (output_port_for_this_loop_.empty()) {\n    return STATUS_OK;\n  }\n\n  for (auto iter = cur_output_.begin(); iter != cur_output_.end();) {\n    const auto &port_name = iter->first;\n    if (port_name != output_port_for_this_loop_) {\n      iter = cur_output_.erase(iter);\n    } else {\n      ++iter;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStreamFlowUnitDataContext::~StreamFlowUnitDataContext() = default;\n\nStreamFlowUnitDataContext::StreamFlowUnitDataContext(\n    Node *node, 
MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nbool StreamFlowUnitDataContext::IsDataPre() {\n  return input_has_stream_start_ && !is_empty_stream_;\n}\n\nbool StreamFlowUnitDataContext::IsDataPost() {\n  return end_flag_received_ && !is_empty_stream_ && !IsContinueProcess();\n}\n\nvoid StreamFlowUnitDataContext::UpdateProcessState() {\n  is_finished_ = end_flag_received_ && !IsContinueProcess();\n  if (is_finished_) {\n    is_datapre_error_ = false;\n  }\n}\n\nbool StreamFlowUnitDataContext::NeedStreamEndFlag() {\n  auto ret = end_flag_received_ && !IsContinueProcess();\n  if (!ret) {\n    if (!cur_input_end_flag_.empty()) {\n      // received end flag, but not process at this run, cache it\n      cached_input_end_flag_.swap(cur_input_end_flag_);\n    }\n    return false;\n  }\n\n  if (cur_input_end_flag_.empty()) {\n    // use cached input end flag\n    cur_input_end_flag_.swap(cached_input_end_flag_);\n  }\n  return true;\n}\n\nvoid StreamFlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  cur_buffer->SetInheritInfo(parent_buffer->GetInheritInfo());\n}\n\nNormalExpandFlowUnitDataContext::NormalExpandFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nNormalExpandFlowUnitDataContext::~NormalExpandFlowUnitDataContext() = default;\n\nvoid NormalExpandFlowUnitDataContext::UpdateProcessState() {\n  // each buffer in stream has one data ctx, finish after buffer expand end\n  is_finished_ = !IsContinueProcess();\n  if (is_finished_) {\n    is_datapre_error_ = false;\n  }\n}\n\nbool NormalExpandFlowUnitDataContext::NeedStreamEndFlag() {\n  return !IsContinueProcess();\n}\n\nvoid NormalExpandFlowUnitDataContext::UpdateBufferIndexInfo(\n 
   const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  cur_buffer->GetProcessInfo()->SetType(BufferProcessType::EXPAND);\n\n  auto inherit_info = std::make_shared<BufferInheritInfo>();\n  inherit_info->SetType(BufferProcessType::EXPAND);\n  inherit_info->SetInheritFrom(parent_buffer);\n  cur_buffer->SetInheritInfo(inherit_info);\n}\n\nStreamExpandFlowUnitDataContext::StreamExpandFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nStreamExpandFlowUnitDataContext::~StreamExpandFlowUnitDataContext() = default;\n\nvoid StreamExpandFlowUnitDataContext::WriteInputData(\n    std::shared_ptr<PortDataMap> stream_data_map) {\n  if (stream_data_map->empty()) {\n    SetSkippable(true);\n    return;\n  }\n\n  if (stream_data_map->begin()->second.empty()) {\n    SetSkippable(true);\n    return;\n  }\n\n  stream_data_cache_.push_back(stream_data_map);\n  if (next_expand_buffer_event_generated_) {\n    // next buffer expand will trigger by event\n    SetSkippable(true);\n    return;\n  }\n\n  // no event trigger next buffer expand\n  ExpandNextBuffer();\n}\n\n/**\n * @brief cache structure\n * stream_data_cache: block block block x n\n * each block: buffer buffer buffer x m\n * for each expand process, only take one buffer\n **/\nstd::shared_ptr<PortDataMap>\nStreamExpandFlowUnitDataContext::ReadFirstInCache() {\n  if (stream_data_cache_.empty()) {\n    // no data to process\n    return nullptr;\n  }\n\n  auto front_cache = stream_data_cache_.front();\n  auto &first_port = front_cache->begin()->second;\n  if (first_port.size() <= cur_data_pose_in_first_cache_) {\n    // this block read over\n    stream_data_cache_.pop_front();\n    cur_data_pose_in_first_cache_ = 0;\n    if (stream_data_cache_.empty()) {\n      // No data to process\n      return nullptr;\n    }\n\n    front_cache = 
stream_data_cache_.front();\n  }\n\n  auto first_data = std::make_shared<PortDataMap>();\n  for (auto &port_data : *front_cache) {\n    const auto &port_name = port_data.first;\n    auto &data_list = port_data.second;\n    (*first_data)[port_name].push_back(\n        data_list[cur_data_pose_in_first_cache_]);\n  }\n\n  return first_data;\n}\n\nbool StreamExpandFlowUnitDataContext::IsNextExpand(\n    const std::shared_ptr<PortDataMap> &data) {\n  // test cur input is next buffer to process\n  auto &first_input = data->begin()->second.front();\n  auto first_input_index = BufferManageView::GetIndexInfo(first_input);\n  return first_input_index->GetIndex() == cur_expand_buffer_index_;\n}\n\nvoid StreamExpandFlowUnitDataContext::ExpandNextBuffer() {\n  auto next_cache = ReadFirstInCache();\n  if (next_cache == nullptr) {\n    // no data to process\n    SetSkippable(true);\n    return;\n  }\n\n  if (!IsNextExpand(next_cache)) {\n    // next buffer not received\n    SetSkippable(true);\n    return;\n  }\n\n  cur_expand_buffer_index_received_ = true;\n  end_flag_generated_ = false;  // each expand buffer generate new stream\n  SetCurrentInputData(next_cache);\n  // state for next process\n  ++cur_data_pose_in_first_cache_;\n  next_expand_buffer_event_generated_ = false;\n}\n\nbool StreamExpandFlowUnitDataContext::IsDataPre() {\n  return input_has_stream_start_ && !is_empty_stream_;\n}\n\nbool StreamExpandFlowUnitDataContext::IsDataPost() {\n  return end_flag_received_ && !is_empty_stream_ && !IsContinueProcess();\n}\n\nstd::shared_ptr<FlowUnitInnerEvent>\nStreamExpandFlowUnitDataContext::GenerateSendEvent() {\n  if (IsContinueProcess()) {\n    // user event driven\n    return nullptr;\n  }\n\n  if (end_flag_received_) {\n    // all data processed\n    return nullptr;\n  }\n\n  auto next_cache = ReadFirstInCache();\n  if (next_cache == nullptr) {\n    // no data to expand\n    return nullptr;\n  }\n\n  if (!IsNextExpand(next_cache)) {\n    // cache is not target expand 
buffer\n    return nullptr;\n  }\n\n  if (next_expand_buffer_event_generated_) {\n    // event has been sent, should not repeat\n    return nullptr;\n  }\n\n  next_expand_buffer_event_generated_ = true;\n  auto expand_event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_NEXT_STREAM);\n  expand_event->SetDataCtxMatchKey(data_ctx_match_key_);\n  return expand_event;\n}\n\nvoid StreamExpandFlowUnitDataContext::UpdateProcessState() {\n  is_finished_ = end_flag_received_ && !IsContinueProcess();\n  if (!IsContinueProcess() && cur_expand_buffer_index_received_) {\n    ++cur_expand_buffer_index_;\n    cur_expand_buffer_index_received_ = false;\n  }\n  if (is_finished_) {\n    is_datapre_error_ = false;\n  }\n}\n\nbool StreamExpandFlowUnitDataContext::NeedStreamEndFlag() {\n  return cur_expand_buffer_index_received_ && !IsContinueProcess();\n}\n\nvoid StreamExpandFlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  cur_buffer->GetProcessInfo()->SetType(BufferProcessType::EXPAND);\n\n  auto inherit_info = std::make_shared<BufferInheritInfo>();\n  inherit_info->SetType(BufferProcessType::EXPAND);\n  inherit_info->SetInheritFrom(parent_buffer);\n  cur_buffer->SetInheritInfo(inherit_info);\n}\n\nNormalCollapseFlowUnitDataContext::NormalCollapseFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session){};\n\nNormalCollapseFlowUnitDataContext::~NormalCollapseFlowUnitDataContext() =\n    default;\n\nbool NormalCollapseFlowUnitDataContext::IsDataPre() {\n  return input_has_stream_start_ && !is_empty_stream_;\n}\n\nbool NormalCollapseFlowUnitDataContext::IsDataPost() {\n  return input_has_stream_end_ && !is_empty_stream_;\n}\n\nvoid NormalCollapseFlowUnitDataContext::UpdateProcessState() {\n  is_finished_ = end_flag_received_;\n  
if (input_has_stream_end_) {\n    is_datapre_error_ = false;\n  }\n}\n\nStatus NormalCollapseFlowUnitDataContext::GenerateOutputError() {\n  FillErrorOutput(false, \"\", \"\", false);\n  return STATUS_OK;\n}\n\nbool NormalCollapseFlowUnitDataContext::NeedStreamEndFlag() {\n  if (!input_has_stream_end_) {\n    return false;\n  }\n\n  // check stream end flag before expand\n  auto &first_port = cur_input_end_flag_.begin()->second;\n  auto &input_end_buffer = first_port.front();\n  auto input_end_buffer_index =\n      BufferManageView::GetIndexInfo(input_end_buffer);\n  auto expand_from = input_end_buffer_index->GetInheritInfo()->GetInheritFrom();\n  return expand_from->IsEndFlag();\n}\n\nStatus NormalCollapseFlowUnitDataContext::CheckOutputData() {\n  output_buffer_for_current_stream_ += GetOutputBufferNum();\n  if (output_buffer_for_current_stream_ >\n      1) {  // collapse for one stream should only generate one buffer\n    MBLOG_ERROR << \"node \" << node_->GetName() << \" output buffer is \"\n                << output_buffer_for_current_stream_\n                << \", should generate one buffer for one stream collapse\";\n    return STATUS_INVALID;\n  }\n\n  if (output_buffer_for_current_stream_ == 0 && input_has_stream_end_) {\n    // collapse over, but has no data, we need generate empty from valid input\n    FillPlaceholderOutput(true, false);\n  }\n\n  return STATUS_OK;\n}\n\nStatus NormalCollapseFlowUnitDataContext::GenerateOutput() {\n  if (output_buffer_for_current_stream_ >= 1 && !cur_output_error_.empty()) {\n    cur_output_error_.clear();\n  }\n\n  return FlowUnitDataContext::GenerateOutput();\n}\n\nStatus NormalCollapseFlowUnitDataContext::GenerateOutputPlaceholder() {\n  if (!(end_flag_received_ && !HasValidOutput())) {\n    return STATUS_OK;\n  }\n  // receive end and no buffer generated, try generate empty from input\n  // placehold\n  FillPlaceholderOutput(false, false);\n  return STATUS_OK;\n}\n\nvoid 
NormalCollapseFlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  cur_buffer->GetProcessInfo()->SetType(BufferProcessType::COLLAPSE);\n\n  auto expand_from_buffer = parent_buffer->GetInheritInfo()->GetInheritFrom();\n  cur_buffer->SetInheritInfo(expand_from_buffer->GetInheritInfo());\n  cur_buffer->SetIndex(expand_from_buffer->GetIndex());\n}\n\nStreamCollapseFlowUnitDataContext::StreamCollapseFlowUnitDataContext(\n    Node *node, MatchKey *data_ctx_match_key,\n    const std::shared_ptr<Session> &session)\n    : FlowUnitDataContext(node, data_ctx_match_key, session) {}\n\nStreamCollapseFlowUnitDataContext::~StreamCollapseFlowUnitDataContext() =\n    default;\n\nvoid StreamCollapseFlowUnitDataContext::SendEvent(\n    std::shared_ptr<FlowUnitEvent> event) {\n  // not support user send event\n}\n\nvoid StreamCollapseFlowUnitDataContext::WriteInputData(\n    std::shared_ptr<PortDataMap> stream_data_map) {\n  AppendToCache(stream_data_map);\n  CollapseNextStream();\n}\n\nvoid StreamCollapseFlowUnitDataContext::AppendToCache(\n    const std::shared_ptr<PortDataMap> &stream_data_map) {\n  auto first_buffer = stream_data_map->begin()->second.front();\n  auto buffer_index = BufferManageView::GetIndexInfo(first_buffer);\n  auto expand_from = buffer_index->GetInheritInfo()->GetInheritFrom();\n  auto index_before_expand = expand_from->GetIndex();\n  // find cache\n  auto cache_item = stream_data_cache_.find(index_before_expand);\n  if (cache_item == stream_data_cache_.end()) {\n    stream_data_cache_[index_before_expand] = stream_data_map;\n    return;\n  }\n\n  auto &cache_stream_data = cache_item->second;\n  for (auto &port_item : *stream_data_map) {\n    const auto &port_name = port_item.first;\n    auto &port_new_data = port_item.second;\n    auto &old_data = (*cache_stream_data)[port_name];\n    old_data.insert(old_data.end(), port_new_data.begin(), 
port_new_data.end());\n  }\n}\n\nvoid StreamCollapseFlowUnitDataContext::UpdateInputInfo() {\n  FlowUnitDataContext::UpdateInputInfo();\n  auto &first_input_port = cur_input_->begin()->second;\n  auto &first_buffer_in_port = first_input_port.front();\n  auto index_info_first_buffer_in_port =\n      BufferManageView::GetIndexInfo(first_buffer_in_port);\n  auto expand_buffer_index_info =\n      index_info_first_buffer_in_port->GetInheritInfo()->GetInheritFrom();\n  input_is_expand_from_end_buffer_ = expand_buffer_index_info->IsEndFlag();\n}\n\nvoid StreamCollapseFlowUnitDataContext::CollapseNextStream() {\n  auto next_stream_item = stream_data_cache_.find(current_collapse_order_);\n  if (next_stream_item == stream_data_cache_.end()) {\n    // in single node run, multi stream data expand from same one will cache at\n    // same data context\n    // this stream data not the next, but cur_input_ might be ready\n    SetSkippable(cur_input_ == nullptr);\n    return;\n  }\n\n  SetCurrentInputData(next_stream_item->second);\n  stream_data_cache_.erase(current_collapse_order_);\n}\n\nbool StreamCollapseFlowUnitDataContext::IsDataPre() {\n  return input_has_stream_start_ && !is_empty_stream_;\n}\n\nbool StreamCollapseFlowUnitDataContext::IsDataPost() {\n  return input_has_stream_end_ && !is_empty_stream_;\n}\n\nstd::shared_ptr<FlowUnitInnerEvent>\nStreamCollapseFlowUnitDataContext::GenerateSendEvent() {\n  if (!input_is_expand_from_end_buffer_ && input_has_stream_end_) {\n    auto event = std::make_shared<FlowUnitInnerEvent>(\n        FlowUnitInnerEvent::COLLAPSE_NEXT_STREAM);\n    event->SetDataCtxMatchKey(data_ctx_match_key_);\n    return event;\n  }\n\n  return nullptr;\n}\n\nvoid StreamCollapseFlowUnitDataContext::UpdateProcessState() {\n  if (!input_has_stream_end_) {\n    return;\n  }\n\n  // last stream collapse over, process next stream, reset stream state\n  ++current_collapse_order_;\n  is_empty_stream_ = false;\n  end_flag_received_ = false;\n  
input_stream_cur_buffer_count_ = 0;\n  input_stream_max_buffer_count_ = 0;\n  output_buffer_for_current_stream_ = 0;\n  is_datapre_error_ = false;\n\n  // test ctx finish\n  if (input_is_expand_from_end_buffer_) {\n    is_finished_ = true;  // this is last packet to collapse\n  }\n}\n\nStatus StreamCollapseFlowUnitDataContext::GenerateOutputError() {\n  FillErrorOutput(false, \"\", \"\", false);\n  return STATUS_OK;\n}\n\nbool StreamCollapseFlowUnitDataContext::NeedStreamEndFlag() {\n  if (!end_flag_received_) {\n    return false;\n  }\n\n  // check stream end flag before expand\n  auto &first_port = cur_input_end_flag_.begin()->second;\n  auto &input_end_buffer = first_port.front();\n  auto input_end_buffer_index =\n      BufferManageView::GetIndexInfo(input_end_buffer);\n  auto expand_from = input_end_buffer_index->GetInheritInfo()->GetInheritFrom();\n  return expand_from->IsEndFlag();\n}\n\nStatus StreamCollapseFlowUnitDataContext::CheckOutputData() {\n  output_buffer_for_current_stream_ += GetOutputBufferNum();\n  if (output_buffer_for_current_stream_ >\n      1) {  // collapse for one stream should only generate one buffer\n    MBLOG_ERROR << \"node \" << node_->GetName() << \" output buffer is \"\n                << output_buffer_for_current_stream_\n                << \", should generate one buffer for one stream collapse\";\n    return STATUS_INVALID;\n  }\n\n  if (output_buffer_for_current_stream_ == 0 && input_has_stream_end_) {\n    // collapse over, but has no data, we need generate empty from valid input\n    FillPlaceholderOutput(true, false);\n  }\n\n  return STATUS_OK;\n}\n\nStatus StreamCollapseFlowUnitDataContext::GenerateOutput() {\n  if (output_buffer_for_current_stream_ >= 1 && !cur_output_error_.empty()) {\n    cur_output_error_.clear();\n  }\n\n  return FlowUnitDataContext::GenerateOutput();\n}\n\nStatus StreamCollapseFlowUnitDataContext::GenerateOutputPlaceholder() {\n  if (!(end_flag_received_ && !HasValidOutput())) {\n    return 
STATUS_OK;\n  }\n  // receive end and no buffer generated, try generate empty from input\n  // placehold\n  FillPlaceholderOutput(false, false);\n  return STATUS_OK;\n}\n\nvoid StreamCollapseFlowUnitDataContext::UpdateBufferIndexInfo(\n    const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n    const std::shared_ptr<BufferIndexInfo> &parent_buffer) {\n  cur_buffer->GetProcessInfo()->SetType(BufferProcessType::COLLAPSE);\n\n  auto expand_from_buffer = parent_buffer->GetInheritInfo()->GetInheritFrom();\n  cur_buffer->SetInheritInfo(expand_from_buffer->GetInheritInfo());\n  cur_buffer->SetIndex(expand_from_buffer->GetIndex());\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/dynamic_graph/context.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/context.h\"\n\n#include <utility>\n\n#include \"modelbox/data_handler.h\"\n#include \"modelbox/modelbox_engine.h\"\n\n#define MAX_INPUT_QUEUE_SIZE 10\nnamespace modelbox {\nHandlerContext::HandlerContext(std::weak_ptr<ModelBoxEngine> &env) {\n  env_ = env;\n}\n\nHandlerContext::~HandlerContext() = default;\n\nvoid HandlerContext::SetMeta(const std::string &key, const std::string &value) {\n  meta_.emplace(key, value);\n}\n\nstd::string HandlerContext::GetMeta(const std::string &key) {\n  if (meta_.find(key) == meta_.end()) {\n    return \"\";\n  }\n  return meta_[key];\n}\n\nstd::shared_ptr<GraphState> HandlerContext::GetGraphState() {\n  return graph_state_;\n}\n\nstd::shared_ptr<FlowUnitDesc> HandlerContext::GetFlowUnitDesc() {\n  return desc_;\n}\n\nvoid HandlerContext::SetFlowUnitDesc(\n    const std::shared_ptr<FlowUnitDesc> &desc) {\n  desc_ = desc;\n}\n\nvoid HandlerContext::SetGraphState(const std::shared_ptr<GraphState> &state) {\n  graph_state_ = state;\n}\n\nvoid HandlerContext::Close(){};\n\nInputContext::InputContext(std::weak_ptr<ModelBoxEngine> env)\n    : HandlerContext(env) {}\n\nInputContext::~InputContext() {\n  if (extern_buffer_list_) {\n    extern_buffer_list_->Reset();\n  }\n  extern_buffer_list_ = nullptr;\n  extern_data_map_ = nullptr;\n}\n\nStatus 
InputContext::RunGraph(const std::shared_ptr<DataHandler> &handler) {\n  MBLOG_ERROR << \"input handler not support next function.\";\n  return STATUS_FAULT;\n}\n\nvoid InputContext::SetExternPtr(\n    std::shared_ptr<void> extern_data_map,\n    std::shared_ptr<BufferList> extern_buffer_list) {\n  extern_data_map_ = std::move(extern_data_map);\n  extern_buffer_list_ = std::move(extern_buffer_list);\n}\n\nvoid InputContext::Close() {\n  if (extern_data_map_) {\n    auto externdata =\n        std::static_pointer_cast<ExternalDataMap>(extern_data_map_);\n    externdata->Shutdown();\n  }\n}\n\nstd::shared_ptr<BufferList> InputContext::GetBufferList(\n    const std::string &key) {\n  if (data_map_.size() <= 0) {\n    return nullptr;\n  }\n  return data_map_[key];\n}\nStatus InputContext::PushData(const std::string &key,\n                              const std::shared_ptr<BufferList> &bufferlist) {\n  auto save_buffer = [&]() {\n    if (data_map_.find(key) != data_map_.end()) {\n      for (auto &buffer : *bufferlist) {\n        data_map_[key]->PushBack(buffer);\n      }\n      bufferlist->Reset();\n    }\n  };\n\n  if (extern_data_map_ == nullptr || extern_buffer_list_ == nullptr) {\n    if (data_map_.find(key) == data_map_.end()) {\n      data_map_[key] = std::make_shared<BufferList>();\n    }\n\n    if (data_map_[key]->Size() > MAX_INPUT_QUEUE_SIZE) {\n      const auto *msg =\n          \"temp bufferlist store too many buffers,please use it firstly.\";\n      MBLOG_ERROR << msg;\n      return {STATUS_INVALID, msg};\n    }\n    save_buffer();\n  }\n\n  if (extern_data_map_ != nullptr && extern_buffer_list_ != nullptr) {\n    auto externdata =\n        std::static_pointer_cast<ExternalDataMap>(extern_data_map_);\n    auto flowunit_error = externdata->GetSessionContext()->GetError();\n    if (flowunit_error) {\n      auto error_msg = flowunit_error->GetDesc();\n      return {STATUS_FAULT, error_msg};\n    }\n    if (data_map_.find(key) != data_map_.end() && 
data_map_[key]->Size() > 0) {\n      save_buffer();\n      auto status = externdata->Send(key, data_map_[key]);\n      data_map_[key]->Reset();\n    } else {\n      auto status = externdata->Send(key, bufferlist);\n      bufferlist->Reset();\n    }\n  }\n  return STATUS_OK;\n}\n\nBufferListContext::BufferListContext(std::weak_ptr<ModelBoxEngine> env)\n    : HandlerContext(env) {}\n\nBufferListContext::~BufferListContext() = default;\n\nStatus BufferListContext::RunGraph(\n    const std::shared_ptr<DataHandler> &handler) {\n  MBLOG_ERROR << \"bufferlist handler not support next function.\";\n  return STATUS_FAULT;\n}\nStatus BufferListContext::PushData(\n    const std::string &key, const std::shared_ptr<BufferList> &bufferlist) {\n  if (data_map_.find(key) == data_map_.end()) {\n    data_map_[key] = std::make_shared<BufferList>();\n  }\n\n  for (auto &buffer : *bufferlist) {\n    data_map_[key]->PushBack(buffer);\n  }\n  bufferlist->Reset();\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<BufferList> BufferListContext::GetBufferList(\n    const std::string &key) {\n  if (data_map_.find(key) == data_map_.end()) {\n    return nullptr;\n  }\n  if (data_map_[key]->Size() <= 0) {\n    return nullptr;\n  }\n\n  return data_map_[key];\n}\n\nStreamContext::StreamContext(std::weak_ptr<ModelBoxEngine> env)\n    : HandlerContext(env) {\n  end_flag_ = false;\n}\n\nStreamContext::~StreamContext() = default;\n\nstd::shared_ptr<BufferList> StreamContext::GetBufferList(\n    const std::string &key) {\n  MBLOG_ERROR << \"stream context not support get bufferlist.\";\n  return nullptr;\n}\n\nStatus StreamContext::PushData(const std::string &key,\n                               const std::shared_ptr<BufferList> &bufferlist) {\n  return STATUS_FAULT;\n}\n\nStatus StreamContext::RunGraph(const std::shared_ptr<DataHandler> &handler) {\n  if (env_.lock() == nullptr) {\n    MBLOG_ERROR << \"env is nullptr, please check input is right\";\n    return STATUS_FAULT;\n  }\n  auto graph_state = 
GetGraphState();\n  if (nullptr == graph_state) {\n    MBLOG_ERROR << \"graph state is nullptr, please set input as the first node\";\n    return STATUS_FAULT;\n  }\n  if (graph_state->graph_ == nullptr) {\n    auto env = env_.lock();\n\n    auto dynamic_graph = env->CreateDynamicGraph(graph_state->gcgraph_);\n\n    if (STATUS_OK != env->FeedData(dynamic_graph, graph_state->gcgraph_)) {\n      MBLOG_ERROR << \"failed feed data into input\";\n      return STATUS_FAULT;\n    }\n    graph_state->graph_ = dynamic_graph;\n  }\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/dynamic_graph/data_handler.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/data_handler.h\"\n\n#include <utility>\n\n#include \"modelbox/modelbox_engine.h\"\n\nnamespace modelbox {\n\nDataHandler::DataHandler(BindNodeType type,\n                         const std::shared_ptr<ModelBoxEngine> &env) {\n  env_ = env;\n  data_type_ = type;\n  switch (data_type_) {\n    case STREAM_NODE:\n      context_ = std::make_shared<StreamContext>(env);\n      break;\n    case BUFFERLIST_NODE:\n      context_ = std::make_shared<BufferListContext>(env);\n      break;\n    case VIRTUAL_NODE:\n      context_ = std::make_shared<InputContext>(env);\n      break;\n    default:\n      MBLOG_ERROR << \"failed find right type\";\n  }\n}\n\nDataHandler::~DataHandler() { context_ = nullptr; }\n\nvoid DataHandler::Close() {\n  closed_ = true;\n  if (context_) {\n    context_->Close();\n  }\n}\n\nbool DataHandler::IsClosed() { return closed_; }\n\nvoid DataHandler::SetEnv(const std::shared_ptr<ModelBoxEngine> &env) {\n  env_ = env;\n}\n\nstd::shared_ptr<ModelBoxEngine> DataHandler::GetEnv() { return env_.lock(); }\n\nDataHandlerType DataHandler::GetDataHandlerType() { return data_handler_type_; }\n\nvoid DataHandler::SetDataHandlerType(const DataHandlerType &type) {\n  data_handler_type_ = type;\n}\n\nstd::shared_ptr<GraphState> DataHandler::GetBindGraph() {\n  if (context_ == nullptr) {\n    
return nullptr;\n  }\n  return context_->GetGraphState();\n}\n\nStatus DataHandler::SetBindGraph(const std::shared_ptr<GraphState> &gcgraph) {\n  if (context_ == nullptr) {\n    MBLOG_ERROR << \"context_ is null, SetBindGraph failed\";\n    return STATUS_FAULT;\n  }\n  context_->SetGraphState(gcgraph);\n  return STATUS_SUCCESS;\n}\n\nstd::unordered_map<std::string, std::string> DataHandler::GetPortMap() {\n  return port_to_port_;\n};\n\nvoid DataHandler::SetNodeName(const std::string &name) { node_name_ = name; }\n\nstd::string DataHandler::GetNodeName() { return node_name_; }\n\nstd::set<std::string> DataHandler::GetPortNames() { return port_names_; }\n\nBindNodeType DataHandler::GetBindNodeType() { return data_type_; }\n\nvoid DataHandler::SetBindNodeType(BindNodeType type) { data_type_ = type; }\n\nvoid DataHandler::SetExternData(std::shared_ptr<void> extern_map,\n                                std::shared_ptr<BufferList> &bufferlist) {\n  if (context_ == nullptr) {\n    MBLOG_ERROR << \"context_ is null, SetExternData failed\";\n    return;\n  }\n  if (data_type_ == VIRTUAL_NODE) {\n    auto context = std::static_pointer_cast<InputContext>(context_);\n    context->SetExternPtr(std::move(extern_map), bufferlist);\n  }\n}\n\nStatus DataHandler::PushData(std::shared_ptr<DataHandler> &data,\n                             const std::string &key) {\n  if (GetDataHandlerType() != INPUT) {\n    MBLOG_ERROR << \"only input data can receive other data\";\n    return STATUS_FAULT;\n  }\n\n  if (data->GetPortNames().size() > 0) {\n    auto port_name1 = *(data->GetPortNames().begin());\n  } else {\n    MBLOG_ERROR << \"port name is nullptr\";\n    return STATUS_FAULT;\n  }\n  auto port_name = *(data->GetPortNames().begin());\n  auto bufferlist = data->GetBufferList(port_name);\n  return context_->PushData(key, bufferlist);\n}\n\nStatus DataHandler::PushData(std::shared_ptr<Buffer> &data,\n                             const std::string &key) {\n  if (context_ == nullptr) {\n 
   MBLOG_ERROR << \"context is nullptr, datahanler init failed\";\n    return STATUS_FAULT;\n  }\n  port_names_.emplace(key);\n  auto bufferlist = std::make_shared<BufferList>(data);\n  return context_->PushData(key, bufferlist);\n}\n\nStatus DataHandler::PushData(std::shared_ptr<BufferList> &data,\n                             const std::string &key) {\n  if (context_ == nullptr) {\n    MBLOG_ERROR << \"context is nullptr, datahanler init failed\";\n    return STATUS_FAULT;\n  }\n  port_names_.emplace(key);\n  return context_->PushData(key, data);\n}\n\nStatus DataHandler::SetMeta(const std::string &key, const std::string &data) {\n  if (context_ == nullptr) {\n    return STATUS_FAULT;\n  }\n  if (key == \"\" || data == \"\") {\n    MBLOG_ERROR << \"input key or value is invalid\";\n    return STATUS_FAULT;\n  }\n  context_->SetMeta(key, data);\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DataHandler> DataHandler::GetDataHandler(\n    const std::string &key) {\n  if (data_type_ == VIRTUAL_NODE) {\n    MBLOG_ERROR << \"input node not support GetDataHandler function\";\n    return nullptr;\n  }\n  if (port_names_.find(key) == port_names_.end()) {\n    MBLOG_ERROR << \"faild find port name: \" << key\n                << \" in node: \" << node_name_;\n    return nullptr;\n  }\n  auto data = std::make_shared<DataHandler>(data_type_);\n\n  data->SetNodeName(node_name_);\n  auto graph_state = GetBindGraph();\n  data->SetBindGraph(graph_state);\n  std::set<std::string> ports = {key};\n  data->SetPortNames(ports);\n  if (data_type_ == BUFFERLIST_NODE) {\n    auto bufferlist = data->GetBufferList(key);\n    data->context_->PushData(key, bufferlist);\n  }\n  return data;\n}\n\nStatus DataHandler::SetDataHandler(\n    const std::map<std::string, std::shared_ptr<DataHandler>> &data_map) {\n  if (GetBindNodeType() == BUFFERLIST_NODE) {\n    MBLOG_ERROR\n        << \"function SetDataHandler not support node type: bufferlistnode\";\n    return STATUS_FAULT;\n  }\n\n  for (const 
auto &iter : data_map) {\n    if (iter.second->GetBindNodeType() == BUFFERLIST_NODE) {\n      MBLOG_ERROR\n          << \"function SetDataHandler not support node type: bufferlistnode\";\n      return STATUS_FAULT;\n    }\n\n    auto ports = iter.second->GetPortNames();\n    if (ports.size() != 1) {\n      std::string err_msg = \"input data handler has one more ports\";\n      return {STATUS_FAULT, err_msg};\n    }\n\n    auto in_port_name = iter.first;\n    auto temp_data = iter.second;\n    auto node_name = temp_data->GetNodeName();\n    auto out_port_name = *(ports.begin());\n    context_->data_map_[in_port_name] = temp_data->GetBufferList(out_port_name);\n    port_to_port_[in_port_name] = out_port_name;\n\n    port_to_node_[in_port_name] = node_name;\n    node_type_map_[node_name] = temp_data->GetBindNodeType();\n    if (GetBindGraph() == nullptr) {\n      SetBindGraph(iter.second->GetBindGraph());\n    }\n    if (GetBindGraph() != iter.second->GetBindGraph()) {\n      std::string msg = \"sub datahandler bind different graph\";\n      MBLOG_ERROR << msg;\n      return {STATUS_FAULT, msg};\n    }\n  }\n  return STATUS_SUCCESS;\n}\n\nStatus DataHandler::CheckInputType(BindNodeType &node_type) {\n  if (node_type_map_.size() == 0) {\n    return STATUS_OK;\n  }\n\n  auto type = node_type_map_.begin()->second;\n  for (auto &iter : node_type_map_) {\n    if (type != iter.second) {\n      return STATUS_FAULT;\n    }\n  }\n  node_type = type;\n  return STATUS_OK;\n}\n\nstd::shared_ptr<BufferList> DataHandler::GetBufferList(const std::string &key) {\n  if (context_ == nullptr || data_type_ != BUFFERLIST_NODE) {\n    return nullptr;\n  }\n\n  return context_->GetBufferList(key);\n}\n\nstd::shared_ptr<DataHandler> DataHandler::operator[](\n    const std::string &port_name) {\n  return GetDataHandler(port_name);\n}\n\nStatus DataHandler::InsertOutputNode(std::shared_ptr<HandlerContext> &context) {\n  std::shared_ptr<FlowUnitDesc> desc = context->GetFlowUnitDesc();\n  if 
(desc == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  auto gcgraph = context->GetGraphState()->gcgraph_;\n  auto node = context->GetGraphState()->gcgraph_->GetNode(node_name_);\n  auto outputs = desc->GetFlowUnitOutput();\n  if (outputs.size() > 0) {\n    for (auto &iter : outputs) {\n      auto outport_name = iter.GetPortName();  //\n      auto outnode = std::make_shared<GCNode>();\n      if (context->GetGraphState()->gcgraph_) {\n        outnode->Init(outport_name, gcgraph);\n        outnode->SetConfiguration(\"type\", \"output\");\n        context->GetGraphState()->gcgraph_->AddNode(outnode);\n\n        env_.lock()->InsertGrahEdge(gcgraph, node, outport_name, outnode,\n                                    outport_name);\n      }\n    }\n  }\n  return STATUS_OK;\n}\nstd::shared_ptr<DataHandler> DataHandler::GetData() {\n  if (context_ == nullptr || GetBindNodeType() != STREAM_NODE) {\n    return nullptr;\n  }\n\n  if (context_->GetGraphState()->graph_ == nullptr) {\n    if (InsertOutputNode(context_) != STATUS_OK) {\n      return nullptr;\n    }\n\n    auto status = context_->RunGraph(shared_from_this());\n    if (status != STATUS_OK) {\n      return nullptr;\n    }\n  }\n\n  OutputBufferList map_buffer_list;\n  auto external_data = std::static_pointer_cast<ExternalDataMap>(\n      context_->GetGraphState()->external_data_);\n  auto status = external_data->Recv(map_buffer_list);\n  if (status != STATUS_SUCCESS) {\n    return nullptr;\n  }\n\n  auto buffer = std::make_shared<DataHandler>(BUFFERLIST_NODE);\n  for (auto &iter : map_buffer_list) {\n    auto temp_buffer = iter.second;\n    buffer->PushData(temp_buffer, iter.first);\n  }\n  return buffer;\n}\nStatus DataHandler::SetPortNames(std::set<std::string> &port_names) {\n  port_names_ = port_names;\n  return STATUS_OK;\n}\n\nstd::string DataHandler::GetMeta(std::string &key) {\n  if (context_ == nullptr) {\n    return \"\";\n  }\n  return context_->GetMeta(key);\n}\n\nStatus DataHandler::GetError() { return 
error_; }\n\nvoid DataHandler::SetError(const Status &status) { error_ = status; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/dynamic_graph/modelbox_engine.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/modelbox_engine.h\"\n\n#include <securec.h>\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/data_handler.h\"\n#include \"modelbox/single_node.h\"\n#include \"scheduler/flow_scheduler.h\"\n\nnamespace modelbox {\n\nconstexpr const char *GRAPH_VIRTUAL_NODE = \"inner_virtual_node_\";\n\nstatic std::shared_ptr<DataHandler> err_msg(const Status &status) {\n  auto data_handler = std::make_shared<DataHandler>();\n  data_handler->SetError(status);\n  MBLOG_ERROR << status.Errormsg();\n  return data_handler;\n}\n\nModelBoxEngine::ModelBoxEngine() = default;\n\nModelBoxEngine::~ModelBoxEngine() { Close(); }\n\nstd::shared_ptr<DeviceManager> ModelBoxEngine::GetDeviceManager() {\n  return device_mgr_;\n}\n\nstd::shared_ptr<FlowUnitManager> ModelBoxEngine::GetFlowUnitManager() {\n  return flowunit_mgr_;\n}\n\nstd::shared_ptr<Scheduler> ModelBoxEngine::GetScheduler() { return scheduler_; }\n\nstd::shared_ptr<Profiler> ModelBoxEngine::GetProfiler() { return profiler_; }\n\nStatus ModelBoxEngine::Init(std::shared_ptr<Configuration> &config) {\n  config_ = config;\n  drivers_ = std::make_shared<Drivers>();\n  device_mgr_ = std::make_shared<DeviceManager>();\n  flowunit_mgr_ = std::make_shared<FlowUnitManager>();\n  scheduler_ = 
std::make_shared<FlowScheduler>();\n  profiler_ = nullptr;\n  std::string msg;\n  if (!(drivers_ && device_mgr_ && flowunit_mgr_)) {\n    msg = \"drivers, flowunit_mgr or device is null, return\";\n    MBLOG_ERROR << msg;\n    return STATUS_FAULT;\n  }\n\n  auto ret = drivers_->Initialize(config_->GetSubConfig(\"drivers\"));\n  if (!ret) {\n    msg = \"driver init failed\";\n    MBLOG_ERROR << msg << \", \" << ret.WrapErrormsgs();\n    return {ret, msg};\n  }\n\n  Defer {\n    if (ret == STATUS_OK) {\n      return;\n    }\n    Close();\n  };\n\n  ret = drivers_->Scan();\n  if (!ret) {\n    msg = \"Scan driver failed.\";\n    MBLOG_ERROR << msg << \": \" << ret.WrapErrormsgs();\n    return {ret, msg};\n  }\n\n  ret = device_mgr_->Initialize(drivers_, nullptr);\n  if (!ret) {\n    msg = \"Inital device failed\";\n    MBLOG_ERROR << msg << \", \" << ret.WrapErrormsgs();\n    return {ret, msg};\n  }\n\n  ret = flowunit_mgr_->Initialize(drivers_, device_mgr_, nullptr);\n  if (!ret) {\n    msg = \"Initial flowunit manager failed \";\n    MBLOG_ERROR << msg << \", \" << ret.WrapErrormsgs();\n    return {ret, msg};\n  }\n\n  ret = scheduler_->Init(config_);\n  if (!ret) {\n    msg = \"scheduler_ init  failed\";\n    MBLOG_ERROR << msg << \", \" << ret.WrapErrormsgs();\n    return {ret, msg};\n  }\n\n  scheduler_->RunAsync();\n  return STATUS_OK;\n}\n\nstd::shared_ptr<FlowUnitDesc> ModelBoxEngine::GetFlowunitDesc(\n    const std::string &name, const std::map<std::string, std::string> &config) {\n  std::string msg;\n  if (flowunit_mgr_ == nullptr) {\n    MBLOG_ERROR << \"failed get flowunit manager, please init Unit firstly\";\n    return nullptr;\n  }\n  auto device_iter = config.find(\"device\");\n  if (device_iter == config.end()) {\n    return nullptr;\n  }\n  auto device = device_iter->second;\n  auto desc = flowunit_mgr_->GetFlowUnitDesc(device_iter->second, name);\n  if (desc == nullptr) {\n    MBLOG_ERROR << \"failed find flowunit \" << name << \" description\";\n    
return nullptr;\n  }\n\n  return desc;\n}\n\nbool CheckMapEquate(const std::map<std::string, std::string> &first_map,\n                    const std::map<std::string, std::string> &second_map) {\n  if (first_map.size() != second_map.size()) {\n    return false;\n  }\n\n  for (const auto &iter : first_map) {\n    auto temp_iter = second_map.find(iter.first);\n    if (temp_iter == second_map.end()) {\n      return false;\n    }\n    if (temp_iter->second != iter.second) {\n      return false;\n    }\n  }\n  return true;\n}\n\nstd::shared_ptr<NodeBase> ModelBoxEngine::CheckNodeExist(\n    const std::string &name, const std::map<std::string, std::string> &config) {\n  auto iter = nodes_config_.find(name);\n  if (iter == nodes_config_.end()) {\n    return nullptr;\n  }\n\n  auto node_config_map = iter->second;\n  if (node_config_map.size() == 0) {\n    return nullptr;\n  }\n\n  for (auto &temp_iter : node_config_map) {\n    if (CheckMapEquate(temp_iter.first, config)) {\n      return temp_iter.second;\n    }\n  }\n\n  return nullptr;\n}\n\nstd::shared_ptr<NodeBase> ModelBoxEngine::CreateDynamicNormalNode(\n    const std::string &name,\n    const std::map<std::string, std::string> &config_map) {\n  auto node = CheckNodeExist(name, config_map);\n  if (node != nullptr) {\n    return node;\n  }\n\n  ConfigurationBuilder builder;\n  auto config = builder.Build();\n  for (const auto &iter : config_map) {\n    config->SetProperty(iter.first, iter.second);\n  }\n\n  auto flowunit_desc = GetFlowunitDesc(name, config_map);\n  if (flowunit_desc == nullptr) {\n    MBLOG_ERROR << \"failed find flowunit: \" << name;\n    return nullptr;\n  }\n\n  auto unit_type = config->GetString(\"device\");\n  auto unit_device_id = config->GetString(\"deviceid\");\n  auto flow_stats = Statistics::GetGlobalItem()->GetItem(STATISTICS_ITEM_FLOW);\n  auto dynamic_node =\n      std::make_shared<SingleNode>(name, unit_type, unit_device_id,\n                                   flowunit_mgr_, config, 
nullptr, flow_stats);\n  auto status = dynamic_node->Init();\n\n  if (status != STATUS_OK) {\n    return nullptr;\n  }\n  nodes_config_[name][config_map] = dynamic_node;\n  return dynamic_node;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::CreateInput(\n    const std::set<std::string> &port_map) {\n  auto gcgraph = std::make_shared<GCGraph>();\n  gcgraph->Init(nullptr);\n  auto grah_config = config_->GetSubConfig(\"graph\");\n  if (grah_config != nullptr) {\n    gcgraph->SetConfiguration(grah_config);\n  }\n  auto data_handler =\n      std::make_shared<DataHandler>(VIRTUAL_NODE, shared_from_this());\n  data_handler->SetDataHandlerType(INPUT);\n  data_handler->SetNodeName(GRAPH_VIRTUAL_NODE);\n  for (const auto &iter : port_map) {\n    auto gcnode = std::make_shared<GCNode>();\n    gcnode->Init(iter, gcgraph);\n    gcnode->SetConfiguration(\"type\", \"input\");\n    gcgraph->AddNode(gcnode);\n    gcgraph->SetFirstNode(gcnode);\n    gcnode->SetOutDataHandler(data_handler);\n\n    if (port_map.size() == 1) {\n      data_handler->SetNodeName(iter);\n    }\n  }\n\n  auto graph_state = std::make_shared<GraphState>();\n  graph_state->gcgraph_ = gcgraph;\n  data_handler->SetBindGraph(graph_state);\n\n  return data_handler;\n}\n\nStatus ModelBoxEngine::InsertGrahEdge(std::shared_ptr<GCGraph> &root_graph,\n                                      std::shared_ptr<GCNode> &input_node,\n                                      std::string &input_port,\n                                      std::shared_ptr<GCNode> &output_node,\n                                      std::string &output_port) {\n  if (root_graph == nullptr || input_node == nullptr ||\n      output_node == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  auto gcedge = std::make_shared<GCEdge>();\n  if (STATUS_OK != gcedge->Init(root_graph)) {\n    return STATUS_FAULT;\n  }\n\n  gcedge->SetHeadPort(input_port);\n  gcedge->SetTailPort(output_port);\n  gcedge->SetHeadNode(input_node);\n  
gcedge->SetTailNode(output_node);\n  root_graph->AddEdge(gcedge);\n  return STATUS_OK;\n}\n\nStatus ModelBoxEngine::CheckInputPort(\n    const std::shared_ptr<FlowUnitDesc> &flowunit_desc,\n    const std::shared_ptr<DataHandler> &data_handler) {\n  auto flowunit_input = flowunit_desc->GetFlowUnitInput();\n  if (data_handler->GetPortNames().size() == 1 && flowunit_input.size() == 1) {\n    return STATUS_OK;\n  }\n\n  if (data_handler->GetPortNames().size() != 0) {\n    auto inport_names = data_handler->GetPortNames();\n    if (inport_names.size() != flowunit_input.size()) {\n      MBLOG_ERROR << \"not all input port has data\";\n      return STATUS_INVALID;\n    }\n    for (auto &iter : flowunit_input) {\n      if (inport_names.find(iter.GetPortName()) == inport_names.end()) {\n        MBLOG_ERROR << \"port:\" << iter.GetPortName() << \" has no data\";\n        return STATUS_INVALID;\n      }\n    }\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<GCNode> ModelBoxEngine::CreateDynamicStreamNode(\n    const std::string &name, const std::map<std::string, std::string> &config,\n    const std::shared_ptr<DataHandler> &data_handler) {\n  auto root_graph = data_handler->GetBindGraph();\n  if (root_graph == nullptr) {\n    return nullptr;\n  }\n\n  auto flowunit_desc = GetFlowunitDesc(name, config);\n  auto gcnode = std::make_shared<GCNode>();\n  gcnode->Init(name, root_graph->gcgraph_);\n  for (const auto &iter : config) {\n    gcnode->SetConfiguration(iter.first, iter.second);\n  }\n\n  auto flowunit_input = flowunit_desc->GetFlowUnitInput();\n  for (auto &iter : flowunit_input) {\n    gcnode->SetInputPort(iter.GetPortName());\n  }\n\n  std::set<std::string> outport_names;\n  auto flowunit_outports = flowunit_desc->GetFlowUnitOutput();\n  for (auto &iter : flowunit_outports) {\n    outport_names.insert(iter.GetPortName());\n    gcnode->SetOutputPort(iter.GetPortName());\n  }\n\n  root_graph->gcgraph_->AddNode(gcnode);\n  return gcnode;\n}\n\nstd::shared_ptr<GCNode> 
ModelBoxEngine::ProcessOutputHandler(\n    const std::shared_ptr<DataHandler> &data_handler,\n    std::shared_ptr<GCNode> &gcnode, std::shared_ptr<GCGraph> &root_graph) {\n  auto inport_name = *(data_handler->GetPortNames().begin());\n  auto input_node = root_graph->GetNode(data_handler->GetNodeName());\n  if (input_node == nullptr) {\n    MBLOG_ERROR << \"failed find input node: \" << data_handler->GetNodeName();\n    return nullptr;\n  }\n\n  if (gcnode->GetInputPorts()->size() > 0) {\n    auto outport_name = *(gcnode->GetInputPorts()->begin());\n    if (InsertGrahEdge(root_graph, input_node, inport_name, gcnode,\n                       outport_name) != STATUS_OK) {\n      MBLOG_ERROR << \"InsertGrahEdge failed\";\n      return nullptr;\n    }\n  }\n  return gcnode;\n}\n\nstd::shared_ptr<GCNode> ModelBoxEngine::ProcessVirtualHandler(\n    std::shared_ptr<GCNode> &gcnode, std::shared_ptr<GCGraph> &root_graph) {\n  auto virtual_nodes = root_graph->GetFirstNodes();\n  for (auto &iter : virtual_nodes) {\n    std::string inport_name = iter->GetNodeName();\n    auto output_node = gcnode;\n    auto outport_name = *(gcnode->GetInputPorts()->begin());\n    if (InsertGrahEdge(root_graph, iter, inport_name, gcnode, outport_name) !=\n        STATUS_OK) {\n      MBLOG_ERROR << \"InsertGrahEdge failed,inport_name:\" << inport_name\n                  << \", outport_name \" << outport_name;\n      return nullptr;\n    }\n  }\n  return gcnode;\n}\n\nstd::shared_ptr<ErrorInfo> ModelBoxEngine::GetErrorInfo() { return error_info_; }\n\nstd::shared_ptr<GCNode> ModelBoxEngine::CreateDynamicGCGraph(\n    const std::string &name, const std::map<std::string, std::string> &config,\n    const std::shared_ptr<DataHandler> &data_handler) {\n  auto gcnode = CreateDynamicStreamNode(name, config, data_handler);\n  if (gcnode == nullptr) {\n    MBLOG_ERROR << \"CreateDynamicStreamNode failed\";\n    return nullptr;\n  }\n\n  if (data_handler == nullptr) {\n    return gcnode;\n  }\n  auto 
root_graph = data_handler->GetBindGraph()->gcgraph_;\n  if (data_handler->GetDataHandlerType() == OUTPUT) {\n    return ProcessOutputHandler(data_handler, gcnode, root_graph);\n  }\n\n  if (data_handler->GetBindNodeType() == VIRTUAL_NODE) {\n    return ProcessVirtualHandler(gcnode, root_graph);\n  }\n\n  auto port_map = data_handler->GetPortMap();\n  for (auto &iter : port_map) {\n    std::string node_name = data_handler->port_to_node_[iter.first];\n    auto outport_name = iter.first;\n    auto inport_name = iter.second;\n    auto input_node = root_graph->GetNode(node_name);\n    auto output_node = gcnode;\n    if (InsertGrahEdge(root_graph, input_node, inport_name, gcnode,\n                       outport_name) != STATUS_OK) {\n      MBLOG_ERROR << \"InsertGrahEdge failed\";\n      return nullptr;\n    }\n  }\n\n  return gcnode;\n}\n\nStatus ModelBoxEngine::CheckBuffer(const std::shared_ptr<FlowUnitDesc> &desc,\n                                   const std::shared_ptr<DataHandler> &data) {\n  if (data->GetDataHandlerType() == OUTPUT) {\n    auto input_num = desc->GetFlowUnitInput().size();\n    auto output_num = data->GetPortNames().size();\n    if (desc->GetFlowUnitInput().size() != 1 ||\n        data->GetPortNames().size() != 1) {\n      MBLOG_ERROR << \"node: \" << desc->GetFlowUnitName()\n                  << \"must use correct input: \"\n                  << \"input_num(\" << std::to_string(input_num)\n                  << \"),output_num(\" << std::to_string(output_num) << \").\";\n      return STATUS_FAULT;\n    }\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::BindDataHanlder(\n    std::shared_ptr<DataHandler> &data_handler,\n    std::shared_ptr<GCNode> &gcnode) {\n  gcnode->SetOutDataHandler(data_handler);\n  auto outports = gcnode->GetOutputPorts();\n  auto outport_names =\n      std::set<std::string>(outports->begin(), outports->end());\n  auto name = gcnode->GetNodeName();\n  data_handler->SetPortNames(outport_names);\n\n  
data_handler->SetNodeName(name);\n  data_handler->SetDataHandlerType(OUTPUT);\n  data_handler->SetEnv(shared_from_this());\n  data_handler->SetBindNodeType(STREAM_NODE);\n  gcnode->SetOutDataHandler(data_handler);\n  return data_handler;\n}\nStatus ModelBoxEngine::RunGraph(std::shared_ptr<DataHandler> &data_handler) {\n  Status ret = STATUS_OK;\n\n  if (data_handler->context_->GetGraphState()->graph_) {\n    MBLOG_WARN << \"graph has been build and run\";\n    return STATUS_EXIST;\n  }\n\n  auto gcgraph = data_handler->context_->GetGraphState()->gcgraph_;\n  if (gcgraph == nullptr) {\n    MBLOG_WARN << \"DataHandler has no bind graph\";\n    return STATUS_FAULT;\n  }\n\n  auto dynamic_graph = std::make_shared<DynamicGraph>();\n  auto status =\n      dynamic_graph->Initialize(flowunit_mgr_, device_mgr_, profiler_, config_);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"graph init failed\";\n    return STATUS_FAULT;\n  }\n\n  status = dynamic_graph->Build(gcgraph);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"build graph failed: \" << status.Errormsg();\n    return status;\n  }\n  graphs_.emplace(dynamic_graph);\n\n  auto scheduler = GetScheduler();\n  if (scheduler == nullptr) {\n    MBLOG_ERROR << \"scheduler has not been created\";\n    return STATUS_INVALID;\n  }\n\n  if (scheduler->Build(*dynamic_graph) != STATUS_OK) {\n    MBLOG_ERROR << \"failed build graph\";\n    return STATUS_FAULT;\n  }\n  auto first_nodes = gcgraph->GetFirstNodes();\n\n  if (first_nodes.size() > 0) {\n    FeedData(dynamic_graph, gcgraph);\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DynamicGraph> ModelBoxEngine::CreateDynamicGraph(\n    std::shared_ptr<GCGraph> &graph) {\n  auto dynamic_graph = std::make_shared<DynamicGraph>();\n  auto ret = dynamic_graph->Initialize(GetFlowUnitManager(), GetDeviceManager(),\n                                       GetProfiler(), GetConfig());\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"init graph failed\";\n    return nullptr;\n  }\n  
graph->ShowAllNode();\n  graph->ShowAllEdge();\n  ret = dynamic_graph->Build(graph);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"build dynmaic graph failed: \" << ret.Errormsg();\n    return nullptr;\n  }\n\n  graphs_.emplace(dynamic_graph);\n\n  auto scheduler = GetScheduler();\n  if (scheduler == nullptr) {\n    MBLOG_ERROR << \"scheduler is not inited\";\n    return nullptr;\n  }\n\n  if (STATUS_OK != scheduler->Build(*dynamic_graph)) {\n    MBLOG_ERROR << \"add graph to scheduler failed\";\n    return nullptr;\n  }\n\n  return dynamic_graph;\n}\n\nStatus ModelBoxEngine::SendExternalData(\n    std::shared_ptr<ExternalDataMap> &extern_datamap,\n    std::shared_ptr<BufferList> &buffer_list,\n    const std::shared_ptr<GCNode> &gcnode) {\n  auto input_data = gcnode->GetBindDataHandler();\n  if (input_data == nullptr) {\n    MBLOG_ERROR << \"failed find bind data handler for input.\";\n    return STATUS_FAULT;\n  }\n  auto node_name = gcnode->GetNodeName();\n  bool send_data = false;\n  auto input_buffer_list = input_data->context_->GetBufferList(node_name);\n  if (input_buffer_list == nullptr || input_buffer_list->Size() == 0) {\n    buffer_list->Build({1});\n  } else {\n    for (auto iter = input_buffer_list->begin();\n         iter != input_buffer_list->end(); iter++) {\n      buffer_list->PushBack(*iter);\n    }\n    send_data = true;\n  }\n\n  if (!input_data->context_->meta_.empty()) {\n    auto data_meta = std::make_shared<DataMeta>();\n    for (auto &iter : input_data->context_->meta_) {\n      data_meta->SetMeta(iter.first,\n                         std::make_shared<std::string>(iter.second));\n    }\n    extern_datamap->SetOutputMeta(node_name, data_meta);\n    send_data = true;\n  }\n  if (send_data) {\n    if (STATUS_OK != extern_datamap->Send(node_name, buffer_list)) {\n      return STATUS_FAULT;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus ModelBoxEngine::FeedData(std::shared_ptr<DynamicGraph> &dynamic_graph,\n                               
 std::shared_ptr<GCGraph> &gcgraph) {\n  if (dynamic_graph == nullptr || gcgraph == nullptr) {\n    MBLOG_ERROR << \"graph or gcgraph is nullptr .\";\n    return STATUS_FAULT;\n  }\n\n  auto extern_data = dynamic_graph->CreateExternalDataMap();\n  if (extern_data == nullptr) {\n    return STATUS_FAULT;\n  }\n  auto buffer_list = extern_data->CreateBufferList();\n  if (buffer_list == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  auto input_nodes = gcgraph->GetFirstNodes();\n  for (auto &iter : input_nodes) {\n    if (STATUS_SUCCESS != SendExternalData(extern_data, buffer_list, iter)) {\n      return STATUS_FAULT;\n    }\n  }\n  auto input_data = input_nodes.front()->GetBindDataHandler();\n  auto input_context =\n      std::static_pointer_cast<InputContext>(input_data->context_);\n  input_context->SetExternPtr(extern_data, buffer_list);\n  input_data->context_->GetGraphState()->external_data_ = extern_data;\n  input_data->context_->GetGraphState()->graph_ = dynamic_graph;\n  if (input_data->closed_) {\n    extern_data->Close();\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::Execute(\n    const std::string &name, std::map<std::string, std::string> config_map,\n    const std::map<std::string, std::shared_ptr<DataHandler>> &data) {\n  auto data_handler =\n      std::make_shared<DataHandler>(STREAM_NODE, shared_from_this());\n  auto ret = data_handler->SetDataHandler(data);\n  if (ret != STATUS_OK) {\n    data_handler->SetError(ret);\n    return data_handler;\n  }\n\n  return Execute(name, std::move(config_map), data_handler);\n}\n\nbool CheckPortisLinked(std::shared_ptr<GCGraph> &gcgraph,\n                       std::shared_ptr<GCNode> &gcnode,\n                       const std::string &port_name) {\n  auto edges = gcgraph->GetAllEdges();\n  for (auto &iter : edges) {\n    auto edge = iter.second;\n    if (edge->GetTailInPort() == port_name && edge->GetTailNode() == gcnode) {\n      return true;\n    }\n  }\n  return false;\n}\n\nbool 
CheckNodeIsLinked(std::shared_ptr<GCGraph> &gcgraph,\n                       std::shared_ptr<GCNode> &gcnode) {\n  bool result = true;\n  auto inports = gcnode->GetInputPorts();\n  for (const auto &iter : *inports) {\n    result &= CheckPortisLinked(gcgraph, gcnode, iter);\n  }\n  return result;\n}\n\nbool ModelBoxEngine::CheckisStream(\n    const std::shared_ptr<FlowUnitDesc> &desc,\n    const std::shared_ptr<DataHandler> &data_handler) {\n  if (desc->GetFlowType() == STREAM) {\n    return true;\n  }\n\n  if (data_handler == nullptr) {\n    return true;\n  }\n\n  if (desc->GetFlowType() == NORMAL) {\n    if (data_handler->GetBindNodeType() == STREAM_NODE) {\n      return true;\n    }\n\n    if (data_handler->GetBindNodeType() == VIRTUAL_NODE) {\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic void SetDefaultConfigValue(\n    const std::string &name, std::map<std::string, std::string> &config_map) {\n  if (config_map.find(\"type\") == config_map.end()) {\n    config_map[\"type\"] = \"flowunit\";\n  }\n  if (config_map.find(\"device\") == config_map.end()) {\n    config_map[\"device\"] = \"cpu\";\n  }\n  if (config_map.find(\"deviceid\") == config_map.end()) {\n    config_map[\"deviceid\"] = \"0\";\n  }\n  if (config_map.find(\"flowunit\") == config_map.end()) {\n    config_map[\"flowunit\"] = name;\n  }\n}\n\nStatus ModelBoxEngine::CheckInputFlowUnit(\n    const std::string &name, std::map<std::string, std::string> &config_map,\n    const std::shared_ptr<DataHandler> &buffers,\n    const std::shared_ptr<FlowUnitDesc> &desc) {\n  if (buffers != nullptr) {\n    if (buffers->GetError() != STATUS_SUCCESS) {\n      auto err_msg = \"node: \" + name + \", input data has error.\";\n      MBLOG_ERROR << err_msg;\n      return {STATUS_FAULT, err_msg};\n    }\n\n    if (buffers->GetBindGraph() != nullptr &&\n        buffers->GetBindGraph()->graph_ != nullptr) {\n      std::string err_msg = \"graph has been build, flowunit \" + name +\n                            
\" cannot been linked to this graph\";\n      MBLOG_ERROR << err_msg;\n      return {STATUS_FAULT, err_msg};\n    }\n  }\n\n  if (desc->GetFlowUnitInput().size() > 0 && buffers == nullptr) {\n    auto msg = \"must set input for flowunit: \" + name;\n    MBLOG_ERROR << msg;\n    return {STATUS_FAULT, msg};\n  }\n\n  auto node_type = STREAM_NODE;\n  if (buffers != nullptr) {\n    if (CheckBuffer(desc, buffers) != STATUS_OK) {\n      return {STATUS_INVALID, \"input DataHandler is invalid\"};\n    }\n    if (STATUS_OK != buffers->CheckInputType(node_type)) {\n      auto msg = \"check input failed, node name = \" + name;\n      return {STATUS_FAULT, msg};\n    }\n  }\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::ExecuteStreamNode(\n    const std::shared_ptr<FlowUnitDesc> &desc,\n    const std::shared_ptr<DataHandler> &buffers,\n    std::map<std::string, std::string> &config_map) {\n  auto stream_data_handler =\n      std::make_shared<DataHandler>(STREAM_NODE, shared_from_this());\n  stream_data_handler->context_->SetFlowUnitDesc(desc);\n  if (buffers == nullptr) {\n    stream_data_handler->SetBindGraph(std::make_shared<GraphState>());\n    auto gcgraph = std::make_shared<GCGraph>();\n    gcgraph->Init(nullptr);\n    auto graph_config = config_->GetSubConfig(\"graph\");\n    if (graph_config != nullptr) {\n      gcgraph->SetConfiguration(config_);\n    }\n\n    stream_data_handler->GetBindGraph()->gcgraph_ = gcgraph;\n    auto root_graph = stream_data_handler->GetBindGraph()->gcgraph_;\n    auto gcnode = CreateDynamicStreamNode(desc->GetFlowUnitName(), config_map,\n                                          stream_data_handler);\n    if (gcnode == nullptr) {\n      const auto *msg = \"CreateDynamicStreamNode failed\";\n      return err_msg({STATUS_FAULT, msg});\n    }\n    stream_data_handler = BindDataHanlder(stream_data_handler, gcnode);\n    return stream_data_handler;\n  }\n\n  if (CheckInputPort(desc, buffers) != STATUS_OK) {\n    return 
err_msg({STATUS_FAULT, \"not all port has data\"});\n  }\n  if (buffers->GetBindGraph() == nullptr) {\n    return err_msg({STATUS_FAULT, \"input datahandler has no valid graph\"});\n  }\n\n  stream_data_handler->SetBindGraph(buffers->GetBindGraph());\n\n  auto gcnode =\n      CreateDynamicGCGraph(desc->GetFlowUnitName(), config_map, buffers);\n  if (gcnode == nullptr) {\n    const auto *msg = \"create gcnode failed\";\n    stream_data_handler->SetError({STATUS_INVALID, msg});\n    MBLOG_ERROR << msg;\n    return stream_data_handler;\n  }\n  stream_data_handler = BindDataHanlder(stream_data_handler, gcnode);\n\n  if (desc->GetFlowUnitOutput().size() == 0 &&\n      CheckNodeIsLinked(stream_data_handler->GetBindGraph()->gcgraph_,\n                        gcnode)) {\n    if (STATUS_OK != RunGraph(stream_data_handler)) {\n      const auto *msg = \"build graph failed\";\n      stream_data_handler->SetError({STATUS_INVALID, msg});\n      MBLOG_ERROR << msg;\n    }\n  }\n  return stream_data_handler;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::ExecuteBufferListNode(\n    const std::string &name, std::map<std::string, std::string> &config_map,\n    const std::shared_ptr<DataHandler> &buffers) {\n  auto node = CreateDynamicNormalNode(name, config_map);\n  if (node == nullptr) {\n    return err_msg({STATUS_INVALID, \"create dynamic node \" + name + \" failed\"});\n  }\n\n  auto dynamic_node = std::static_pointer_cast<SingleNode>(node);\n  dynamic_node->Run(buffers);\n  auto data_handler = std::make_shared<DataHandler>();\n  data_handler->SetDataHandlerType(OUTPUT);\n\n  if (STATUS_NODATA == dynamic_node->PushDataToDataHandler(data_handler)) {\n    return err_msg({STATUS_NODATA, \"recv no data from node\"});\n  }\n\n  return data_handler;\n}\n\nstd::shared_ptr<DataHandler> ModelBoxEngine::Execute(\n    const std::string &name, std::map<std::string, std::string> config_map,\n    const std::shared_ptr<DataHandler> &data) {\n  SetDefaultConfigValue(name, config_map);\n  
auto flowunit_desc = GetFlowunitDesc(name, config_map);\n  if (flowunit_desc == nullptr) {\n    return err_msg(\n        {STATUS_INVALID, \"failed find flowunit \" + name + \" description\"});\n  }\n\n  auto ret = CheckInputFlowUnit(name, config_map, data, flowunit_desc);\n  if (ret != STATUS_OK) {\n    return err_msg(ret);\n  }\n\n  // stream node, create gcgraph\n  if (CheckisStream(flowunit_desc, data)) {\n    return ExecuteStreamNode(flowunit_desc, data, config_map);\n  }\n  return ExecuteBufferListNode(name, config_map, data);\n}\n\nvoid ModelBoxEngine::ShutDown() {\n  if (scheduler_) {\n    scheduler_->Shutdown();\n    scheduler_ = nullptr;\n  }\n}\n\nvoid ModelBoxEngine::Close() {\n  if (scheduler_) {\n    if (graphs_.size() == 1) {\n      Status status = STATUS_OK;\n      scheduler_->Wait(0, &status);\n    }\n\n    scheduler_->Shutdown();\n    scheduler_ = nullptr;\n  }\n\n  graphs_.clear();\n  nodes_config_.clear();\n  if (device_mgr_) {\n    device_mgr_->Clear();\n  }\n  flowunit_mgr_ = nullptr;\n  device_mgr_ = nullptr;\n  profiler_ = nullptr;\n  if (drivers_) {\n    drivers_->Clear();\n  }\n  drivers_ = nullptr;\n}\n\nstd::shared_ptr<Configuration> ModelBoxEngine::GetConfig() { return config_; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/error.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/error.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nFlowUnitError::FlowUnitError(std::string desc) { desc_ = std::move(desc); }\n\nFlowUnitError::FlowUnitError(const std::string& node,\n                             const std::string& error_pos,\n                             const Status& error_status) {\n  desc_ = \"node:\" + node + \" error pos:\" + error_pos +\n          \" status:\" + error_status.StrCode() +\n          \" error:\" + error_status.Errormsg();\n  error_status_ = error_status;\n}\n\nFlowUnitError::~FlowUnitError() = default;\n\nstd::string FlowUnitError::GetDesc() { return desc_; };\nStatus FlowUnitError::GetStatus() { return error_status_; };\n\nDataError::DataError(const std::string& error_code,\n                     const std::string& error_msg) {\n  error_code_ = error_code;\n  error_msg_ = error_msg;\n  new_error_ = true;\n}\n\nDataError::~DataError() = default;\n\nstd::string DataError::GetErrorCode() { return error_code_; }\n\nstd::string DataError::GetErrorMsg() { return error_msg_; }\n\nvoid DataError::SetErrorDeepth(size_t error_deepth) {\n  // only new error need set deepth\n  if (new_error_) {\n    error_deepth_ = error_deepth;\n    new_error_ = false;\n  }\n}\n\nsize_t DataError::GetErrorDeepth() { return error_deepth_; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/external_data_map.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/external_data_map.h\"\n\n#include <functional>\n#include <utility>\n\n#include \"modelbox/node.h\"\n#include \"modelbox/session.h\"\n#include \"modelbox/session_context.h\"\n#include \"modelbox/stream.h\"\n\nnamespace modelbox {\n\nExternalDataMapImpl::ExternalDataMapImpl(\n    const std::shared_ptr<Node>& input_node,\n    const std::shared_ptr<Stream>& init_stream)\n    : init_stream_(init_stream),\n      session_(init_stream_->GetSession()),\n      session_ctx_(init_stream->GetSession()->GetSessionCtx()) {\n  root_buffer_ = std::make_shared<BufferIndexInfo>();\n  root_buffer_->SetStream(init_stream);\n  root_buffer_->SetIndex(0);\n  graph_input_node_ = input_node;\n  graph_input_node_device_ = input_node->GetDevice();\n  for (auto& ext_port : input_node->GetExternalPorts()) {\n    const auto& port_name = ext_port->GetName();\n    graph_input_node_ports_[port_name] = ext_port;\n    graph_input_ports_cache_[port_name] = std::list<std::shared_ptr<Buffer>>();\n    graph_input_ports_stream_[port_name] =\n        std::make_shared<Stream>(init_stream_->GetSession());\n  }\n  graph_output_cache_ = std::make_shared<BlockingQueue<OutputBufferList>>();\n}\n\nExternalDataMapImpl::~ExternalDataMapImpl() = default;\n\nstd::shared_ptr<BufferList> ExternalDataMapImpl::CreateBufferList() {\n  if 
(!graph_input_node_device_) {\n    MBLOG_ERROR << \"device_ must not be nullptr\";\n    return nullptr;\n  }\n\n  return std::make_shared<BufferList>(graph_input_node_device_);\n}\n\nStatus ExternalDataMapImpl::SetOutputMeta(const std::string& port_name,\n                                          std::shared_ptr<DataMeta> meta) {\n  auto item = graph_input_ports_stream_.find(port_name);\n  if (item == graph_input_ports_stream_.end()) {\n    return {STATUS_INVALID, \"Send Port \" + port_name + \" is not exist\"};\n  }\n\n  auto& stream = item->second;\n  stream->SetStreamMeta(meta);\n  return STATUS_OK;\n}\n\nStatus ExternalDataMapImpl::Send(const std::string& port_name,\n                                 std::shared_ptr<BufferList> buffer_list) {\n  std::lock_guard<std::recursive_mutex> lock(close_state_lock_);\n  if (close_flag_ || init_stream_ == nullptr) {\n    return STATUS_STOP;\n  }\n\n  auto ret = PushToInputCache(port_name, buffer_list);\n  if (!ret) {\n    return ret;\n  }\n\n  std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>\n      matched_port_data;\n  size_t matched_data_size = 0;\n  PopMachedInput(matched_port_data, matched_data_size);\n  return SendMatchData(matched_port_data, matched_data_size);\n}\n\nStatus ExternalDataMapImpl::PushToInputCache(\n    const std::string& port_name,\n    const std::shared_ptr<BufferList>& buffer_list) {\n  auto item = graph_input_ports_cache_.find(port_name);\n  if (item == graph_input_ports_cache_.end()) {\n    return {STATUS_INVALID, \"Send Port \" + port_name + \" is not exist\"};\n  }\n\n  auto& port_cache = item->second;\n  for (auto& buffer : *buffer_list) {\n    port_cache.push_back(buffer->Copy());\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid ExternalDataMapImpl::PopMachedInput(\n    std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>&\n        matched_port_data,\n    size_t& matched_data_size) {\n  if (graph_input_ports_cache_.empty()) {\n    return;\n  }\n\n  matched_data_size 
= SIZE_MAX;\n  for (auto& port_data_list_iter : graph_input_ports_cache_) {\n    auto data_list_size = port_data_list_iter.second.size();\n    if (data_list_size < matched_data_size) {\n      matched_data_size = data_list_size;\n    }\n  }\n\n  for (auto& port_data_list_iter : graph_input_ports_cache_) {\n    const auto& port_name = port_data_list_iter.first;\n    auto& port_data_list = port_data_list_iter.second;\n    auto& matched_data_list = matched_port_data[port_name];\n    auto end_pos = port_data_list.begin();\n    std::advance(end_pos, matched_data_size);\n    matched_data_list.splice(matched_data_list.begin(), port_data_list,\n                             port_data_list.begin(), end_pos);\n  }\n}\n\nStatus ExternalDataMapImpl::SendMatchData(\n    const std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>&\n        matched_port_data,\n    size_t matched_data_size) {\n  if (matched_data_size == 0) {\n    return STATUS_SUCCESS;\n  }\n\n  for (const auto& input_port_data_iter : matched_port_data) {\n    const auto& port_name = input_port_data_iter.first;\n    const auto& port_data_list = input_port_data_iter.second;\n    auto& port_stream = graph_input_ports_stream_[port_name];\n    auto& graph_input_port = graph_input_node_ports_[port_name];\n    std::vector<std::shared_ptr<modelbox::Buffer>> batch_data;\n    batch_data.reserve(port_data_list.size());\n    for (const auto& port_data : port_data_list) {\n      auto& port_buffer_index_info = port_data->index_info_;\n      auto port_buffer_index = port_stream->GetBufferCount();\n      port_stream->IncreaseBufferCount();\n      port_buffer_index_info->SetIndex(port_buffer_index);\n      port_buffer_index_info->SetStream(port_stream);\n      auto inherit_info = std::make_shared<BufferInheritInfo>();\n      inherit_info->SetInheritFrom(root_buffer_);\n      inherit_info->SetType(BufferProcessType::EXPAND);\n      port_buffer_index_info->SetInheritInfo(inherit_info);\n      
batch_data.emplace_back(port_data);\n    }\n    graph_input_port->GetQueue()->PushBatchForce(&batch_data);\n  }\n\n  auto& port = graph_input_node_ports_.begin()->second;\n  port->NotifyPushEvent();\n  return STATUS_SUCCESS;\n}\n\nStatus ExternalDataMapImpl::Recv(OutputBufferList& map_buffer_list,\n                                 int32_t timeout) {\n  if (graph_output_cache_ == nullptr) {\n    return STATUS_NODATA;\n  }\n\n  std::vector<OutputBufferList> output_bufferlist_vector;\n  auto size = graph_output_cache_->Pop(&output_bufferlist_vector, timeout);\n  if (size == 0) {\n    std::lock_guard<std::mutex> lock(session_state_lock_);\n    if (!session_end_flag_) {\n      return STATUS_TIMEDOUT;\n    }\n\n    auto selector = selector_.lock();\n    if (selector != nullptr) {\n      selector->RemoveExternalData(shared_from_this());\n    }\n\n    if (last_error_ == nullptr) {\n      return STATUS_EOF;\n    }\n\n    return STATUS_INVALID;\n  }\n\n  for (auto& output_buffer_list : output_bufferlist_vector) {\n    if (output_buffer_list.empty()) {\n      continue;\n    }\n\n    for (auto& port_data_item : output_buffer_list) {\n      const auto& port_name = port_data_item.first;\n      auto& port_data_list = port_data_item.second;\n      std::shared_ptr<BufferList> buffer_list;\n      auto out_item = map_buffer_list.find(port_name);\n      if (out_item == map_buffer_list.end()) {\n        buffer_list = std::make_shared<BufferList>();\n        map_buffer_list[port_name] = buffer_list;\n      } else {\n        buffer_list = out_item->second;\n      }\n\n      for (auto& buffer : *port_data_list) {\n        buffer_list->PushBack(buffer->Copy());\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\n/**\n * @brief close input stream, wait process\n **/\nStatus ExternalDataMapImpl::Close() {\n  std::lock_guard<std::recursive_mutex> lock(close_state_lock_);\n  if (close_flag_) {\n    return STATUS_OK;\n  }\n\n  close_flag_ = true;\n  if (init_stream_ == nullptr) {\n    return 
STATUS_OK;\n  }\n\n  // add end buffer\n  for (auto& input_node_port_item : graph_input_node_ports_) {\n    const auto& port_name = input_node_port_item.first;\n    auto& port = input_node_port_item.second;\n    auto& port_stream = graph_input_ports_stream_[port_name];\n    auto end_buffer = std::make_shared<Buffer>();\n    auto end_index_info = BufferManageView::GetIndexInfo(end_buffer);\n    end_index_info->SetStream(port_stream);\n    end_index_info->SetIndex(port_stream->GetBufferCount());\n    end_index_info->MarkAsEndFlag();\n    port_stream->IncreaseBufferCount();\n    auto inherit_info = std::make_shared<BufferInheritInfo>();\n    inherit_info->SetInheritFrom(root_buffer_);\n    inherit_info->SetType(BufferProcessType::EXPAND);\n    end_index_info->SetInheritInfo(inherit_info);\n    port->Send(end_buffer);\n  }\n\n  auto& port = graph_input_node_ports_.begin()->second;\n  port->NotifyPushEvent();\n  // clear\n  init_stream_ = nullptr;\n  root_buffer_ = nullptr;\n  graph_input_ports_stream_.clear();\n  return STATUS_OK;\n}\n\n/**\n * @brief stop task immediately\n **/\nStatus ExternalDataMapImpl::Shutdown() {\n  std::lock_guard<std::recursive_mutex> lock(close_state_lock_);\n  if (shutdown_flag_) {\n    return STATUS_OK;\n  }\n\n  shutdown_flag_ = true;\n  auto session = session_.lock();\n  if (session == nullptr) {\n    return STATUS_OK;\n  }\n\n  session->Close();\n  Close();  // make sure data end has been sent\n  return STATUS_OK;\n}\n\nstd::shared_ptr<SessionContext> ExternalDataMapImpl::GetSessionContext() {\n  return session_ctx_.lock();\n}\n\nstd::shared_ptr<Configuration> ExternalDataMapImpl::GetSessionConfig() {\n  auto ctx = session_ctx_.lock();\n  if (ctx == nullptr) {\n    return nullptr;\n  }\n\n  return ctx->GetConfig();\n}\n\nvoid ExternalDataMapImpl::SetPrivate(std::shared_ptr<void> ptr) {\n  private_ptr_ = ptr;\n}\n\nstd::shared_ptr<void> ExternalDataMapImpl::GetPrivate() { return private_ptr_; }\n\nvoid 
ExternalDataMapImpl::SetLastError(std::shared_ptr<FlowUnitError> error) {\n  last_error_ = std::move(error);\n}\n\nstd::shared_ptr<FlowUnitError> ExternalDataMapImpl::GetLastError() {\n  return last_error_;\n}\n\nvoid ExternalDataMapImpl::SetSelector(\n    const std::shared_ptr<ExternalDataSelect>& selector) {\n  selector_ = selector;\n}\n\nbool ExternalDataMapImpl::GetReadyFlag() {\n  if (session_end_flag_) {\n    return true;\n  }\n\n  return !(graph_output_cache_->Empty());\n}\n\nvoid ExternalDataMapImpl::PushGraphOutputBuffer(OutputBufferList& output) {\n  auto size = graph_output_cache_->Size();\n  if (!graph_output_cache_->Push(output)) {\n    MBLOG_ERROR << \"graph save output failed\";\n    return;\n  }\n\n  if (size != 0) {\n    return;\n  }\n\n  auto selector = selector_.lock();\n  if (selector == nullptr) {\n    return;\n  }\n\n  selector->NotifySelect();\n}\n\nvoid ExternalDataMapImpl::SessionEnd(std::shared_ptr<FlowUnitError> error) {\n  {\n    std::lock_guard<std::mutex> lock(session_state_lock_);\n    if (session_end_flag_) {\n      return;\n    }\n\n    session_end_flag_ = true;\n    last_error_ = error;\n    graph_output_cache_->Shutdown();\n  }\n\n  auto selector = selector_.lock();\n  if (selector != nullptr) {\n    selector->NotifySelect();\n  }\n}\n\nExternalDataMap::ExternalDataMap() = default;\n\nExternalDataMap::~ExternalDataMap() = default;\n\nExternalDataSelect::ExternalDataSelect() = default;\n\nExternalDataSelect::~ExternalDataSelect() = default;\n\nvoid ExternalDataSelect::RegisterExternalData(\n    const std::shared_ptr<ExternalDataMap>& externl_data) {\n  std::lock_guard<std::mutex> lock(external_list_lock_);\n  std::shared_ptr<ExternalDataMapImpl> externl_data_imp =\n      std::dynamic_pointer_cast<ExternalDataMapImpl>(externl_data);\n  external_list_.push_back(externl_data_imp);\n  externl_data_imp->SetSelector(shared_from_this());\n}\n\nvoid ExternalDataSelect::RemoveExternalData(\n    const std::shared_ptr<ExternalDataMap>& 
externl_data) {\n  std::lock_guard<std::mutex> lock(external_list_lock_);\n  auto iter = external_list_.begin();\n  while (iter != external_list_.end()) {\n    if (*iter == std::dynamic_pointer_cast<ExternalDataMapImpl>(externl_data)) {\n      iter = external_list_.erase(iter);\n      break;\n    }\n\n    iter++;\n  }\n}\n\nbool ExternalDataSelect::IsExternalDataReady() {\n  std::lock_guard<std::mutex> lock(external_list_lock_);\n  if (external_list_.empty()) {\n    // wait for external_data_map insert\n    return false;\n  }\n\n  for (const auto& external_data : external_list_) {\n    if (external_data->GetReadyFlag()) {\n      return true;\n    }\n  }\n\n  return false;\n}\n\nStatus ExternalDataSelect::SelectExternalData(\n    std::list<std::shared_ptr<ExternalDataMap>>& external_list,\n    std::chrono::duration<long, std::milli> waittime) {\n  MBLOG_DEBUG << \"SelectExternalData\";\n  // wait for data ready\n  {\n    std::unique_lock<std::mutex> lck(data_ready_mtx_);\n    auto data_ready_func = [this]() { return IsExternalDataReady(); };\n    if (waittime <= std::chrono::milliseconds(0)) {\n      data_ready_cv_.wait(lck, data_ready_func);\n    } else {\n      if (!data_ready_cv_.wait_for(lck, waittime, data_ready_func)) {\n        return STATUS_TIMEDOUT;\n      }\n    }\n  }\n\n  // get ready external data\n  std::lock_guard<std::mutex> lock(external_list_lock_);\n  for (const auto& external_data : external_list_) {\n    if (external_data->GetReadyFlag()) {\n      external_list.push_back(external_data);\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid ExternalDataSelect::NotifySelect() {\n  std::unique_lock<std::mutex> lck(data_ready_mtx_);\n  data_ready_cv_.notify_one();\n  MBLOG_DEBUG << \"NotifySelect\";\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/external_data_simple.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/external_data_simple.h>\n#include <securec.h>\n\nnamespace modelbox {\nExternalDataSimple::ExternalDataSimple(\n    std::shared_ptr<ExternalDataMap>& data_map)\n    : data_map_(data_map) {}\n\nExternalDataSimple::~ExternalDataSimple() {\n  if (data_map_) {\n    data_map_->Close();\n  }\n}\n\nstd::shared_ptr<BufferList> ExternalDataSimple::CreateBufferList() {\n  if (data_map_) {\n    return data_map_->CreateBufferList();\n  }\n  return nullptr;\n}\n\nStatus ExternalDataSimple::PushData(const std::string& port_name,\n                                    std::shared_ptr<BufferList>& bufferlist) {\n  if (data_map_ == nullptr) {\n    return {STATUS_INVALID, \"data_map is nullptr\"};\n  }\n\n  auto temp = data_map_->CreateBufferList();\n  if (temp->GetDevice() != bufferlist->GetDevice()) {\n    MBLOG_ERROR << \"pushed buffer is on different device\";\n    return {STATUS_INVALID, \"pushed buffer is on different device\"};\n  }\n\n  auto status = data_map_->Send(port_name, bufferlist);\n  if (!status) {\n    MBLOG_ERROR << \"failed send data to graph: \" << status.Errormsg();\n    return status;\n  }\n\n  return data_map_->Shutdown();\n}\n\nStatus ExternalDataSimple::PushData(\n    const std::string& port_name, const void* data, const size_t& data_len,\n    const std::map<std::string, std::string>& 
meta) {\n  if ((data_map_ == nullptr) || ((data == nullptr) && (data_len != 0))) {\n    MBLOG_ERROR\n        << \"push data failed,because data map is null or data is nullptr\";\n    return STATUS_FAULT;\n  }\n\n  Status status = STATUS_OK;\n  auto input_buffer = data_map_->CreateBufferList();\n\n  if (data_len == 0) {\n    status = input_buffer->Build({1});\n  } else {\n    status = input_buffer->Build({data_len});\n  }\n\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"failed build buffer for len :\" << data_len;\n    return STATUS_FAULT;\n  }\n\n  auto buffer = input_buffer->At(0);\n  auto* buffer_data = buffer->MutableData();\n  if (data_len > 0) {\n    auto ret = memcpy_s(buffer_data, data_len, data, data_len);\n    if (ret < 0) {\n      MBLOG_ERROR << \"copy data to external buffer failed.\";\n      return STATUS_FAULT;\n    }\n  }\n\n  for (const auto& iter : meta) {\n    buffer->Set(iter.first, iter.second);\n  }\n\n  status = data_map_->Send(port_name, input_buffer);\n  if (!status) {\n    MBLOG_ERROR << \"failed send data to graph: \" << status.Errormsg();\n    return status;\n  }\n  \n  return data_map_->Shutdown();\n}\n\nStatus ExternalDataSimple::GetResult(const std::string& port_name,\n                                     std::shared_ptr<Buffer>& buffer,\n                                     const int& timeout) {\n  if (data_map_ == nullptr) {\n    return {STATUS_INVALID, \"data map is null\"};\n  }\n\n  if (buffer_list_map_[port_name].size() == 0) {\n    if (status_ != STATUS_OK) {\n      return status_;\n    }\n\n    OutputBufferList map_buffer_list;\n    status_ = data_map_->Recv(map_buffer_list, timeout);\n    Defer {\n      if (status_ != STATUS_SUCCESS) {\n        data_map_->Close();\n      }\n    };\n\n    if (status_ != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"recv failed, error is \" << data_map_->GetLastError();\n      return status_;\n    }\n\n    if (map_buffer_list.find(port_name) == map_buffer_list.end()) {\n      std::string 
error_msg = \"port name not found: \" + port_name;\n      buffer_list_map_.erase(port_name);\n      return {STATUS_INVALID, error_msg};\n    }\n\n    for (auto& iter : map_buffer_list) {\n      auto buffers = std::vector<std::shared_ptr<Buffer>>(iter.second->begin(),\n                                                          iter.second->end());\n      auto temp_buffer = data_map_->CreateBufferList();\n      temp_buffer->Assign(buffers);\n      temp_buffer->MoveAllBufferToTargetDevice();\n      for (auto& buffer_iter : *temp_buffer) {\n        buffer_list_map_[iter.first].push(buffer_iter);\n      }\n    }\n  }\n\n  buffer = buffer_list_map_[port_name].front();\n  buffer_list_map_[port_name].pop();\n  return STATUS_OK;\n}\n\nStatus ExternalDataSimple::GetResult(const std::string& port_name,\n                                     std::shared_ptr<void>& data, size_t& len,\n                                     const int& timeout) {\n  std::shared_ptr<Buffer> buffer;\n  auto status = GetResult(port_name, buffer, timeout);\n  if (status != STATUS_OK) {\n    return status;\n  }\n\n  len = buffer->GetBytes();\n  data.reset(buffer->MutableData(), [buffer](void* data) {});\n\n  return STATUS_OK;\n}\n\nvoid ExternalDataSimple::Close() {\n  if (data_map_) {\n    data_map_->Close();\n    data_map_ = nullptr;\n  }\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/flow_stream_io.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow_stream_io.h\"\n\n#include <utility>\n\nnamespace modelbox {\nFlowStreamIO::FlowStreamIO(std::shared_ptr<ExternalDataMap> data_map)\n    : data_map_(std::move(data_map)) {}\n\nFlowStreamIO::~FlowStreamIO() { data_map_->Shutdown(); }\n\nstd::shared_ptr<Buffer> FlowStreamIO::CreateBuffer() {\n  auto buffer_list = data_map_->CreateBufferList();\n  return std::make_shared<Buffer>(buffer_list->GetDevice());\n}\n\nStatus FlowStreamIO::Send(const std::string &input_name,\n                          const std::shared_ptr<Buffer> &buffer) {\n  auto buffer_list = data_map_->CreateBufferList();\n  buffer_list->PushBack(buffer);\n  return data_map_->Send(input_name, buffer_list);\n}\n\nStatus FlowStreamIO::Send(\n    const std::string &input_name,\n    const std::vector<std::shared_ptr<Buffer>> &input_list) {\n  auto buffer_list = data_map_->CreateBufferList();\n  buffer_list->Assign(input_list);\n  return data_map_->Send(input_name, buffer_list);\n}\n\nStatus FlowStreamIO::Send(const std::string &input_name, void *data,\n                          size_t size) {\n  auto buffer = CreateBuffer();\n  auto ret = buffer->BuildFromHost(data, size);\n  if (!ret) {\n    return ret;\n  }\n\n  return Send(input_name, buffer);\n}\n\nStatus FlowStreamIO::Recv(const std::string &output_name,\n                     
     std::shared_ptr<Buffer> &buffer, long timeout) {\n  auto port_data_cache_item = port_data_cache_map_.find(output_name);\n  if (port_data_cache_item == port_data_cache_map_.end() ||\n      port_data_cache_item->second.empty()) {\n    OutputBufferList map_buffer_list;\n    auto status = data_map_->Recv(map_buffer_list, timeout);\n\n    if (!status) {\n      if (status == STATUS_EOF) {\n        return status;\n      }\n      MBLOG_ERROR << \"Recv data failed, ret \" << status;\n      return status;\n    }\n\n    for (auto &port_item : map_buffer_list) {\n      const auto &port_name = port_item.first;\n      auto &port_buffer_list = port_item.second;\n      auto &data_cache = port_data_cache_map_[port_name];\n      data_cache.insert(data_cache.end(), port_buffer_list->begin(),\n                        port_buffer_list->end());\n    }\n  }\n\n  buffer = port_data_cache_map_[output_name].front();\n  port_data_cache_map_[output_name].pop_front();\n  return STATUS_OK;\n}\n\nstd::shared_ptr<Buffer> FlowStreamIO::Recv(const std::string &output_name,\n                                           long timeout) {\n  std::shared_ptr<Buffer> buffer;\n  auto ret = Recv(output_name, buffer, timeout);\n  if (ret != STATUS_SUCCESS) {\n    StatusError = ret;\n    return nullptr;\n  }\n\n  return buffer;\n}\n\nvoid FlowStreamIO::CloseInput() { data_map_->Close(); }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/flowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit.h\"\n\n#include <utility>\n\n#include \"modelbox/tensor_list.h\"\n\nnamespace modelbox {\n\nstatic const std::regex REGROUPTYPE(\"^[A-Z][\\\\w/]*\");\n\nIFlowUnit::IFlowUnit() = default;\nIFlowUnit::~IFlowUnit() = default;\n\n/* class when unit is close */\nStatus IFlowUnit::Close() { return STATUS_OK; }\n\n// NOLINTNEXTLINE\nStatus IFlowUnit::DataPre(std::shared_ptr<DataContext> data_ctx) {\n  return STATUS_OK;\n}\n\n// NOLINTNEXTLINE\nStatus IFlowUnit::DataPost(std::shared_ptr<DataContext> data_ctx) {\n  return STATUS_OK;\n}\n\n// NOLINTNEXTLINE\nStatus IFlowUnit::DataGroupPre(std::shared_ptr<DataContext> data_ctx) {\n  return STATUS_OK;\n}\n\n// NOLINTNEXTLINE\nStatus IFlowUnit::DataGroupPost(std::shared_ptr<DataContext> data_ctx) {\n  return STATUS_OK;\n}\n\nvoid FlowUnit::SetFlowUnitDesc(std::shared_ptr<FlowUnitDesc> desc) {\n  flowunit_desc_ = std::move(desc);\n}\n\nFlowUnit::FlowUnit() = default;\nFlowUnit::~FlowUnit() = default;\n\nStatus FlowUnit::Open(const std::shared_ptr<Configuration> &config) {\n  return STATUS_OK;\n}\n\nStatus FlowUnit::Close() { return STATUS_OK; }\n\nvoid FlowUnit::SetBindDevice(const std::shared_ptr<Device> &device) {\n  device_ = device;\n  if (device == nullptr) {\n    return;\n  }\n\n  auto dev_id_str = device->GetDeviceID();\n  try {\n    
dev_id_ = std::stoi(dev_id_str);\n  } catch (const std::exception &e) {\n    MBLOG_WARN << \"Convert device id to int failed, id \" << dev_id_str\n               << \", err \" << e.what() << \"; use device 0 as default\";\n  }\n}\n\nvoid FlowUnit::SetExternalData(\n    const CreateExternalDataFunc &create_external_data) {\n  create_ext_data_func_ = create_external_data;\n}\n\nstd::shared_ptr<ExternalData> FlowUnit::CreateExternalData() const {\n  if (!create_ext_data_func_) {\n    return nullptr;\n  }\n\n  return create_ext_data_func_(device_);\n}\n\nCreateExternalDataFunc FlowUnit::GetCreateExternalDataFunc() {\n  return create_ext_data_func_;\n}\n\nFlowUnitPort::FlowUnitPort(std::string name) : port_name_(std::move(name)) {}\n\nFlowUnitPort::FlowUnitPort(std::string name, std::string device_type)\n    : port_name_(std::move(name)), device_type_(std::move(device_type)) {}\n\nFlowUnitPort::FlowUnitPort(std::string name, uint32_t device_mem_flags)\n    : port_name_(std::move(name)), device_mem_flags_(device_mem_flags) {}\n\nFlowUnitPort::FlowUnitPort(std::string name, std::string device_type,\n                           uint32_t device_mem_flags)\n    : port_name_(std::move(name)),\n      device_type_(std::move(device_type)),\n      device_mem_flags_(device_mem_flags) {}\n\nFlowUnitPort::FlowUnitPort(std::string name, std::string device_type,\n                           std::string type)\n    : port_name_(std::move(name)),\n      device_type_(std::move(device_type)),\n      port_type_(std::move(type)) {}\n\nFlowUnitPort::FlowUnitPort(std::string name, std::string device_type,\n                           std::string type,\n                           std::map<std::string, std::string> ext)\n    : port_name_(std::move(name)),\n      device_type_(std::move(device_type)),\n      port_type_(std::move(type)),\n      ext_(std::move(ext)) {}\n\nFlowUnitPort::~FlowUnitPort() = default;\n\nvoid FlowUnitPort::SetDeviceType(const std::string &device_type) {\n  device_type_ = 
device_type;\n}\n\nvoid FlowUnitPort::SetPortName(const std::string &port_name) {\n  port_name_ = port_name;\n}\n\nvoid FlowUnitPort::SetPortType(const std::string &port_type) {\n  port_type_ = port_type;\n}\n\nvoid FlowUnitPort::SetDevice(std::shared_ptr<Device> device) {\n  device_ = std::move(device);\n}\n\nvoid FlowUnitPort::SetProperity(const std::string &key,\n                                const std::string &value) {\n  ext_[key] = value;\n}\n\nstd::string FlowUnitPort::GetDeviceType() const { return device_type_; }\n\nstd::string FlowUnitPort::GetPortName() const { return port_name_; }\n\nstd::string FlowUnitPort::GetPortType() const { return port_type_; }\n\nstd::shared_ptr<Device> FlowUnitPort::GetDevice() const { return device_; }\n\nuint32_t FlowUnitPort::GetDeviceMemFlags() const { return device_mem_flags_; }\n\nstd::string FlowUnitPort::GetProperity(const std::string &key) {\n  if (ext_.find(key) == ext_.end()) {\n    return \"\";\n  }\n\n  return ext_[key];\n}\n\nFlowUnitInput::FlowUnitInput(const std::string &name) : FlowUnitPort(name){};\nFlowUnitInput::FlowUnitInput(const std::string &name,\n                             const std::string &device_type)\n    : FlowUnitPort(name, device_type){};\nFlowUnitInput::FlowUnitInput(const std::string &name, uint32_t device_mem_flags)\n    : FlowUnitPort(name, device_mem_flags){};\n\nFlowUnitInput::FlowUnitInput(const std::string &name,\n                             const std::string &device_type,\n                             uint32_t device_mem_flags)\n    : FlowUnitPort(name, device_type, device_mem_flags){};\nFlowUnitInput::FlowUnitInput(const std::string &name,\n                             const std::string &device_type,\n                             const std::string &type)\n    : FlowUnitPort(name, device_type, type){};\nFlowUnitInput::FlowUnitInput(const std::string &name,\n                             const std::string &device_type,\n                             const std::string &type,\n           
                  const std::map<std::string, std::string> &ext)\n    : FlowUnitPort(name, device_type, type, ext){};\nFlowUnitInput::~FlowUnitInput() = default;\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name) : FlowUnitPort(name){};\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name,\n                               uint32_t device_mem_flags)\n    : FlowUnitPort(name, device_mem_flags){};\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name,\n                               const std::string &device_type)\n    : FlowUnitPort(name, device_type){};\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name,\n                               const std::string &device_type,\n                               uint32_t device_mem_flags)\n    : FlowUnitPort(name, device_type, device_mem_flags){};\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name,\n                               const std::string &device_type,\n                               const std::string &type)\n    : FlowUnitPort(name, device_type, type){};\n\nFlowUnitOutput::FlowUnitOutput(const std::string &name,\n                               const std::string &device_type,\n                               const std::string &type,\n                               const std::map<std::string, std::string> &ext)\n    : FlowUnitPort(name, device_type, type, ext){};\n\nFlowUnitOutput::~FlowUnitOutput() = default;\n\nFlowUnitOption::FlowUnitOption(std::string name, std::string type)\n    : option_name_(std::move(name)), option_type_(std::move(type)) {}\n\nFlowUnitOption::FlowUnitOption(std::string name, std::string type, bool require)\n    : option_name_(std::move(name)),\n      option_type_(std::move(type)),\n      option_require_{require} {}\n\nFlowUnitOption::FlowUnitOption(std::string name, std::string type, bool require,\n                               std::string default_value, std::string desc,\n                               std::map<std::string, std::string> values)\n    : 
option_name_(std::move(name)),\n      option_type_(std::move(type)),\n      option_require_(require),\n      option_default_(std::move(default_value)),\n      option_desc_(std::move(desc)),\n      option_values_(std::move(values)) {}\n\nFlowUnitOption::FlowUnitOption(std::string name, std::string type, bool require,\n                               std::string default_value, std::string desc)\n    : option_name_(std::move(name)),\n      option_type_(std::move(type)),\n      option_require_(require),\n      option_default_(std::move(default_value)),\n      option_desc_(std::move(desc)) {}\n\nFlowUnitOption::~FlowUnitOption() { option_values_.clear(); }\n\nvoid FlowUnitOption::SetOptionName(const std::string &option_name) {\n  option_name_ = option_name;\n}\n\nvoid FlowUnitOption::SetOptionType(const std::string &option_type) {\n  option_type_ = option_type;\n}\n\nvoid FlowUnitOption::SetOptionRequire(bool option_require) {\n  option_require_ = option_require;\n}\n\nvoid FlowUnitOption::SetOptionDesc(const std::string &option_desc) {\n  option_desc_ = option_desc;\n}\n\nvoid FlowUnitOption::AddOptionValue(const std::string &key,\n                                    const std::string &value) {\n  option_values_.emplace(key, value);\n}\n\nstd::string FlowUnitOption::GetOptionName() const { return option_name_; }\n\nstd::string FlowUnitOption::GetOptionType() const { return option_type_; }\n\nbool FlowUnitOption::IsRequire() const { return option_require_; }\n\nstd::string FlowUnitOption::GetOptionDefault() const { return option_default_; }\n\nstd::string FlowUnitOption::GetOptionDesc() const { return option_desc_; }\n\nstd::map<std::string, std::string> FlowUnitOption::GetOptionValues() {\n  return option_values_;\n}\n\nstd::string FlowUnitOption::GetOptionValue(const std::string &key) {\n  auto iter = option_values_.find(key);\n  if (iter == option_values_.end()) {\n    return \"\";\n  }\n\n  return option_values_[key];\n}\n\nstd::shared_ptr<Device> 
FlowUnit::GetBindDevice() { return device_; }\n\nstd::shared_ptr<FlowUnitDesc> FlowUnit::GetFlowUnitDesc() {\n  return flowunit_desc_;\n}\n\nFlowUnitDesc::FlowUnitDesc() = default;\n\nFlowUnitDesc::~FlowUnitDesc() = default;\n\nstd::string FlowUnitDesc::GetFlowUnitName() { return flowunit_name_; }\n\nstd::string FlowUnitDesc::GetFlowUnitType() { return flowunit_type_; }\n\nstd::string FlowUnitDesc::GetFlowUnitAliasName() { return alias_name_; }\n\nstd::string FlowUnitDesc::GetFlowUnitArgument() { return argument_; }\n\nbool FlowUnitDesc::IsCollapseAll() {\n  if (loop_type_ != LOOP) {\n    if (output_type_ != COLLAPSE) {\n      return false;\n    }\n    return is_collapse_all_;\n  }\n\n  return true;\n}\n\nbool FlowUnitDesc::IsStreamSameCount() {\n  if (flow_type_ == NORMAL) {\n    return true;\n  }\n  return is_stream_same_count_;\n}\n\nbool FlowUnitDesc::IsInputContiguous() const { return is_input_contiguous_; }\n\nbool FlowUnitDesc::IsResourceNice() const { return is_resource_nice_; }\n\nbool FlowUnitDesc::IsExceptionVisible() { return is_exception_visible_; }\n\nConditionType FlowUnitDesc::GetConditionType() { return condition_type_; }\n\nFlowOutputType FlowUnitDesc::GetOutputType() { return output_type_; }\n\nbool FlowUnitDesc::IsUserSetFlowType() { return is_user_set_flow_type_; }\n\nFlowType FlowUnitDesc::GetFlowType() { return flow_type_; }\n\nLoopType FlowUnitDesc::GetLoopType() { return loop_type_; }\n\nstd::string FlowUnitDesc::GetGroupType() { return group_type_; }\n\nuint32_t FlowUnitDesc::GetMaxBatchSize() {\n  if (max_batch_size_ != 0) {\n    return max_batch_size_;\n  }\n\n  // return default value\n  if (flow_type_ == STREAM) {\n    return STREAM_MAX_BATCH_SIZE;\n  }\n  return NORMAL_MAX_BATCH_SIZE;\n}\n\nuint32_t FlowUnitDesc::GetDefaultBatchSize() {\n  if (default_batch_size_ != 0) {\n    return default_batch_size_;\n  }\n\n  // return default value\n  if (flow_type_ == STREAM) {\n    return STREAM_DEFAULT_BATCH_SIZE;\n  }\n  return 
NORMAL_DEFAULT_BATCH_SIZE;\n}\n\nstd::vector<FlowUnitInput> &FlowUnitDesc::GetFlowUnitInput() {\n  return flowunit_input_list_;\n}\nconst std::vector<FlowUnitOutput> &FlowUnitDesc::GetFlowUnitOutput() {\n  return flowunit_output_list_;\n}\n\nstd::vector<FlowUnitOption> &FlowUnitDesc::GetFlowUnitOption() {\n  return flowunit_option_list_;\n}\n\nstd::shared_ptr<DriverDesc> FlowUnitDesc::GetDriverDesc() {\n  return driver_desc_;\n}\n\nstd::string FlowUnitDesc::GetDescription() { return flowunit_description_; }\n\nstd::string FlowUnitDesc::GetVirtualType() { return virtual_type_; }\n\nvoid FlowUnitDesc::SetFlowUnitName(const std::string &flowunit_name) {\n  flowunit_name_ = flowunit_name;\n}\n\nvoid FlowUnitDesc::SetFlowUnitType(const std::string &flowunit_type) {\n  flowunit_type_ = flowunit_type;\n}\n\nvoid FlowUnitDesc::SetFlowUnitGroupType(const std::string &group_type) {\n  if (CheckGroupType(group_type) != STATUS_SUCCESS) {\n    MBLOG_WARN << \"check group type failed , your group_type is \" << group_type\n               << \", the right group_type is a or a/b , for instance input \"\n                  \"or input/http.\";\n    return;\n  }\n\n  group_type_ = group_type;\n}\n\nvoid FlowUnitDesc::SetDriverDesc(std::shared_ptr<DriverDesc> driver_desc) {\n  driver_desc_ = std::move(driver_desc);\n}\n\nvoid FlowUnitDesc::SetFlowUnitAliasName(const std::string &alias_name) {\n  alias_name_ = alias_name;\n}\n\nvoid FlowUnitDesc::SetFlowUnitArgument(const std::string &argument) {\n  argument_ = argument;\n}\n\nvoid FlowUnitDesc::SetConditionType(ConditionType condition_type) {\n  condition_type_ = condition_type;\n}\n\nvoid FlowUnitDesc::SetLoopType(LoopType loop_type) { loop_type_ = loop_type; }\n\nvoid FlowUnitDesc::SetOutputType(FlowOutputType output_type) {\n  output_type_ = output_type;\n}\n\nvoid FlowUnitDesc::SetFlowType(FlowType flow_type) {\n  is_user_set_flow_type_ = true;\n  flow_type_ = flow_type;\n}\n\nvoid FlowUnitDesc::SetStreamSameCount(bool 
is_stream_same_count) {\n  if (flow_type_ == STREAM) {\n    is_stream_same_count_ = is_stream_same_count;\n  }\n}\n\nvoid FlowUnitDesc::SetInputContiguous(bool is_input_contiguous) {\n  is_input_contiguous_ = is_input_contiguous;\n}\n\nvoid FlowUnitDesc::SetResourceNice(bool is_resource_nice) {\n  is_resource_nice_ = is_resource_nice;\n}\n\nvoid FlowUnitDesc::SetCollapseAll(bool is_collapse_all) {\n  if (output_type_ == COLLAPSE) {\n    is_collapse_all_ = is_collapse_all;\n  }\n}\n\nvoid FlowUnitDesc::SetExceptionVisible(bool is_exception_visible) {\n  is_exception_visible_ = is_exception_visible;\n}\n\nvoid FlowUnitDesc::SetVirtualType(const std::string &virtual_type) {\n  virtual_type_ = virtual_type;\n}\n\nvoid FlowUnitDesc::SetDescription(const std::string &description) {\n  flowunit_description_ = description;\n}\n\nvoid FlowUnitDesc::SetMaxBatchSize(const uint32_t &max_batch_size) {\n  if (max_batch_size == 0) {\n    MBLOG_ERROR << \"max_batch_size must be greater than zero.\";\n    return;\n  }\n  max_batch_size_ = max_batch_size;\n}\n\nvoid FlowUnitDesc::SetDefaultBatchSize(const uint32_t &default_batch_size) {\n  if (default_batch_size == 0) {\n    MBLOG_ERROR << \"default_batch_size must be greater than zero.\";\n    return;\n  }\n  default_batch_size_ = default_batch_size;\n}\n\nStatus FlowUnitDesc::AddFlowUnitInput(const FlowUnitInput &flowunit_input) {\n  if (CheckInputDuplication(flowunit_input) != STATUS_OK) {\n    MBLOG_WARN << \"The flowunit input has already added.\";\n    return STATUS_EXIST;\n  }\n\n  flowunit_input_list_.push_back(flowunit_input);\n  return STATUS_OK;\n}\n\nStatus FlowUnitDesc::AddFlowUnitOutput(const FlowUnitOutput &flowunit_output) {\n  if (CheckOutputDuplication(flowunit_output) != STATUS_OK) {\n    MBLOG_WARN << \"The flowunit input has already added.\";\n    return STATUS_EXIST;\n  }\n\n  flowunit_output_list_.push_back(flowunit_output);\n  return STATUS_OK;\n}\n\nStatus FlowUnitDesc::AddFlowUnitOption(const FlowUnitOption 
&flowunit_option) {\n  if (CheckOptionDuplication(flowunit_option) != STATUS_OK) {\n    MBLOG_WARN << \"The flowunit input has already added.\";\n    return STATUS_EXIST;\n  }\n\n  flowunit_option_list_.push_back(flowunit_option);\n  return STATUS_OK;\n}\n\nStatus FlowUnitDesc::CheckGroupType(const std::string &group_type) {\n  if (!std::regex_match(group_type, REGROUPTYPE)) {\n    auto err_msg = group_type +\n                   \" is not match, you can use a-z, A-Z, 1-9, _ and uppercase \"\n                   \"the first character.\";\n    MBLOG_WARN << err_msg;\n    return {STATUS_INVALID, err_msg};\n  }\n\n  if (group_type.find('/') == std::string::npos) {\n    return STATUS_SUCCESS;\n  }\n\n  if (group_type.find_first_of('/') != group_type.find_last_of('/')) {\n    auto err_msg = \"there are more than one / in \" + group_type;\n    MBLOG_WARN << err_msg;\n    return {STATUS_INVALID, err_msg};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitDesc::CheckInputDuplication(\n    const FlowUnitInput &flowunit_input) {\n  for (const auto &input : flowunit_input_list_) {\n    if (input.GetPortName() != flowunit_input.GetPortName()) {\n      continue;\n    }\n\n    if (input.GetDeviceType() != flowunit_input.GetDeviceType()) {\n      continue;\n    }\n\n    return STATUS_EXIST;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDesc::CheckOutputDuplication(\n    const FlowUnitOutput &flowunit_output) {\n  for (const auto &output : flowunit_output_list_) {\n    if (output.GetPortName() != flowunit_output.GetPortName()) {\n      continue;\n    }\n\n    if (output.GetDeviceType() != flowunit_output.GetDeviceType()) {\n      continue;\n    }\n\n    return STATUS_EXIST;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDesc::CheckOptionDuplication(\n    const FlowUnitOption &flowunit_option) {\n  for (const auto &option : flowunit_option_list_) {\n    if (option.GetOptionName() != flowunit_option.GetOptionName()) {\n      continue;\n    }\n\n    if (option.GetOptionType() 
!= flowunit_option.GetOptionType()) {\n      continue;\n    }\n\n    return STATUS_EXIST;\n  }\n\n  return STATUS_OK;\n}\n\nFlowUnitFactory::FlowUnitFactory() = default;\nFlowUnitFactory::~FlowUnitFactory() = default;\n\nstd::map<std::string, std::shared_ptr<FlowUnitDesc>>\nFlowUnitFactory::FlowUnitProbe() {\n  return std::map<std::string, std::shared_ptr<FlowUnitDesc>>();\n}\n\nvoid FlowUnitFactory::SetDriver(const std::shared_ptr<Driver> &driver) {\n  driver_ = driver;\n}\n\nstd::shared_ptr<Driver> FlowUnitFactory::GetDriver() { return driver_; }\n\nstd::string FlowUnitFactory::GetFlowUnitFactoryType() { return \"\"; }\n\nstd::string FlowUnitFactory::GetFlowUnitFactoryName() { return \"\"; }\n\nstd::vector<std::string> FlowUnitFactory::GetFlowUnitNames() {\n  return std::vector<std::string>();\n}\n\nstd::string FlowUnitFactory::GetVirtualType() { return \"\"; }\n\nvoid FlowUnitFactory::SetVirtualType(const std::string &virtual_type) {}\n\nstd::shared_ptr<FlowUnit> FlowUnitFactory::CreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type) {\n  if (GetVirtualType().empty()) {\n    StatusError = {STATUS_FAULT, \"invalid Flow Unit\"};\n    return nullptr;\n  }\n\n  return VirtualCreateFlowUnit(unit_name, unit_type, GetVirtualType());\n}\n\nstd::shared_ptr<FlowUnit> FlowUnitFactory::VirtualCreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &virtual_type) {\n  StatusError = {STATUS_FAULT, \"Invalid virtual flowunit\"};\n  return nullptr;\n}\n\nvoid FlowUnitFactory::SetFlowUnitFactory(\n    const std::vector<std::shared_ptr<DriverFactory>>\n        &bind_flowunit_factory_list) {}\n\nstd::string FlowUnitFactory::GetFlowUnitInputDeviceType() { return \"\"; };\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/flowunit_balancer.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit_balancer.h\"\n\nnamespace modelbox {\n\nstatic std::unordered_map<FlowUnitBalanceStrategy, std::string,\n                          FUBalanceStrategyHash>\n    g_strategy_name_map = {\n        {FlowUnitBalanceStrategy::FU_ROUND_ROBIN, \"RoundRobin\"},\n        {FlowUnitBalanceStrategy::FU_CAPABILITY, \"Capability\"},\n        {FlowUnitBalanceStrategy::FU_NULL, \"Null\"}};\n\nstd::ostream& operator<<(std::ostream& os, const FlowUnitBalanceStrategy& s) {\n  os << g_strategy_name_map[s];\n  return os;\n}\n\nFlowUnitBalancer::FlowUnitBalancer() = default;\n\nFlowUnitBalancer::~FlowUnitBalancer() = default;\n\nStatus FlowUnitBalancer::Init(\n    const std::vector<std::shared_ptr<FlowUnit>>& flowunits) {\n  if (flowunits.empty()) {\n    return {STATUS_FAULT, \"no flowunit available\"};\n  }\n\n  flowunits_ = flowunits;\n  return OnInit();\n}\n\nStatus FlowUnitBalancer::OnInit() { return STATUS_OK; }\n\nstd::shared_ptr<FlowUnit> FlowUnitBalancer::GetFlowUnit(\n    const std::shared_ptr<FlowUnitDataContext>& data_ctx) {\n  {\n    std::lock_guard<std::mutex> lock(ctx_to_flowunit_map_lock_);\n    auto item = ctx_to_flowunit_map_.find(data_ctx.get());\n    if (item != ctx_to_flowunit_map_.end()) {\n      return item->second;\n    }\n  }\n\n  return 
FirstBind(data_ctx);\n}\n\nstd::shared_ptr<FlowUnit> FlowUnitBalancer::FirstBind(\n    const std::shared_ptr<FlowUnitDataContext>& data_ctx) {\n  auto fu = BindFlowUnit(data_ctx);\n  if (fu == nullptr) {\n    return nullptr;\n  }\n\n  {\n    std::lock_guard<std::mutex> lock(ctx_to_flowunit_map_lock_);\n    ctx_to_flowunit_map_[data_ctx.get()] = fu;\n  }\n  std::weak_ptr<FlowUnitBalancer> balancer_ref = shared_from_this();\n  auto* data_ctx_ptr = data_ctx.get();\n  data_ctx->AddDestroyCallback([data_ctx_ptr, balancer_ref]() {\n    auto balancer = balancer_ref.lock();\n    if (balancer == nullptr) {\n      return;\n    }\n\n    balancer->UnbindFlowUnit(data_ctx_ptr);\n  });\n  return fu;\n}\n\nvoid FlowUnitBalancer::UnbindFlowUnit(const FlowUnitDataContext* data_ctx_ptr) {\n  std::lock_guard<std::mutex> lock(ctx_to_flowunit_map_lock_);\n  ctx_to_flowunit_map_.erase(data_ctx_ptr);\n}\n\nFlowUnitBalancerFactory::FlowUnitBalancerFactory() = default;\n\nFlowUnitBalancerFactory::~FlowUnitBalancerFactory() = default;\n\nFlowUnitBalancerFactory& FlowUnitBalancerFactory::GetInstance() {\n  static FlowUnitBalancerFactory factory;\n  return factory;\n}\n\nstd::shared_ptr<FlowUnitBalancer> FlowUnitBalancerFactory::CreateBalancer(\n    FlowUnitBalanceStrategy strategy) {\n  auto item = balancer_creator_map_.find(strategy);\n  if (item == balancer_creator_map_.end()) {\n    MBLOG_ERROR << \"Flowunit balance strategy \" << strategy\n                << \" is not supported\";\n    return nullptr;\n  }\n\n  return item->second();\n}\n\nvoid FlowUnitBalancerFactory::RegistBalancer(\n    const FUBalancerCreateFunc& create_func) {\n  auto balancer = create_func();\n  auto strategy = balancer->GetType();\n  balancer_creator_map_[strategy] = create_func;\n}\n\nFlowUnitBalancerRegister::FlowUnitBalancerRegister(\n    const FUBalancerCreateFunc& create_func) {\n  auto& factory = FlowUnitBalancerFactory::GetInstance();\n  factory.RegistBalancer(create_func);\n}\n\nvoid 
FlowUnitBalancerUtil::Init(\n    const std::vector<std::shared_ptr<FlowUnit>>& flowunits) {\n  for (const auto& fu : flowunits) {\n    device_to_fu_map_[fu->GetBindDevice().get()] = fu;\n  }\n}\n\nstd::shared_ptr<FlowUnit> FlowUnitBalancerUtil::GetFlowUnitByDevice(\n    const std::shared_ptr<Device>& device) {\n  auto item = device_to_fu_map_.find(device.get());\n  if (item != device_to_fu_map_.end()) {\n    return item->second;\n  }\n\n  return nullptr;\n}\n\nstd::set<std::shared_ptr<Device>> FlowUnitBalancerUtil::GetInputDevices(\n    const std::shared_ptr<FlowUnitDataContext>& data_ctx) {\n  const auto& inputs = data_ctx->GetInputs();\n  std::set<std::shared_ptr<Device>> devices;\n  for (const auto& port_item : inputs) {\n    const auto& port_buffer_list = port_item.second;\n    if (port_buffer_list.empty()) {\n      continue;\n    }\n\n    auto first_buffer = port_buffer_list.front();\n    if (first_buffer == nullptr) {\n      continue;\n    }\n\n    auto dev = first_buffer->GetDevice();\n    if (dev == nullptr) {\n      continue;\n    }\n\n    devices.insert(dev);\n  }\n\n  return devices;\n}\n\nREGIST_FLOWUNIT_BALANCER(FURoundRobinBalancer);\n\nFlowUnitBalanceStrategy FURoundRobinBalancer::GetType() {\n  return FlowUnitBalanceStrategy::FU_ROUND_ROBIN;\n}\n\nStatus FURoundRobinBalancer::OnInit() {\n  util.Init(flowunits_);\n  return STATUS_OK;\n}\n\nstd::shared_ptr<FlowUnit> FURoundRobinBalancer::BindFlowUnit(\n    const std::shared_ptr<FlowUnitDataContext>& data_ctx) {\n  std::list<std::shared_ptr<FlowUnit>> candidate_fu_list;\n  auto devices = util.GetInputDevices(data_ctx);\n  for (const auto& device : devices) {\n    auto fu = util.GetFlowUnitByDevice(device);\n    if (fu == nullptr) {\n      continue;\n    }\n\n    candidate_fu_list.push_back(fu);\n  }\n\n  std::shared_ptr<FlowUnit> fu;\n  if (candidate_fu_list.empty()) {\n    fu = GetNextFU();\n  } else {\n    // Use first directly\n    fu = candidate_fu_list.front();\n  }\n\n  return 
fu;\n}\n\nstd::shared_ptr<FlowUnit> FURoundRobinBalancer::GetNextFU() {\n  auto fu = flowunits_[fu_index_];\n  fu_index_ = (fu_index_ + 1) % flowunits_.size();\n  return fu;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/flowunit_data_executor.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit_data_executor.h\"\n\n#include <utility>\n\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\n\nExecutor::Executor() {\n  thread_pool_ = std::make_shared<ThreadPool>();\n  thread_pool_->SetName(\"Executor\");\n}\n\nExecutor::Executor(int thread_count) {\n  thread_pool_ = std::make_shared<ThreadPool>(thread_count);\n  thread_pool_->SetName(\"Executor\");\n}\n\nExecutor::~Executor() { thread_pool_ = nullptr; }\n\nvoid Executor::SetThreadCount(int thread_count) {\n  thread_pool_->SetThreadSize(thread_count);\n}\n\nFlowUnitExecContext::FlowUnitExecContext(\n    std::shared_ptr<FlowUnitDataContext> data_ctx)\n    : data_ctx_(std::move(data_ctx)) {}\n\nvoid FlowUnitExecContext::SetFlowUnit(std::shared_ptr<FlowUnit> fu) {\n  bind_fu_ = std::move(fu);\n}\n\nconst std::shared_ptr<FlowUnit> &FlowUnitExecContext::GetFlowUnit() {\n  return bind_fu_;\n}\n\nconst std::shared_ptr<FlowUnitDataContext> &FlowUnitExecContext::GetDataCtx() {\n  return data_ctx_;\n}\n\nFlowUnitExecData::FlowUnitExecData(const std::shared_ptr<FlowUnit> &fu)\n    : fu_(fu) {\n  // Prepare data container\n  const auto &fu_desc = fu->GetFlowUnitDesc();\n  const auto &in_list = fu_desc->GetFlowUnitInput();\n  auto device = fu->GetBindDevice();\n  in_data_ = std::make_shared<BufferListMap>();\n  for (const auto &in_item : 
in_list) {\n    auto in_device = in_item.GetDevice();\n    in_data_->emplace(\n        in_item.GetPortName(),\n        std::make_shared<BufferList>(in_device, in_item.GetDeviceMemFlags()));\n  }\n\n  const auto &out_list = fu_desc->GetFlowUnitOutput();\n  out_data_ = std::make_shared<BufferListMap>();\n  for (const auto &out_item : out_list) {\n    out_data_->emplace(\n        out_item.GetPortName(),\n        std::make_shared<BufferList>(device, out_item.GetDeviceMemFlags()));\n  }\n\n  ext_data_ = std::make_shared<BufferListMap>();\n  if (in_list.empty()) {\n    ext_data_->emplace(EXTERNAL_PORT_NAME,\n                       std::make_shared<BufferList>(device));\n  }\n}\n\nFlowUnitExecData::~FlowUnitExecData() = default;\n\nvoid FlowUnitExecData::ReserveCache(size_t buffer_count, DataType type) {\n  auto data = in_data_;\n  auto *cache = &in_data_cache_;\n  if (type == OUT_DATA) {\n    data = out_data_;\n    cache = &out_data_cache_;\n  }\n\n  for (auto &port_item : *data) {\n    const auto &port_name = port_item.first;\n    auto &cache_buffer_list = (*cache)[port_name];\n    cache_buffer_list.clear();\n    cache_buffer_list.reserve(buffer_count);\n  }\n}\n\nvoid FlowUnitExecData::AppendToCache(\n    const std::shared_ptr<FlowUnitExecData> &src, size_t start_idx,\n    size_t count, DataType type) {\n  auto src_data = src->in_data_;\n  auto *cache = &in_data_cache_;\n  if (type == OUT_DATA) {\n    cache = &out_data_cache_;\n    src_data = src->out_data_;\n  }\n\n  for (auto &port_item : *src_data) {\n    const auto &port_name = port_item.first;\n    auto &port_data = port_item.second;\n    auto &cache_buffer_list = (*cache)[port_name];\n    auto end_idx = start_idx + count;\n    for (size_t idx = start_idx; idx < end_idx; ++idx) {\n      if (port_data->Size() == 0) {\n        // For if_else, only one port has data, need push nullptr to result\n        cache_buffer_list.push_back(nullptr);\n        continue;\n      }\n\n      
cache_buffer_list.push_back(port_data->At(idx));\n    }\n  }\n}\n\nvoid FlowUnitExecData::FlushCache(DataType type) {\n  auto data = in_data_;\n  auto *cache = &in_data_cache_;\n  if (type == OUT_DATA) {\n    data = out_data_;\n    cache = &out_data_cache_;\n  }\n\n  for (auto &port_item : *data) {\n    const auto &port_name = port_item.first;\n    auto &port_data = port_item.second;\n    auto &cache_buffer_list = (*cache)[port_name];\n    port_data->Swap(cache_buffer_list);\n  }\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitExecData::GetInData() {\n  return in_data_;\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitExecData::GetInDataForUser() {\n  return in_data_for_user_;\n}\n\nstd::shared_ptr<BufferList> FlowUnitExecData::GetInDataForUser(\n    const std::string &name) {\n  return HasInData(name) ? in_data_for_user_->at(name) : nullptr;\n}\n\nvoid FlowUnitExecData::SetInData(\n    const std::string &name,\n    const std::vector<std::shared_ptr<Buffer>> &buffer_list) {\n  // if in_data_ is empty it means the input is a external data\n  if (in_data_->empty()) {\n    SetExternalData(name, buffer_list);\n    return;\n  }\n\n  (*in_data_)[name]->Assign(buffer_list);\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitExecData::GetOutData() {\n  return out_data_;\n}\n\nstd::shared_ptr<BufferList> FlowUnitExecData::GetOutData(\n    const std::string &name) {\n  return HasOutData(name) ? 
out_data_->at(name) : nullptr;\n}\n\nStatus FlowUnitExecData::SetExternalData(\n    const std::string &name,\n    const std::vector<std::shared_ptr<Buffer>> &buffer_list) {\n  auto iter = ext_data_->find(name);\n  if (iter == ext_data_->end()) {\n    return {STATUS_INVALID, \"can not find external port\"};\n  }\n\n  auto &ext_buffer_list = iter->second;\n  if (!ext_buffer_list) {\n    return {STATUS_INVALID, \"external port must not be nullptr\"};\n  }\n\n  ext_buffer_list->Assign(buffer_list);\n  return STATUS_OK;\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitExecData::GetExternalData() {\n  return ext_data_;\n}\n\nstd::shared_ptr<BufferListMap> FlowUnitExecData::GetExternalDataForUser() {\n  return ext_data_for_user_;\n}\n\nstd::shared_ptr<BufferList> FlowUnitExecData::GetExternalDataForUser(\n    const std::string &name) {\n  return HasExternData(name) ? ext_data_for_user_->at(name) : nullptr;\n}\n\nsize_t FlowUnitExecData::GetInBufferNum() {\n  // All port data number is same\n  if (in_data_->empty() || !(in_data_->begin()->second)) {\n    return 0;\n  }\n\n  return in_data_->begin()->second->Size();\n}\n\nsize_t FlowUnitExecData::GetExtBufferNum() {\n  if (ext_data_->empty() || !(ext_data_->begin()->second)) {\n    return 0;\n  }\n\n  return ext_data_->begin()->second->Size();\n}\n\nsize_t FlowUnitExecData::GetOutBufferNum(bool accumulate_all_port) {\n  // All port data number is same\n  if (out_data_->empty() || !(out_data_->begin()->second)) {\n    return 0;\n  }\n\n  if (!accumulate_all_port) {\n    return out_data_->begin()->second->Size();\n  }\n\n  size_t sum = 0;\n  for (auto &port_item : *out_data_) {\n    sum += port_item.second->Size();\n  }\n\n  return sum;\n}\n\nStatus FlowUnitExecData::GetStatus() const { return status_; }\n\nvoid FlowUnitExecData::SetStatus(const Status &status) { status_ = status; }\n\nbool FlowUnitExecData::HasInData(const std::string &name) const {\n  return in_data_->find(name) != in_data_->end();\n}\n\nbool 
FlowUnitExecData::HasOutData(const std::string &name) const {\n  return out_data_->find(name) != out_data_->end();\n}\n\nbool FlowUnitExecData::HasExternData(const std::string &name) const {\n  return ext_data_->find(name) != ext_data_->end();\n}\n\nvoid FlowUnitExecData::SetupUserInput() {\n  // freeze data and make copy to avoid user modify origin input\n  in_data_for_user_ = std::make_shared<BufferListMap>();\n  for (auto &in_item : *in_data_) {\n    auto in_buffer_list_copy = std::make_shared<BufferList>(*in_item.second);\n    in_buffer_list_copy->SetMutable(false);\n    (*in_data_for_user_)[in_item.first] = in_buffer_list_copy;\n  }\n\n  ext_data_for_user_ = std::make_shared<BufferListMap>();\n  for (auto &ext_item : *ext_data_) {\n    auto ext_buffer_list_copy = std::make_shared<BufferList>(*ext_item.second);\n    ext_buffer_list_copy->SetMutable(false);\n    (*ext_data_for_user_)[ext_item.first] = ext_buffer_list_copy;\n  }\n}\n\nStatus FlowUnitExecData::CheckStatus(bool one_to_one, bool data_in_one_port) {\n  if (status_ == STATUS_OK || status_ == STATUS_CONTINUE ||\n      status_ == STATUS_SHUTDOWN || status_ == STATUS_STOP) {\n    return STATUS_OK;\n  }\n  MBLOG_INFO << \"flowunit \" << fu_->GetFlowUnitDesc()->GetFlowUnitName()\n             << \" process return: \" << status_;\n\n  auto in_count = GetInBufferNum();\n  if (in_count == 0) {\n    in_count = GetExtBufferNum();\n  }\n\n  size_t out_count = in_count;\n  if (!one_to_one || out_count == 0) {\n    out_count = 1;\n  }\n\n  if (!out_data_->empty()) {\n    FillErrorOutput(out_count, data_in_one_port);\n    status_ = STATUS_OK;\n  }\n\n  return STATUS_OK;\n}\n\nvoid FlowUnitExecData::FillErrorOutput(size_t out_count,\n                                       bool data_in_one_port) {\n  bool first_port = true;\n  for (auto &out_item : *out_data_) {\n    auto &port_data_list = out_item.second;\n    port_data_list->Reset();\n\n    if (data_in_one_port && !first_port) {\n      continue;\n    }\n    for 
(size_t i = 0; i < out_count; ++i) {\n      auto buffer = std::make_shared<Buffer>();\n      buffer->SetError(\n          fu_->GetFlowUnitDesc()->GetFlowUnitName() + \".ProcessError\",\n          status_.Errormsg());\n      port_data_list->PushBack(buffer);\n    }\n    first_port = false;\n  }\n}\n\nStatus FlowUnitExecData::SetupUserOutput(bool one_to_one,\n                                         bool data_in_one_port) {\n  if (status_ != STATUS_OK && status_ != STATUS_CONTINUE) {\n    // process error, no need to save inherit info\n    return STATUS_OK;\n  }\n\n  // avoid user push same buffer as multi output\n  MakeCopyForUserOutput();\n\n  auto in_count = GetInBufferNum();\n  auto parent_data = in_data_;\n  if (in_count == 0) {\n    in_count = GetExtBufferNum();\n    parent_data = ext_data_;\n  }\n\n  if (in_count == 0) {\n    // event driven\n    return STATUS_SUCCESS;\n  }\n\n  if (one_to_one) {\n    return SaveProcessOneToOne(parent_data, in_count, data_in_one_port);\n  }\n\n  return SaveProcessNToM(parent_data);\n}\n\nvoid FlowUnitExecData::MakeCopyForUserOutput() {\n  auto output = std::make_shared<BufferListMap>();\n  for (auto &out_item : *out_data_) {\n    auto output_buffer_list_copy =\n        std::make_shared<BufferList>(*out_item.second);\n    (*output)[out_item.first] = output_buffer_list_copy;\n    for (auto &buffer : *output_buffer_list_copy) {\n      buffer->ClearDelayedCopyDestinationInfo();\n    }\n  }\n\n  out_data_ = output;\n}\n\nStatus FlowUnitExecData::SaveProcessOneToOne(\n    const std::shared_ptr<BufferListMap> &parent_data, size_t data_count,\n    bool data_in_one_port) {\n  // input n, output n, and inherit one to one\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list;\n\n  auto out_count = GetOutBufferNum(data_in_one_port);\n  if (data_count != out_count) {\n    return {STATUS_FAULT, \"input buffer count \" + std::to_string(data_count) +\n                              \" should equal output buffer count \" +\n      
                        std::to_string(out_count)};\n  }\n\n  process_info_list.reserve(data_count);\n  for (size_t i = 0; i < data_count; ++i) {\n    process_info_list.push_back(std::make_shared<BufferProcessInfo>());\n  }\n\n  for (auto &in_item : *parent_data) {\n    const auto &port_name = in_item.first;\n    auto &port_data_list = in_item.second;\n    for (size_t i = 0; i < port_data_list->Size(); ++i) {\n      auto process_info = process_info_list[i];\n      auto buffer = port_data_list->At(i);\n      auto buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n      process_info->SetParentBuffers(port_name, {buffer_index_info});\n    }\n  }\n\n  for (auto &out_item : *out_data_) {\n    auto &port_data_list = out_item.second;\n    for (size_t i = 0; i < port_data_list->Size(); ++i) {\n      auto process_info = process_info_list[i];\n      auto buffer = port_data_list->At(i);\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      index_info->SetProcessInfo(process_info);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecData::SaveProcessNToM(\n    const std::shared_ptr<BufferListMap> &parent_data) {\n  // input n, output m\n  auto process_info = std::make_shared<BufferProcessInfo>();\n  for (auto &in_item : *parent_data) {\n    const auto &port_name = in_item.first;\n    auto &port_data_list = in_item.second;\n    std::list<std::shared_ptr<BufferIndexInfo>> in_port_buffer_index_info_list;\n    for (auto &buffer : *port_data_list) {\n      auto buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n      in_port_buffer_index_info_list.push_back(buffer_index_info);\n    }\n\n    process_info->SetParentBuffers(port_name,\n                                   std::move(in_port_buffer_index_info_list));\n  }\n\n  for (auto &out_item : *out_data_) {\n    auto &port_data_list = out_item.second;\n    for (auto &buffer : *port_data_list) {\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      
index_info->SetProcessInfo(process_info);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nvoid FlowUnitExecDataMapper::AddExecCtx(\n    const std::shared_ptr<FlowUnitExecContext> &exec_ctx) {\n  origin_exec_ctx_list_.push_back(exec_ctx);\n}\n\nvoid FlowUnitExecDataMapper::LoadDataFromExecCtx() {\n  auto ctx_count = origin_exec_ctx_list_.size();\n  origin_data_list_.reserve(ctx_count);\n  origin_shapes_.reserve(ctx_count);\n  for (auto &exec_ctx : origin_exec_ctx_list_) {\n    auto exec_data =\n        std::make_shared<FlowUnitExecData>(exec_ctx->GetFlowUnit());\n    const auto &inputs = exec_ctx->GetDataCtx()->GetInputs();\n    for (const auto &item : inputs) {\n      const auto &port_name = item.first;\n      const auto &port_data_list = item.second;\n      if (port_data_list.empty()) {\n        continue;\n      }\n\n      exec_data->SetInData(port_name, port_data_list);\n    }\n\n    origin_data_list_.push_back(exec_data);\n    origin_shapes_.push_back(exec_data->GetInBufferNum());\n  }\n}\n\nStatus FlowUnitExecDataMapper::MapData(bool need_reshape, size_t batch_size,\n                                       bool is_stream) {\n  if (!need_reshape || !NeedReshape(batch_size)) {\n    map_type_ = DIRECT_MAP;\n    return DirectMap();\n  }\n\n  if (batch_size == 0) {\n    return {STATUS_FAULT, \"batch_size should not be zero\"};\n  }\n\n  if (is_stream) {\n    map_type_ = RESHAPE_STREAM;\n    return ReshapeStream(batch_size);\n  }\n\n  map_type_ = RESHAPE_NORMAL;\n  return ReshapeNormal(batch_size);\n}\n\nStatus FlowUnitExecDataMapper::MoveToTargetDevice(bool need_contiguous) {\n  for (auto &batched_data : mapped_data_list_) {\n    for (auto &data : batched_data) {\n      auto in_data = data->GetInData();\n      auto ret = MoveDataToTargetDevice(in_data, need_contiguous);\n      if (!ret) {\n        MBLOG_ERROR << \"Move input data to target dev failed, err: \" << ret;\n        return ret;\n      }\n\n      auto ext_data = data->GetExternalData();\n      ret = 
MoveDataToTargetDevice(ext_data, need_contiguous);\n      if (!ret) {\n        MBLOG_ERROR << \"Move external data to target dev failed, err: \" << ret;\n        return ret;\n      }\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitExecDataMapper::MoveDataToTargetDevice(\n    std::shared_ptr<BufferListMap> &data, bool need_contiguous) {\n  for (auto &item : *data) {\n    auto &buffer_list = item.second;\n    if (need_contiguous && buffer_list->SupportMemContiguous()) {\n      if (!buffer_list->MakeContiguous()) {\n        return {STATUS_FAULT, \"make contiguous failed, port:\" + item.first};\n      }\n    } else {\n      if (!buffer_list->MoveAllBufferToTargetDevice()) {\n        return {STATUS_FAULT,\n                \"move buffer to target dev failed, port:\" + item.first};\n      }\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitExecDataMapper::SetupUserInput() {\n  for (auto &data_batch : mapped_data_list_) {\n    for (auto &data : data_batch) {\n      data->SetupUserInput();\n    }\n  }\n}\n\nBatchedFUExecDataCtxList FlowUnitExecDataMapper::GetBatchedExecDataCtxList() {\n  return mapped_exec_data_ctx_list_;\n}\n\nStatus FlowUnitExecDataMapper::CheckOutputDataNumber(bool data_in_one_port) {\n  for (auto &batched_data : mapped_data_list_) {\n    for (auto &data : batched_data) {\n      auto status = data->GetStatus();\n      if (status != STATUS_OK && status != STATUS_CONTINUE) {\n        // Flowunit process failed, skip this batch\n        continue;\n      }\n\n      auto outputs = data->GetOutData();\n      if (outputs == nullptr) {\n        return {STATUS_FAULT, \"output data is nullptr\"};\n      }\n\n      if (outputs->empty()) {\n        // Flowunit has no output port\n        continue;\n      }\n\n      auto ret = CheckAllOutputNumEqual(data, data_in_one_port);\n      if (!ret) {\n        return ret;\n      }\n\n      ret = CheckOutputNumEqualInput(data, data_in_one_port);\n      if (!ret) {\n        return ret;\n      }\n    }\n  
}\n\n  return true;\n}\n\nStatus FlowUnitExecDataMapper::CheckAllOutputNumEqual(\n    const std::shared_ptr<FlowUnitExecData> &data, bool data_in_one_port) {\n  auto outputs = data->GetOutData();\n  if (outputs->size() == 1) {\n    return STATUS_OK;\n  }\n\n  size_t none_empty_port_num = 0;\n  size_t port_data_num = 0;\n  bool first_buffer = true;\n  for (auto &port_item : *outputs) {\n    auto &port_data_list = port_item.second;\n    auto cur_port_data_num = port_data_list->Size();\n    // For if else: only one port has data\n    if (data_in_one_port) {\n      none_empty_port_num += (cur_port_data_num == 0 ? 0 : 1);\n      if (none_empty_port_num > 1) {\n        return {STATUS_FAULT,\n                \"For condition flowunit, should only one port has data\"};\n      }\n\n      continue;\n    }\n\n    // For other: all port has same output number\n    if (first_buffer) {\n      port_data_num = cur_port_data_num;\n      first_buffer = false;\n      continue;\n    }\n\n    if (port_data_num != cur_port_data_num) {\n      return {STATUS_FAULT, \"Output port \" + port_item.first +\n                                \" data is not same with other port\"};\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataMapper::CheckOutputNumEqualInput(\n    const std::shared_ptr<FlowUnitExecData> &data, bool data_in_one_port) {\n  if (map_type_ != RESHAPE_NORMAL) {\n    // Only reshape normal needs input == output\n    return STATUS_OK;\n  }\n\n  auto in_num = data->GetInBufferNum();\n  auto accumulate_all_port_data = data_in_one_port;\n  auto out_num = data->GetOutBufferNum(accumulate_all_port_data);\n\n  if (in_num != out_num) {\n    return {STATUS_FAULT,\n            \"Output number must equals input number in normal flowunit\"};\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataMapper::CheckStatus(bool one_to_one,\n                                           bool data_in_one_port) {\n  for (auto &mapped_ctx_data : mapped_data_list_) {\n    for (auto 
&mapped_batch_data : mapped_ctx_data) {\n      auto ret = mapped_batch_data->CheckStatus(one_to_one, data_in_one_port);\n      if (!ret) {\n        return ret;\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataMapper::SetupUserOutput(bool one_to_one,\n                                               bool data_in_one_port) {\n  for (auto &mapped_ctx_data : mapped_data_list_) {\n    for (auto &mapped_batch_data : mapped_ctx_data) {\n      auto ret =\n          mapped_batch_data->SetupUserOutput(one_to_one, data_in_one_port);\n      if (!ret) {\n        return ret;\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataMapper::SaveDataToExecCtx() {\n  /**\n   * case DirectMap:\n   * {origin_data(8)} ====> origin_data(8)\n   * case Reshape Normal:\n   * {mapped_data(5)}       origin_data(8)\n   * {mapped_data(5)} ====> origin_data(5)\n   * {mapped_data(3)}\n   * case Reshape Stream: data number not same with origin input\n   * {mapped_data, mapped_data} ===> origin_data\n   **/\n  auto ret = STATUS_OK;\n  switch (map_type_) {\n    case RESHAPE_STREAM:\n      ret = WriteBackStream();\n      break;\n\n    case RESHAPE_NORMAL:\n      ret = WriteBackNormal();\n      break;\n\n    case DIRECT_MAP:\n    default:\n      break;\n  }\n\n  if (!ret) {\n    MBLOG_ERROR << \"Write back data failed, err \" << ret;\n    return ret;\n  }\n\n  return FillExecCtxOutput();\n}\n\nvoid FlowUnitExecDataMapper::Clear() {\n  // release processing data\n  for (auto &batched_data_ctx : mapped_exec_data_ctx_list_) {\n    for (auto &data_ctx : batched_data_ctx) {\n      data_ctx->Clear();\n    }\n  }\n}\n\nStatus FlowUnitExecDataMapper::WriteBackStream() {\n  auto ctx_count = mapped_data_list_.size();\n  for (size_t ctx_idx = 0; ctx_idx < ctx_count; ++ctx_idx) {\n    auto &mapped_batch_data = mapped_data_list_[ctx_idx];\n    auto &origin_data = origin_data_list_[ctx_idx];\n    origin_data->SetStatus(STATUS_OK);\n    auto output_size = std::accumulate(\n     
   mapped_batch_data.begin(), mapped_batch_data.end(), size_t(0),\n        [](size_t sum, const std::shared_ptr<FlowUnitExecData> &mapped_data) {\n          return sum + mapped_data->GetOutBufferNum();\n        });\n    auto type = FlowUnitExecData::OUT_DATA;\n    origin_data->ReserveCache(output_size, type);\n    for (auto &mapped_data : mapped_batch_data) {\n      if (origin_data->GetStatus() == STATUS_OK ||\n          origin_data->GetStatus() == STATUS_CONTINUE) {\n        origin_data->SetStatus(mapped_data->GetStatus());\n      }\n\n      origin_data->AppendToCache(mapped_data, 0, mapped_data->GetOutBufferNum(),\n                                 type);\n    }\n    origin_data->FlushCache(type);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitExecDataMapper::WriteBackNormal() {\n  auto mapped_ctx_count = mapped_data_list_.size();\n  size_t mapped_ctx_idx = 0;\n  size_t buffer_idx_in_mapped_data = 0;\n  size_t origin_ctx_idx = 0;\n  size_t buffer_idx_in_origin_data = 0;\n  const size_t buffer_count = 1;\n  auto type = FlowUnitExecData::OUT_DATA;\n  while (mapped_ctx_idx < mapped_ctx_count) {\n    // only one data per batch for normal\n    auto &mapped_data = mapped_data_list_[mapped_ctx_idx].front();\n    auto mapped_shape = mapped_shapes_[mapped_ctx_idx].front();\n    auto &origin_data = origin_data_list_[origin_ctx_idx];\n    auto origin_shape = origin_shapes_[origin_ctx_idx];\n\n    if (buffer_idx_in_mapped_data >= mapped_shape) {\n      buffer_idx_in_mapped_data = 0;\n      ++mapped_ctx_idx;\n      if (mapped_ctx_idx >= mapped_ctx_count) {\n        // The end buffer\n        origin_data->FlushCache(type);\n      }\n\n      continue;\n    }\n\n    if (buffer_idx_in_origin_data == 0) {\n      // Start to write a new ctx output\n      origin_data->ReserveCache(origin_shape, type);\n    }\n\n    if (buffer_idx_in_origin_data >= origin_shape) {\n      origin_data->FlushCache(type);\n      buffer_idx_in_origin_data = 0;\n      ++origin_ctx_idx;\n      
continue;\n    }\n\n    if (!mapped_data->GetStatus()) {\n      origin_data->SetStatus(mapped_data->GetStatus());\n    } else {\n      origin_data->AppendToCache(mapped_data, buffer_idx_in_mapped_data,\n                                 buffer_count, type);\n    }\n\n    ++buffer_idx_in_mapped_data;\n    ++buffer_idx_in_origin_data;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitExecDataMapper::FillExecCtxOutput() {\n  size_t ctx_idx = 0;\n  for (auto &exec_ctx : origin_exec_ctx_list_) {\n    auto &data = origin_data_list_[ctx_idx];\n    const auto &data_ctx = exec_ctx->GetDataCtx();\n    data_ctx->SetStatus(data->GetStatus());\n    data_ctx->SetOutput(*data->GetOutData());\n    ++ctx_idx;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nbool FlowUnitExecDataMapper::NeedReshape(size_t batch_size) {\n  /**\n   * if any one input is 0, then it cannot be reshaped\n   * in case: external data, event data\n   **/\n  if (std::any_of(origin_data_list_.begin(), origin_data_list_.end(),\n                  [](std::shared_ptr<FlowUnitExecData> &data) {\n                    return data->GetInBufferNum() == 0;\n                  })) {\n    return false;\n  }\n\n  // Input buffer num might less than batch_size, but we still need use reshape\n  // process to check output\n  return true;\n}\n\nStatus FlowUnitExecDataMapper::DirectMap() {\n  /** Direct map\n   * origin_data(8)          {origin_data(8)}\n   * origin_data(7) =======> {origin_data(7)}\n   * origin_data(3)          {origin_data(3)}\n   **/\n  auto data_ctx_count = origin_data_list_.size();\n  mapped_data_list_.resize(data_ctx_count);\n  mapped_exec_data_ctx_list_.resize(data_ctx_count);\n  for (size_t ctx_idx = 0; ctx_idx < data_ctx_count; ++ctx_idx) {\n    auto &origin_data = origin_data_list_[ctx_idx];\n    auto &origin_ctx = origin_exec_ctx_list_[ctx_idx];\n    mapped_data_list_[ctx_idx].push_back(origin_data);\n    auto mapped_ctx = std::make_shared<ExecutorDataContext>(\n        origin_ctx->GetDataCtx(), 
origin_data);\n    mapped_exec_data_ctx_list_[ctx_idx].push_back(mapped_ctx);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitExecDataMapper::ReshapeNormal(size_t batch_size) {\n  /** Will mix diff data ctx\n   * origin_data(8)  batch_size = 5   {mapped_data(5)}\n   * origin_data(7) ================> {mapped_data(5)}\n   * origin_data(3)                   {mapped_data(5)}\n   *                                  {mapped_data(3)}\n   * total_input_buffer = 18\n   **/\n  BuildMappedDataNormal(batch_size);\n  FillMappedDataNormal(batch_size);\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitExecDataMapper::BuildMappedDataNormal(size_t batch_size) {\n  size_t total_input_buffer = std::accumulate(\n      origin_data_list_.begin(), origin_data_list_.end(), (size_t)0,\n      [](size_t sum, std::shared_ptr<FlowUnitExecData> &data) {\n        return sum + data->GetInBufferNum();\n      });\n  size_t new_exec_data_count =\n      (total_input_buffer + batch_size - 1) / batch_size;\n  mapped_data_list_.resize(new_exec_data_count);\n  mapped_shapes_.resize(new_exec_data_count);\n  mapped_exec_data_ctx_list_.resize(new_exec_data_count);\n}\n\nvoid FlowUnitExecDataMapper::FillMappedDataNormal(size_t batch_size) {\n  size_t origin_data_count = origin_data_list_.size();\n  size_t origin_index = 0;\n  size_t index_in_origin_bufferlist = 0;\n  size_t mapped_index = 0;\n  size_t index_in_mapped_bufferlist = 0;\n  const size_t buffer_count = 1;\n  while (origin_index < origin_data_count) {\n    auto &origin_data = origin_data_list_[origin_index];\n    auto &origin_shape = origin_shapes_[origin_index];\n    auto &mapped_batch_data = mapped_data_list_[mapped_index];\n    auto &mapped_batch_shape = mapped_shapes_[mapped_index];\n    if (mapped_batch_data.empty()) {\n      auto &origin_exec_ctx = origin_exec_ctx_list_[origin_index];\n      auto &mapped_batch_data_ctx = mapped_exec_data_ctx_list_[mapped_index];\n      auto mapped_data =\n          
std::make_shared<FlowUnitExecData>(origin_exec_ctx->GetFlowUnit());\n      auto mapped_data_ctx = std::make_shared<ExecutorDataContext>(\n          origin_exec_ctx->GetDataCtx(), mapped_data);\n      mapped_data->ReserveCache(batch_size);\n      mapped_batch_data.push_back(mapped_data);\n      mapped_batch_shape.push_back(0);\n      mapped_batch_data_ctx.push_back(mapped_data_ctx);\n    }\n\n    auto &mapped_data = mapped_batch_data[0];\n    mapped_data->AppendToCache(origin_data, index_in_origin_bufferlist,\n                               buffer_count);\n    ++index_in_origin_bufferlist;\n    ++index_in_mapped_bufferlist;\n\n    if (index_in_origin_bufferlist >= origin_shape) {\n      // Read next data\n      ++origin_index;\n      index_in_origin_bufferlist = 0;\n      if (origin_index >= origin_data_count) {\n        // The end buffer\n        mapped_batch_shape[0] = index_in_mapped_bufferlist;\n        mapped_data->FlushCache();\n        continue;\n      }\n    }\n\n    if (index_in_mapped_bufferlist >= batch_size) {\n      // Save last mapped data\n      mapped_batch_shape[0] = batch_size;\n      mapped_data->FlushCache();\n      // Fill next mapped data\n      ++mapped_index;\n      index_in_mapped_bufferlist = 0;\n    }\n  }\n}\n\nStatus FlowUnitExecDataMapper::ReshapeStream(size_t batch_size) {\n  /** Will not mix diff data ctx\n   * origin_data(8)  batch_size = 5   {mapped_data(5), mapped_data(3)}\n   * origin_data(7) ================> {mapped_data(5), mapped_data(2)}\n   * origin_data(3)                   {mapped_data(3)}\n   * total_input_buffer = 18\n   **/\n  BuildMappedDataStream();\n  FillMappedDataStream(batch_size);\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitExecDataMapper::BuildMappedDataStream() {\n  size_t data_ctx_count = origin_data_list_.size();\n  mapped_data_list_.resize(data_ctx_count);\n  mapped_shapes_.resize(data_ctx_count);\n  mapped_exec_data_ctx_list_.resize(data_ctx_count);\n}\n\nvoid 
FlowUnitExecDataMapper::FillMappedDataStream(size_t batch_size) {\n  size_t data_ctx_count = origin_data_list_.size();\n  for (size_t data_ctx_idx = 0; data_ctx_idx < data_ctx_count; ++data_ctx_idx) {\n    auto &origin_data = origin_data_list_[data_ctx_idx];\n    auto &origin_shape = origin_shapes_[data_ctx_idx];\n    auto &origin_exec_ctx = origin_exec_ctx_list_[data_ctx_idx];\n    auto &mapped_batch_data = mapped_data_list_[data_ctx_idx];\n    auto &mapped_batch_shape = mapped_shapes_[data_ctx_idx];\n    auto &mapped_batch_data_ctx = mapped_exec_data_ctx_list_[data_ctx_idx];\n    auto batch_count = (origin_shape + batch_size - 1) / batch_size;\n    mapped_batch_data.reserve(batch_count);\n    mapped_batch_data_ctx.reserve(batch_count);\n    for (size_t batch_idx = 0; batch_idx < batch_count; ++batch_idx) {\n      auto mapped_data =\n          std::make_shared<FlowUnitExecData>(origin_exec_ctx->GetFlowUnit());\n      auto mapped_ctx = std::make_shared<ExecutorDataContext>(\n          origin_exec_ctx->GetDataCtx(), mapped_data);\n      mapped_batch_data.push_back(mapped_data);\n      mapped_batch_data_ctx.push_back(mapped_ctx);\n      auto buffer_idx_in_origin = batch_idx * batch_size;\n      auto buffer_count =\n          std::min(batch_size, origin_shape - buffer_idx_in_origin);\n      mapped_batch_shape.push_back(buffer_count);\n      mapped_data->ReserveCache(buffer_count);\n      mapped_data->AppendToCache(origin_data, buffer_idx_in_origin,\n                                 buffer_count);\n      mapped_data->FlushCache();\n    }\n  }\n}\n\nFlowUnitExecDataView::FlowUnitExecDataView(FUExecContextList exec_ctx_list)\n    : exec_ctx_list_(std::move(exec_ctx_list)) {}\n\nFlowUnitExecDataView::~FlowUnitExecDataView() = default;\n\nStatus FlowUnitExecDataView::LoadInputFromExecCtx(bool need_reshape,\n                                                  bool is_stream,\n                                                  size_t batch_size,\n                                
                  bool need_contiguous) {\n  auto ret = DevideExecCtxByFlowunit();\n  if (!ret) {\n    return ret;\n  }\n\n  LoadConfig cfg(need_reshape, is_stream, batch_size, need_contiguous);\n  std::vector<std::shared_ptr<Executor>> executor_of_flownit;\n  std::vector<std::function<Status()>> task_of_flowunit;\n  ret = PackLoadTasks(cfg, executor_of_flownit, task_of_flowunit);\n  if (!ret) {\n    return ret;\n  }\n\n  std::vector<std::future<Status>> status_list;\n  size_t fu_count = task_of_flowunit.size() - 1;\n  status_list.reserve(fu_count);\n  for (size_t fu_idx = 0; fu_idx < fu_count; ++fu_idx) {\n    auto &fu_executor = executor_of_flownit[fu_idx];\n    auto ret = fu_executor->Run(task_of_flowunit[fu_idx], 0);\n    status_list.push_back(std::move(ret));\n  }\n\n  // Use current thread to process last one\n  auto &last_prepare_task = task_of_flowunit.back();\n  auto task_ret = last_prepare_task();\n  if (!task_ret) {\n    return task_ret;\n  }\n\n  // Wait async process result\n  for (auto &status : status_list) {\n    auto ret = status.get();\n    if (!ret) {\n      return ret;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus FlowUnitExecDataView::DataLoadTask(\n    const LoadConfig &cfg, FlowUnit *flowunit,\n    const std::shared_ptr<FlowUnitExecDataMapper> &exec_data_mapper) {\n  exec_data_mapper->LoadDataFromExecCtx();\n  auto ret = exec_data_mapper->MapData(cfg.need_reshape_, cfg.batch_size_,\n                                       cfg.is_stream_);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = exec_data_mapper->MoveToTargetDevice(cfg.need_contiguous_);\n  if (!ret) {\n    return ret;\n  }\n\n  exec_data_mapper->SetupUserInput();\n  std::lock_guard<std::mutex> lock(data_of_flowunit_lock_);\n  data_of_flowunit_[flowunit] = exec_data_mapper->GetBatchedExecDataCtxList();\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataView::PackLoadTasks(\n    const LoadConfig &cfg, std::vector<std::shared_ptr<Executor>> &executors,\n    
std::vector<std::function<Status()>> &tasks) {\n  executors.reserve(mapper_of_flowunit_.size());\n  tasks.reserve(mapper_of_flowunit_.size());\n  for (auto &item : mapper_of_flowunit_) {\n    const auto &flowunit = item.first;\n    auto device = flowunit->GetBindDevice();\n    if (device == nullptr) {\n      MBLOG_ERROR << \"Get bind device failed\";\n      return STATUS_FAULT;\n    }\n\n    auto executor = device->GetDeviceExecutor();\n    if (executor == nullptr) {\n      MBLOG_ERROR << \"Get device executor failed\";\n      return STATUS_FAULT;\n    }\n\n    auto exec_data_mapper = item.second;\n    executors.push_back(executor);\n    tasks.emplace_back(std::bind(&FlowUnitExecDataView::DataLoadTask, this, cfg,\n                                 flowunit, exec_data_mapper));\n  }\n\n  return STATUS_OK;\n}\n\nconst std::vector<FlowUnit *> &FlowUnitExecDataView::GetFlowUnits() {\n  return flowunit_list_;\n}\n\nconst BatchedFUExecDataCtxList &FlowUnitExecDataView::GetFlowUnitProcessData(\n    FlowUnit *flowunit) {\n  return data_of_flowunit_[flowunit];\n}\n\nStatus FlowUnitExecDataView::CheckOutputDataNumber(bool data_in_one_port) {\n  for (auto &mapper_item : mapper_of_flowunit_) {\n    auto &mapper = mapper_item.second;\n    auto ret = mapper->CheckOutputDataNumber(data_in_one_port);\n    if (!ret) {\n      return ret;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataView::CheckStatus(bool one_to_one,\n                                         bool data_in_one_port) {\n  for (auto &mapper_item : mapper_of_flowunit_) {\n    auto &mapper = mapper_item.second;\n    auto ret = mapper->CheckStatus(one_to_one, data_in_one_port);\n    if (!ret) {\n      return ret;\n    }\n  }\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataView::SetupUserOutput(bool one_to_one,\n                                             bool data_in_one_port) {\n  for (auto &mapper_item : mapper_of_flowunit_) {\n    auto &mapper = mapper_item.second;\n    auto ret = 
 mapper->SetupUserOutput(one_to_one, data_in_one_port);\n    if (!ret) {\n      return ret;\n    }\n  }\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataView::SaveOutputToExecCtx() {\n  std::vector<std::shared_ptr<Executor>> executor_of_flowunit;\n  std::vector<std::function<Status()>> task_of_flowunit;\n  auto ret = PackSaveTasks(executor_of_flowunit, task_of_flowunit);\n  if (!ret) {\n    return ret;\n  }\n\n  size_t fu_count = task_of_flowunit.size() - 1;\n  std::vector<std::future<Status>> status_list;\n  status_list.reserve(fu_count);\n  for (size_t fu_idx = 0; fu_idx < fu_count; ++fu_idx) {\n    auto &flowunit_executor = executor_of_flowunit[fu_idx];\n    auto ret = flowunit_executor->Run(task_of_flowunit[fu_idx], 0);\n    status_list.push_back(std::move(ret));\n  }\n\n  auto &last_save_task = task_of_flowunit.back();\n  auto task_ret = last_save_task();\n  if (!task_ret) {\n    return task_ret;\n  }\n\n  for (auto &status : status_list) {\n    auto ret = status.get();\n    if (!ret) {\n      return ret;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitExecDataView::Clear() {\n  // release processing data\n  for (auto &mapper : mapper_of_flowunit_) {\n    mapper.second->Clear();\n  }\n}\n\nStatus FlowUnitExecDataView::PackSaveTasks(\n    std::vector<std::shared_ptr<Executor>> &executors,\n    std::vector<std::function<Status()>> &tasks) {\n  executors.reserve(mapper_of_flowunit_.size());\n  tasks.reserve(mapper_of_flowunit_.size());\n  for (auto &mapper_item : mapper_of_flowunit_) {\n    auto *flowunit = mapper_item.first;\n    auto device = flowunit->GetBindDevice();\n    if (device == nullptr) {\n      MBLOG_ERROR << \"Get bind device failed\";\n      return STATUS_FAULT;\n    }\n\n    auto executor = device->GetDeviceExecutor();\n    if (executor == nullptr) {\n      MBLOG_ERROR << \"Get device executor failed\";\n      return STATUS_FAULT;\n    }\n\n    auto &mapper = mapper_item.second;\n    executors.push_back(executor);\n    tasks.emplace_back(\n  
      std::bind(&FlowUnitExecDataMapper::SaveDataToExecCtx, mapper.get()));\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitExecDataView::DevideExecCtxByFlowunit() {\n  for (auto &exec_ctx : exec_ctx_list_) {\n    const auto &flowunit = exec_ctx->GetFlowUnit();\n    auto item = mapper_of_flowunit_.find(flowunit.get());\n    std::shared_ptr<FlowUnitExecDataMapper> mapper;\n    if (item != mapper_of_flowunit_.end()) {\n      mapper = item->second;\n    } else {\n      mapper = std::make_shared<FlowUnitExecDataMapper>();\n      mapper_of_flowunit_[flowunit.get()] = mapper;\n      flowunit_list_.push_back(flowunit.get());\n    }\n\n    mapper->AddExecCtx(exec_ctx);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nFlowUnitDataExecutor::FlowUnitDataExecutor(std::weak_ptr<Node> node_ref,\n                                           size_t batch_size)\n    : node_ref_(std::move(node_ref)), batch_size_(batch_size) {}\n\nFlowUnitDataExecutor::~FlowUnitDataExecutor() = default;\n\nStatus FlowUnitDataExecutor::DataCtxExecuteFunc(\n    FlowUnit *flowunit, const BatchedFUExecDataCtxList &process_data,\n    size_t data_ctx_idx) {\n  const auto &batched_fu_data_ctx = process_data[data_ctx_idx];\n  for (const auto &data_ctx : batched_fu_data_ctx) {\n    Status status = STATUS_FAULT;\n    try {\n      status = flowunit->Process(data_ctx);\n    } catch (const std::exception &e) {\n      std::string msg(\"Exception caught in flowunit process\");\n      msg += \", detail:\";\n      msg += e.what();\n      status = {STATUS_SHUTDOWN, msg};\n    }\n\n    data_ctx->SetStatus(status);\n    /** Only STOP and SHUTDOWN will be transparent\n     * STOP means to stop scheduling, SHUTDOWN means that a fatal error\n     * has occurred and needs to exit\n     **/\n    if (status == STATUS_STOP || status == STATUS_SHUTDOWN) {\n      return status;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitDataExecutor::SetNeedCheckOutput(bool need_check) {\n  need_check_output_ = need_check;\n}\n\nStatus 
FlowUnitDataExecutor::Process(const FUExecContextList &exec_ctx_list) {\n  /**\n   * for event type data ctx list, all inputs is 0. (videodemuxer event input)\n   * for data type data ctx list, inputs is ports, port buffer list size will\n   * not be 0\n   **/\n  FlowUnitExecDataView exec_view(exec_ctx_list);\n  auto node = node_ref_.lock();\n  if (node == nullptr) {\n    return {STATUS_FAULT, \"Node has been released\"};\n  }\n\n  auto node_name = node->GetName();\n  auto ret = LoadExecuteInput(node, exec_view);\n  if (!ret) {\n    MBLOG_ERROR << \"node: \" << node_name << \", load execute input failed, err \"\n                << ret;\n    return ret;\n  }\n\n  ret = Execute(exec_view);\n  if (!ret) {\n    MBLOG_ERROR << \"node: \" << node_name << \", execute failed, err \" << ret;\n    return ret;\n  }\n\n  ret = SaveExecuteOutput(node, exec_view);\n  if (!ret) {\n    MBLOG_ERROR << \"node: \" << node_name << \", save execute output failed, err \"\n                << ret;\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataExecutor::LoadExecuteInput(const std::shared_ptr<Node> &node,\n                                              FlowUnitExecDataView &exec_view) {\n  bool need_reshape = false;\n\n  if (node->GetOutputType() == ORIGIN) {\n    need_reshape = true;\n  }\n\n  if (node->GetConditionType() == IF_ELSE) {\n    if (batch_size_ != 1) {\n      MBLOG_WARN\n          << \"Batch size not available for condition flowunit, auto set to 1\";\n      batch_size_ = 1;\n    }\n\n    need_reshape = true;\n  }\n\n  auto is_stream = (node->GetFlowType() == STREAM);\n  auto need_contiguous = node->IsInputContiguous();\n\n  auto ret = exec_view.LoadInputFromExecCtx(need_reshape, is_stream,\n                                            batch_size_, need_contiguous);\n  if (!ret) {\n    MBLOG_ERROR << \"Prepare exec view by batch size failed, \" << ret;\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus 
FlowUnitDataExecutor::Execute(FlowUnitExecDataView &exec_view) {\n  const int32_t priority = 0;\n  std::list<std::future<Status>> status_list;\n  // each flowunit has a device executor which manages thread pool\n  const auto &flowunits = exec_view.GetFlowUnits();\n  for (const auto &flowunit : flowunits) {\n    const auto &process_data = exec_view.GetFlowUnitProcessData(flowunit);\n    auto data_ctx_count = process_data.size();\n    auto exec_func = std::bind(&FlowUnitDataExecutor::DataCtxExecuteFunc, this,\n                               flowunit, process_data, std::placeholders::_1);\n    auto resource_nice = flowunit->GetFlowUnitDesc()->IsResourceNice();\n    auto exec_device = flowunit->GetBindDevice();\n    auto future_status_list = exec_device->DeviceExecuteAsync(\n        exec_func, priority, data_ctx_count, resource_nice);\n    status_list.splice(status_list.begin(), future_status_list);\n  }\n\n  auto status_count = status_list.size();\n  std::vector<Status> exec_results(status_count, STATUS_OK);\n  size_t result_idx = 0;\n  for (auto &fu_status : status_list) {\n    exec_results[result_idx] = fu_status.get();\n    ++result_idx;\n  }\n\n  for (const auto &result : exec_results) {\n    if (!result) {\n      return result;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitDataExecutor::SaveExecuteOutput(\n    const std::shared_ptr<Node> &node, FlowUnitExecDataView &exec_view) {\n  /**\n   * input num must equals output num in normal flowunit\n   * condition flowunit only one port has data\n   * all port data num should be same\n   *\n   * as usual, only need check output in develop mode\n   * user could close check at running mode\n   **/\n  auto data_in_one_port = (node->GetConditionType() != ConditionType::NONE ||\n                           node->GetLoopType() == LOOP);\n  auto node_has_output = node->GetOutputNum() != 0;\n\n  if (need_check_output_ && node_has_output) {\n    auto ret = exec_view.CheckOutputDataNumber(data_in_one_port);\n    if 
(!ret) {\n      MBLOG_ERROR << \"check output failed, err \" << ret;\n      return ret;\n    }\n  }\n\n  bool one_to_one =\n      node->GetFlowType() == NORMAL && node->GetOutputType() == ORIGIN;\n  auto ret = exec_view.CheckStatus(one_to_one, data_in_one_port);\n  if (!ret) {\n    MBLOG_ERROR << \"check data context status failed, err \" << ret;\n    return STATUS_FAULT;\n  }\n\n  if (node_has_output) {\n    ret = exec_view.SetupUserOutput(one_to_one, data_in_one_port);\n    if (!ret) {\n      MBLOG_ERROR << \"save buffer inherit info failed, err \" << ret;\n      return STATUS_FAULT;\n    }\n  }\n\n  ret = exec_view.SaveOutputToExecCtx();\n  if (!ret) {\n    MBLOG_ERROR << \"setup failed, err \" << ret;\n    return ret;\n  }\n\n  // release processing data\n  exec_view.Clear();\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/flowunit_group.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit_group.h\"\n\n#include \"modelbox/error.h\"\n#include \"modelbox/flowunit_balancer.h\"\n#include \"modelbox/flowunit_data_executor.h\"\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\n\nvoid FlowUnitGroup::InitTrace() {\n  if (profiler_ == nullptr) {\n    return;\n  }\n\n  auto trace = profiler_->GetTrace();\n  if (trace == nullptr) {\n    return;\n  }\n\n  auto node = node_.lock();\n  if (node == nullptr) {\n    MBLOG_WARN << \"node is nullptr for flownit \" << unit_name_\n               << \", init trace failed\";\n    return;\n  }\n\n  flowunit_trace_ = trace->FlowUnit(node->GetName());\n  if (flowunit_trace_ == nullptr) {\n    MBLOG_WARN << \"create trace for node \" << node->GetName() << \" failed\";\n  }\n}\n\nuint32_t FlowUnitGroup::GetBatchSize() const { return batch_size_; }\n\nstd::shared_ptr<TraceSlice> FlowUnitGroup::StartTrace(\n    FUExecContextList &exec_ctx_list) {\n  std::call_once(trace_init_flag_, &FlowUnitGroup::InitTrace, this);\n\n  if (flowunit_trace_ == nullptr) {\n    return nullptr;\n  }\n\n  auto total_input_count = std::accumulate(\n      exec_ctx_list.begin(), exec_ctx_list.end(), (size_t)0,\n      [](size_t sum, std::shared_ptr<FlowUnitExecContext> &exec_ctx) {\n        const auto &data_ctx = exec_ctx->GetDataCtx();\n        auto inputs = 
data_ctx->GetInputs();\n        if (inputs.empty()) {\n          // this is event\n          return sum + 1;\n        }\n\n        auto input_count = inputs.begin()->second.size();\n        return sum + input_count;\n      });\n\n  auto slice = flowunit_trace_->Slice(TraceSliceType::PROCESS, \"\");\n  slice->SetBatchSize(total_input_count);\n  slice->Begin();\n  return slice;\n}\n\nvoid FlowUnitGroup::StopTrace(std::shared_ptr<TraceSlice> &slice) {\n  if (slice != nullptr) {\n    slice->End();\n  }\n}\n\nvoid FlowUnitGroup::PreProcess(FUExecContextList &exec_ctx_list) {\n  auto exec_ctx_iter = exec_ctx_list.begin();\n  while (exec_ctx_iter != exec_ctx_list.end()) {\n    auto exec_ctx = *exec_ctx_iter;\n    const auto &data_ctx = exec_ctx->GetDataCtx();\n    const auto &flowunit = exec_ctx->GetFlowUnit();\n\n    // stream start\n    if (data_ctx->IsDataPre()) {\n      auto status =\n          flowunit->DataPre(std::dynamic_pointer_cast<DataContext>(data_ctx));\n      if (status != STATUS_SUCCESS) {\n        MBLOG_INFO << \"flowunit \" << unit_name_\n                   << \" data pre return: \" << status;\n        const auto &error_msg = status.Errormsg();\n        data_ctx->DealWithDataPreError(unit_name_ + \".DataPreError\", error_msg);\n      }\n    }\n\n    ++exec_ctx_iter;\n  }\n}\n\nStatus FlowUnitGroup::Process(FUExecContextList &exec_ctx_list) {\n  FUExecContextList actual_exec_ctx_list;\n  // will skip end_buffer create by framework\n  for (auto &exec_ctx : exec_ctx_list) {\n    const auto &data_ctx = exec_ctx->GetDataCtx();\n    if (!data_ctx->IsSkippable()) {\n      actual_exec_ctx_list.emplace_back(exec_ctx);\n    } else {\n      data_ctx->SetStatus(data_ctx->GetLastStatus());\n      data_ctx->SetSkippable(false);\n    }\n  }\n\n  if (actual_exec_ctx_list.size() == 0) {\n    return STATUS_SUCCESS;\n  }\n\n  auto slice = StartTrace(actual_exec_ctx_list);\n  auto status = executor_->Process(actual_exec_ctx_list);\n  StopTrace(slice);\n  if (!status) {\n    
MBLOG_WARN << \"execute unit \" << unit_name_ << \" failed: \" << status;\n    return STATUS_STOP;\n  }\n\n  return status;\n}\n\nStatus FlowUnitGroup::PostProcess(FUExecContextList &exec_ctx_list) {\n  auto exec_ctx_iter = exec_ctx_list.begin();\n  auto status = STATUS_OK;\n  auto ret_status = STATUS_OK;\n  while (exec_ctx_iter != exec_ctx_list.end()) {\n    auto exec_ctx = *exec_ctx_iter;\n    const auto &data_ctx = exec_ctx->GetDataCtx();\n\n    status = data_ctx->PostProcess();\n    if (status == STATUS_STOP || status == STATUS_SHUTDOWN) {\n      ret_status = status;\n      return ret_status;\n    }\n\n    const auto &flowunit = exec_ctx->GetFlowUnit();\n    if (data_ctx->IsDataPost()) {\n      status =\n          flowunit->DataPost(std::dynamic_pointer_cast<DataContext>(data_ctx));\n      if (!status) {\n        MBLOG_INFO << \"flowunit \" << unit_name_\n                   << \" data post return: \" << status;\n      }\n    }\n\n    if (status == STATUS_STOP || status == STATUS_SHUTDOWN) {\n      ret_status = status;\n      return ret_status;\n    }\n\n    // make sure ctx state is right for next process\n    data_ctx->UpdateProcessState();\n\n    exec_ctx_iter++;\n  }\n  return ret_status;\n}\n\nvoid FlowUnitGroup::PostProcessEvent(FUExecContextList &exec_ctx_list) {\n  std::vector<std::shared_ptr<FlowUnitInnerEvent>> event_vector;\n  for (auto &exec_ctx : exec_ctx_list) {\n    const auto &data_ctx = exec_ctx->GetDataCtx();\n    auto event = data_ctx->GenerateSendEvent();\n    if (event != nullptr) {\n      event_vector.push_back(event);\n    }\n  }\n  auto node = node_.lock();\n  if (node == nullptr) {\n    return;\n  }\n  node->SendBatchEvent(event_vector);\n}\n\nFUExecContextList FlowUnitGroup::CreateExecCtx(\n    std::list<std::shared_ptr<FlowUnitDataContext>> &data_ctx_list) {\n  FUExecContextList exec_ctx_list;\n  for (auto &data_ctx : data_ctx_list) {\n    auto exec_ctx = std::make_shared<FlowUnitExecContext>(data_ctx);\n    
exec_ctx->SetFlowUnit(balancer_->GetFlowUnit(data_ctx));\n    exec_ctx_list.push_back(exec_ctx);\n  }\n\n  return exec_ctx_list;\n}\n\nStatus FlowUnitGroup::Run(\n    std::list<std::shared_ptr<FlowUnitDataContext>> &data_ctx_list) {\n  Status status = STATUS_OK;\n  Status ret_status = STATUS_OK;\n  auto exec_ctx_list = CreateExecCtx(data_ctx_list);\n  try {\n    PreProcess(exec_ctx_list);\n\n    status = Process(exec_ctx_list);\n    if (status == STATUS_STOP || status == STATUS_SHUTDOWN) {\n      ret_status = status;\n      return ret_status;\n    }\n\n    status = PostProcess(exec_ctx_list);\n    if (status == STATUS_STOP || status == STATUS_SHUTDOWN) {\n      ret_status = status;\n      return ret_status;\n    }\n\n    PostProcessEvent(exec_ctx_list);\n    // rearrange data ctx order\n    std::list<std::shared_ptr<FlowUnitDataContext>> processed_ctx_list;\n    for (auto &exec_ctx : exec_ctx_list) {\n      processed_ctx_list.push_back(exec_ctx->GetDataCtx());\n    }\n    data_ctx_list.swap(processed_ctx_list);\n  } catch (const std::exception &e) {\n    ret_status = {STATUS_FAULT, unit_name_ + \" process failed, \" + e.what()};\n  }\n\n  return ret_status;\n}\n\nvoid FlowUnitGroup::SetNode(const std::shared_ptr<Node> &node) { node_ = node; }\n\nstd::shared_ptr<FlowUnit> FlowUnitGroup::GetExecutorUnit() {\n  return flowunit_group_[0];\n}\n\nStatus FlowUnitGroup::CheckInputAndOutput(\n    const std::set<std::string> &input_ports_name,\n    const std::set<std::string> &output_ports_name) {\n  auto flowunit_iter = flowunit_group_.begin();\n  while (flowunit_iter != flowunit_group_.end()) {\n    auto flowunit_desc = flowunit_iter->get()->GetFlowUnitDesc();\n    auto check_failed = false;\n\n    auto input_set = flowunit_desc->GetFlowUnitInput();\n    auto input_ports_in_cfg = input_ports_name;\n    for (auto &input_item : input_set) {\n      auto item = input_ports_in_cfg.find(input_item.GetPortName());\n      if (item == input_ports_in_cfg.end()) {\n        MBLOG_WARN 
<< \"node input port: \" << input_item.GetPortName()\n                   << \" is not connected\";\n        check_failed = true;\n        continue;\n      }\n\n      input_ports_in_cfg.erase(item);\n    }\n\n    if (!input_ports_in_cfg.empty()) {\n      std::string err_msg = \"config input port [ \";\n      for (const auto &port_name : input_ports_in_cfg) {\n        err_msg += port_name + \" \";\n      }\n\n      err_msg += \"] not defined in flowunit\";\n      MBLOG_WARN << err_msg;\n      check_failed = true;\n    }\n\n    auto output_set = flowunit_desc->GetFlowUnitOutput();\n    auto output_ports_in_cfg = output_ports_name;\n    for (auto &output_item : output_set) {\n      auto item = output_ports_in_cfg.find(output_item.GetPortName());\n      if (item == output_ports_in_cfg.end()) {\n        MBLOG_WARN << \"node output port: \" << output_item.GetPortName()\n                   << \" is not connected\";\n        check_failed = true;\n        continue;\n      }\n\n      output_ports_in_cfg.erase(item);\n    }\n\n    if (!output_ports_in_cfg.empty()) {\n      std::string err_msg = \"config output port [ \";\n      for (const auto &port_name : output_ports_in_cfg) {\n        err_msg += port_name + \" \";\n      }\n\n      err_msg += \"] not defined in flowunit\";\n      MBLOG_WARN << err_msg;\n      check_failed = true;\n    }\n\n    if (check_failed) {\n      MBLOG_WARN << \"flowunit \" << flowunit_desc->GetFlowUnitName()\n                 << \" port check failed.\";\n      flowunit_iter = flowunit_group_.erase(flowunit_iter);\n    } else {\n      flowunit_iter++;\n    }\n  }\n\n  if (flowunit_group_.size() == 0) {\n    return {STATUS_BADCONF, \"flowunit '\" + unit_name_ +\n                                \"' config error, port not connect correctly.\"};\n  }\n  return STATUS_SUCCESS;\n}\n\nFlowUnitGroup::FlowUnitGroup(std::string unit_name, std::string unit_type,\n                             std::string unit_device_id,\n                             
std::shared_ptr<Configuration> config,\n                             std::shared_ptr<Profiler> profiler)\n    : batch_size_(1),\n      unit_name_(std::move(unit_name)),\n      unit_type_(std::move(unit_type)),\n      unit_device_id_(std::move(unit_device_id)),\n      config_(std::move(config)),\n      profiler_(std::move(profiler)){};\n\nFlowUnitGroup::~FlowUnitGroup() = default;\n\nStatus FlowUnitGroup::Init(const std::set<std::string> &input_ports_name,\n                           const std::set<std::string> &output_ports_name,\n                           const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n                           bool checkport) {\n  if (flowunit_mgr == nullptr) {\n    return {STATUS_FAULT, \"flowunit manager is null\"};\n  }\n\n  flowunit_group_ =\n      flowunit_mgr->CreateFlowUnit(unit_name_, unit_type_, unit_device_id_);\n  if (flowunit_group_.size() == 0) {\n    if (StatusError == STATUS_OK) {\n      StatusError = STATUS_NOTFOUND;\n    }\n    return {StatusError,\n            std::string(\"create flowunit '\") + unit_name_ + \"' failed.\"};\n  }\n\n  if (flowunit_group_.size() == 0) {\n    return {STATUS_BADCONF, std::string(\"flowunit '\") + unit_name_ +\n                                \"' config error, port not connect correctly.\"};\n  }\n\n  if (checkport) {\n    auto status = CheckInputAndOutput(input_ports_name, output_ports_name);\n    if (status != STATUS_SUCCESS) {\n      return status;\n    }\n  }\n  return STATUS_OK;\n}\n\nStatus FlowUnitGroup::Open(const CreateExternalDataFunc &create_func) {\n  auto status = STATUS_OK;\n  auto open_func = [&](const std::shared_ptr<FlowUnit> &flowunit) -> modelbox::Status {\n    if (!flowunit) {\n      MBLOG_WARN << \"flow unit is nullptr.\";\n      return STATUS_INVALID;\n    }\n\n    auto flowunit_desc = flowunit->GetFlowUnitDesc();\n    flowunit->SetExternalData(create_func);\n    try {\n      status = flowunit->Open(config_);\n    } catch (const std::exception &e) {\n      status = 
{STATUS_FAULT,\n                flowunit_desc->GetFlowUnitName() + \" open failed, \" + e.what()};\n    }\n\n    if (!status) {\n      MBLOG_WARN << flowunit_desc->GetFlowUnitName() << \":\"\n                 << flowunit_desc->GetFlowUnitAliasName()\n                 << \" open failed: \" << status;\n      status = {status, \"open flowunit '\" + flowunit_desc->GetFlowUnitName() +\n                            \"', type '\" +\n                            flowunit_desc->GetDriverDesc()->GetType() +\n                            \"' failed.\"};\n      return status;\n    }\n\n    MBLOG_DEBUG << flowunit_desc->GetFlowUnitName() << \":\"\n                << flowunit_desc->GetFlowUnitAliasName() << \" opened.\";\n\n    return STATUS_OK;\n  };\n\n  ThreadPool pool(std::thread::hardware_concurrency());\n  pool.SetName(unit_name_ + \"-Open\");\n  std::vector<std::future<Status>> result;\n\n  for (auto &flowunit : flowunit_group_) {\n    auto ret = pool.Submit(open_func, flowunit);\n    result.push_back(std::move(ret));\n  }\n\n  for (auto &fut : result) {\n    const auto *msg = \"open flowunit failed, please check log.\";\n    if (!fut.valid()) {\n      return {STATUS_FAULT, msg};\n    }\n\n    auto ret = fut.get();\n    if (!ret) {\n      return ret;\n    }\n  }\n\n  bool need_check_output = false;\n  if (config_) {\n    uint32_t default_batch_size =\n        GetExecutorUnit()->GetFlowUnitDesc()->GetDefaultBatchSize();\n    batch_size_ =\n        config_->GetProperty<uint32_t>(\"batch_size\", default_batch_size);\n    uint32_t max_batch_size =\n        GetExecutorUnit()->GetFlowUnitDesc()->GetMaxBatchSize();\n    if (max_batch_size != 0 && batch_size_ > max_batch_size) {\n      batch_size_ = max_batch_size;\n    }\n    need_check_output = config_->GetProperty<bool>(\"need_check_output\", false);\n  }\n\n  auto node = node_.lock();\n  if (node != nullptr) {\n    MBLOG_INFO << \"node: \" << node->GetName() << \" get batch size is \"\n               << batch_size_;\n  }\n\n  
balancer_ = FlowUnitBalancerFactory::GetInstance().CreateBalancer();\n  if (balancer_ == nullptr) {\n    return {STATUS_FAULT, \"Get flowunit balancer failed\"};\n  }\n\n  auto ret = balancer_->Init(flowunit_group_);\n  if (!ret) {\n    return {STATUS_FAULT, \"Init balancer failed: \" + ret.Errormsg()};\n  }\n\n  executor_ = std::make_shared<FlowUnitDataExecutor>(node_, batch_size_);\n  executor_->SetNeedCheckOutput(need_check_output);\n  return status;\n}\n\nStatus FlowUnitGroup::Close() {\n  auto status = STATUS_OK;\n  for (auto &flowunit : flowunit_group_) {\n    if (!flowunit) {\n      MBLOG_WARN << \"flow unit is nullptr.\";\n      continue;\n    }\n\n    auto flowunit_desc = flowunit->GetFlowUnitDesc();\n    try {\n      status = flowunit->Close();\n    } catch (const std::exception &e) {\n      status = {STATUS_FAULT, flowunit_desc->GetFlowUnitName() +\n                                  \" close failed, \" + e.what()};\n    }\n\n    if (!status) {\n      MBLOG_WARN << flowunit_desc->GetFlowUnitName() << \":\"\n                 << flowunit_desc->GetFlowUnitAliasName()\n                 << \" close failed: \" << status;\n      break;\n    }\n\n    MBLOG_DEBUG << flowunit_desc->GetFlowUnitName() << \":\"\n                << flowunit_desc->GetFlowUnitAliasName() << \" closed.\";\n  }\n\n  return status;\n}\n\nStatus FlowUnitGroup::Destory() { return STATUS_OK; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/flowunit_manager.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <algorithm>\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/flowunit.h\"\n\nnamespace modelbox {\n\nFlowUnitManager::FlowUnitManager() = default;\nFlowUnitManager::~FlowUnitManager() = default;\n\nstd::shared_ptr<FlowUnitManager> FlowUnitManager::GetInstance() {\n  static std::shared_ptr<FlowUnitManager> flowunit_mgr =\n      std::make_shared<FlowUnitManager>();\n  return flowunit_mgr;\n}\n\nstd::shared_ptr<FlowUnitDesc> FlowUnitManager::GetFlowUnitDesc(\n    const std::string &flowunit_type, const std::string &flowunit_name) {\n  auto iter_device_type = flowunit_desc_list_.find(flowunit_type);\n  if (iter_device_type == flowunit_desc_list_.end()) {\n    MBLOG_ERROR << \"do not find device_type \" << flowunit_type\n                << \" in the flowunit desc map, please check the device type.\";\n    return nullptr;\n  }\n\n  auto iter_flowunit_name =\n      flowunit_desc_list_[flowunit_type].find(flowunit_name);\n  if (iter_flowunit_name == flowunit_desc_list_[flowunit_type].end()) {\n    MBLOG_ERROR << \"do not find flowunit name \" << flowunit_name\n                << \" in device type \" << flowunit_type\n                << \" in the flowunit desc map, please check the device name.\";\n    return nullptr;\n  }\n\n  return 
flowunit_desc_list_[flowunit_type][flowunit_name];\n}\n\nStatus FlowUnitManager::Initialize(\n    const std::shared_ptr<Drivers> &driver,\n    std::shared_ptr<DeviceManager> device_mgr,\n    const std::shared_ptr<Configuration> &config) {\n  SetDeviceManager(std::move(device_mgr));\n  Status status;\n  status = InitFlowUnitFactory(driver);\n\n  if (config != nullptr) {\n    max_executor_thread_num_ =\n        config->GetUint32(\"graph.max_executor_thread_num\", 0);\n  } else {\n    max_executor_thread_num_ = 0;\n  }\n\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  status = FlowUnitProbe();\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  status = SetUpFlowUnitDesc();\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  return status;\n}\n\nStatus FlowUnitManager::InitFlowUnitFactory(\n    const std::shared_ptr<Drivers> &driver) {\n  std::vector<std::shared_ptr<Driver>> driver_list =\n      driver->GetDriverListByClass(\"DRIVER-FLOWUNIT\");\n  std::vector<std::shared_ptr<Driver>> inference_driver_list =\n      driver->GetDriverListByClass(\"DRIVER-INFERENCE\");\n  for (auto &infer_driver : inference_driver_list) {\n    driver_list.emplace_back(infer_driver);\n  }\n\n  std::shared_ptr<DriverDesc> desc;\n  for (auto &flowunit_driver : driver_list) {\n    auto temp_factory = flowunit_driver->CreateFactory();\n    if (nullptr == temp_factory) {\n      continue;\n    }\n    desc = flowunit_driver->GetDriverDesc();\n    std::shared_ptr<FlowUnitFactory> flowunit_factory =\n        std::dynamic_pointer_cast<FlowUnitFactory>(temp_factory);\n\n    flowunit_factory->SetDriver(flowunit_driver);\n\n    auto names = flowunit_factory->GetFlowUnitNames();\n    if (names.empty()) {\n      flowunit_factory_.insert(std::make_pair(\n          std::make_pair(desc->GetType(), desc->GetName()), flowunit_factory));\n    } else {\n      for (const auto &name : names) {\n        flowunit_factory_.insert(std::make_pair(\n            
std::make_pair(desc->GetType(), name), flowunit_factory));\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitManager::FlowUnitProbe() {\n  for (auto &iter : flowunit_factory_) {\n    auto tmp = iter.second->FlowUnitProbe();\n    if (!tmp.size()) {\n      continue;\n    }\n\n    auto value = flowunit_desc_list_.find(iter.first.first);\n    if (value == flowunit_desc_list_.end()) {\n      flowunit_desc_list_.insert(std::make_pair(iter.first.first, tmp));\n    } else {\n      for (const auto &item : tmp) {\n        { value->second.insert(std::make_pair(item.first, item.second)); }\n      }\n    }\n\n    for (const auto &iter_flow : tmp) {\n      auto flowunit_desc = iter_flow.second;\n      MBLOG_DEBUG << \"add flowunit:\";\n      MBLOG_DEBUG << \"  name: \" << flowunit_desc->GetFlowUnitName();\n      MBLOG_DEBUG << \"  type: \" << iter.first.first;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitManager::Register(\n    const std::shared_ptr<FlowUnitFactory> &factory) {\n  std::string factory_type = factory->GetFlowUnitFactoryType();\n  std::string factory_unit_name = factory->GetFlowUnitFactoryName();\n  if (flowunit_factory_.count(\n          std::make_pair(factory_type, factory_unit_name))) {\n    MBLOG_WARN << \"The type \" << factory_type << \" has already existed.\";\n    return Status(STATUS_EXIST);\n  }\n\n  flowunit_factory_.insert(\n      std::make_pair(std::make_pair(factory_type, factory_unit_name), factory));\n  return STATUS_OK;\n}\n\nstd::vector<std::string> FlowUnitManager::GetFlowUnitTypes() {\n  std::vector<std::string> flowunit_type;\n  std::set<std::string> tmp_set;\n  for (auto &iter : flowunit_factory_) {\n    tmp_set.insert(iter.first.first);\n  }\n  std::copy(tmp_set.begin(), tmp_set.end(), std::back_inserter(flowunit_type));\n\n  return flowunit_type;\n}\n\nstd::vector<std::string> FlowUnitManager::GetFlowUnitTypes(\n    const std::string &unit_name) {\n  std::vector<std::string> unit_types;\n  for (auto &iter : 
flowunit_desc_list_) {\n    const auto &dev_type = iter.first;\n    auto &units = iter.second;\n    auto unit_item = units.find(unit_name);\n    if (unit_item == units.end()) {\n      continue;\n    }\n\n    unit_types.push_back(dev_type);\n  }\n\n  return unit_types;\n}\n\nstd::vector<std::string> FlowUnitManager::GetFlowUnitList(\n    const std::string &unit_type) {\n  std::vector<std::string> flowunit_name;\n  auto iter = flowunit_desc_list_.find(unit_type);\n  if (iter == flowunit_desc_list_.end()) {\n    return std::vector<std::string>();\n  }\n\n  for (auto &name : flowunit_desc_list_[unit_type]) {\n    flowunit_name.push_back(name.first);\n  }\n\n  return flowunit_name;\n}\n\nStatus FlowUnitManager::CheckParams(const std::string &unit_name,\n                                    const std::string &unit_type,\n                                    const std::string &unit_device_id) {\n  if (unit_name.empty()) {\n    MBLOG_WARN << \"FlowUnit name should not be empty.\";\n    return STATUS_BADCONF;\n  }\n\n  if (unit_type.empty() && !unit_device_id.empty()) {\n    MBLOG_WARN << \"Empty flowUnit type and not empty flowunit device id are \"\n                  \"not allowed.\";\n    return STATUS_BADCONF;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitManager::ParseUnitDeviceConf(const std::string &unit_name,\n                                            const std::string &unit_type,\n                                            const std::string &unit_device_id,\n                                            FlowUnitDeviceConfig &dev_cfg) {\n  auto ret = ParseUserDeviceConf(unit_type, unit_device_id, dev_cfg);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = AutoFillDeviceConf(unit_name, dev_cfg);\n  if (!ret) {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitManager::ParseUserDeviceConf(const std::string &unit_type,\n                                            const std::string &unit_device_id,\n                                            
FlowUnitDeviceConfig &dev_cfg) {\n  /**\n   * user format: unit_type = \"cuda:0,1;cpu\"\n   * what we get from configuration will be: unit_type = \"cuda:0~1;cpu\"\n   **/\n  auto unit_type_formatted = unit_type;\n  std::replace(unit_type_formatted.begin(), unit_type_formatted.end(),\n               LIST_DELIMITER[0], ',');\n  auto device_list = StringSplit(unit_type_formatted, ';');\n  for (auto &device_info : device_list) {\n    auto data = StringSplit(device_info, ':');\n    if (data.empty() || data.size() > 2) {\n      return {STATUS_BADCONF, \"device info \" + unit_type + \" format error\"};\n    }\n\n    auto &device_type = data[0];\n    auto &single_dev_cfg = dev_cfg[device_type];\n    if (data.size() == 1) {\n      continue;\n    }\n\n    auto &ids = data[1];\n    auto id_list = StringSplit(ids, ',');\n    for (const auto &id_index : id_list) {\n      single_dev_cfg.push_back(id_index);\n    }\n  }\n\n  if (unit_device_id.empty()) {\n    return STATUS_OK;\n  }\n\n  // For compatibility, check old config: device=\"cpu\", deviceid=\"0\"\n  if (dev_cfg.size() != 1) {\n    return {STATUS_BADCONF,\n            \"should not set deviceid param when use multi device\"};\n  }\n\n  for (auto &cfg_item : dev_cfg) {\n    auto &ids = cfg_item.second;\n    if (!ids.empty()) {\n      return {STATUS_BADCONF,\n              \"should not set deviceid param when use [device:id] format\"};\n    }\n\n    ids.push_back(unit_device_id);\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitManager::AutoFillDeviceConf(const std::string &unit_name,\n                                           FlowUnitDeviceConfig &dev_cfg) {\n  if (dev_cfg.empty()) {\n    // will auto fill all device type if no device selected\n    auto unit_types = GetFlowUnitTypes(unit_name);\n    for (const auto &type : unit_types) {\n      dev_cfg[type];  // create empty list\n    }\n  }\n\n  for (auto &cfg_item : dev_cfg) {\n    const auto &dev_type = cfg_item.first;\n    auto &ids = cfg_item.second;\n    if 
(ids.empty()) {\n      // will auto fill all id if no id selected\n      auto real_ids = device_mgr_->GetDevicesIdList(dev_type);\n      ids.assign(real_ids.begin(), real_ids.end());\n    }\n  }\n\n  return STATUS_OK;\n}\n\nstd::vector<std::shared_ptr<FlowUnit>> FlowUnitManager::CreateFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &unit_device_id) {\n  std::vector<std::shared_ptr<FlowUnit>> flowunit_list;\n\n  StatusError = {STATUS_NOTFOUND};\n\n  auto ret = CheckParams(unit_name, unit_type, unit_device_id);\n  if (ret != STATUS_OK) {\n    return flowunit_list;\n  }\n\n  FlowUnitDeviceConfig unit_dev_cfg;\n  ret = ParseUnitDeviceConf(unit_name, unit_type, unit_device_id, unit_dev_cfg);\n  if (!ret) {\n    MBLOG_ERROR << \"Parse unit device config failed, err \" << ret;\n    return flowunit_list;\n  }\n\n  for (auto &cfg_item : unit_dev_cfg) {\n    const auto &dev_type = cfg_item.first;\n    auto &ids = cfg_item.second;\n    if (ids.empty()) {\n      MBLOG_WARN << \"CreateFlowUnit: \" << unit_name << \",\" << dev_type\n                 << \" failed, No available device for type \" << dev_type;\n      continue;\n    }\n\n    for (auto &id : ids) {\n      auto flowunit = CreateSingleFlowUnit(unit_name, dev_type, id);\n      if (flowunit == nullptr) {\n        MBLOG_WARN << \"CreateFlowUnit: \" << unit_name << \" failed, \"\n                   << StatusError;\n        continue;\n      }\n\n      flowunit_list.push_back(flowunit);\n    }\n  }\n\n  return flowunit_list;\n}\n\nstd::shared_ptr<FlowUnit> FlowUnitManager::CreateSingleFlowUnit(\n    const std::string &unit_name, const std::string &unit_type,\n    const std::string &unit_device_id) {\n  if (unit_device_id.empty()) {\n    StatusError = {STATUS_INVALID, \"FlowUnit device id is none.\"};\n    MBLOG_WARN << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  if (unit_type.empty()) {\n    StatusError = {STATUS_INVALID, \"FlowUnit device type is none.\"};\n    
MBLOG_WARN << StatusError.Errormsg();\n    return nullptr;\n  }\n\n  std::shared_ptr<FlowUnit> flowunit;\n  std::shared_ptr<Device> device;\n  std::shared_ptr<DeviceManager> device_mgr = GetDeviceManager();\n\n  auto iter = flowunit_factory_.find(std::make_pair(unit_type, unit_name));\n  if (iter == flowunit_factory_.end()) {\n    StatusError = {STATUS_NOTFOUND,\n                   \"can not find flowunit [type: \" + unit_type +\n                       \", name:\" + unit_name +\n                       \"], Please check if the 'device' configured correctly \"\n                       \"or if the flowunit library exists.\"};\n    return nullptr;\n  }\n\n  auto &factory = iter->second;\n  const auto &flowunit_desc_map = factory->FlowUnitProbe();\n  auto item = flowunit_desc_map.find(unit_name);\n  if (item == flowunit_desc_map.end()) {\n    StatusError = {STATUS_FAULT,\n                   \"flowunit probe for unit \" + unit_name + \" failed.\"};\n    return nullptr;\n  }\n\n  auto flowunit_desc = item->second;\n  if (flowunit_desc->GetFlowUnitType() == \"\") {\n    flowunit_desc->SetFlowUnitType(unit_type);\n  }\n\n  auto driver_desc = factory->GetDriver()->GetDriverDesc();\n  flowunit_desc->SetDriverDesc(driver_desc);\n  factory->SetVirtualType(flowunit_desc->GetVirtualType());\n  flowunit = factory->CreateFlowUnit(unit_name, unit_type);\n  if (flowunit == nullptr) {\n    return nullptr;\n  }\n\n  device = device_mgr->CreateDevice(unit_type, unit_device_id);\n  if (device == nullptr) {\n    return nullptr;\n  }\n\n  if (max_executor_thread_num_ > 0) {\n    MBLOG_INFO << \"find the parameter max_executor_thread_num in the config: \"\n               << max_executor_thread_num_;\n    device->GetDeviceExecutor()->SetThreadCount(max_executor_thread_num_);\n  }\n\n  flowunit->SetBindDevice(device);\n  std::vector<FlowUnitInput> &in_list = flowunit_desc->GetFlowUnitInput();\n  for (auto &in_item : in_list) {\n    const auto &device_type = in_item.GetDeviceType();\n    if 
(device_type.empty() || device_type == device->GetType()) {\n      in_item.SetDevice(device);\n      continue;\n    }\n\n    const auto &dev_id_list = device_mgr->GetDevicesIdList(device_type);\n    // TODO if this device type has no device, what should we do?\n    if (dev_id_list.empty()) {\n      in_item.SetDevice(device);\n      continue;\n    }\n    // TODO which device id should we select?\n    auto in_device = device_mgr->CreateDevice(device_type, dev_id_list[0]);\n    if (in_device == nullptr) {\n      in_device = device;\n    }\n\n    in_item.SetDevice(in_device);\n  }\n  flowunit->SetFlowUnitDesc(flowunit_desc);\n\n  return flowunit;\n}\n\nvoid FlowUnitManager::Clear() {\n  flowunit_desc_list_.clear();\n  flowunit_factory_.clear();\n}\n\nstd::map<std::pair<std::string, std::string>, std::shared_ptr<FlowUnitFactory>>\nFlowUnitManager::GetFlowUnitFactoryList() {\n  return flowunit_factory_;\n}\n\nstd::map<std::string, std::map<std::string, std::shared_ptr<FlowUnitDesc>>>\nFlowUnitManager::GetFlowUnitDescList() {\n  return flowunit_desc_list_;\n}\n\nStatus FlowUnitManager::SetUpFlowUnitDesc() {\n  for (auto &iter_device : flowunit_desc_list_) {\n    for (auto &iter_name : flowunit_desc_list_[iter_device.first]) {\n      auto pair = std::make_pair(iter_device.first, iter_name.first);\n      auto iter_driver_desc = flowunit_factory_.find(pair);\n      if (iter_driver_desc == flowunit_factory_.end()) {\n        auto err_msg = \"flowunit_factory find \" + iter_device.first + \", \" +\n                       iter_name.first + \" failed\";\n        MBLOG_ERROR << err_msg;\n        return {STATUS_FAULT, err_msg};\n      }\n\n      auto driver = flowunit_factory_[pair]->GetDriver();\n      auto driver_desc = driver->GetDriverDesc();\n      flowunit_desc_list_[iter_device.first][iter_name.first]->SetDriverDesc(\n          driver_desc);\n    }\n  }\n  return STATUS_SUCCESS;\n}\n\nstd::vector<std::shared_ptr<FlowUnitDesc>>\nFlowUnitManager::GetAllFlowUnitDesc() {\n  
std::vector<std::shared_ptr<FlowUnitDesc>> desc_vec;\n  for (auto &iter_device : flowunit_desc_list_) {\n    for (auto &iter_name : flowunit_desc_list_[iter_device.first]) {\n      desc_vec.push_back(\n          flowunit_desc_list_[iter_device.first][iter_name.first]);\n    }\n  }\n\n  return desc_vec;\n}\n\nvoid FlowUnitManager::SetDeviceManager(\n    std::shared_ptr<DeviceManager> device_mgr) {\n  device_mgr_ = std::move(device_mgr);\n}\n\nstd::shared_ptr<DeviceManager> FlowUnitManager::GetDeviceManager() {\n  return device_mgr_;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/graph/graph_checker.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/graph_checker.h\"\n\n#include <cmath>\n#include <queue>\n#include <stack>\n#include <utility>\n\nnamespace modelbox {\n\nconstexpr const char *EXTERNAL = \"external\";\n\nstatic std::shared_ptr<Node> CastNode(\n    const std::shared_ptr<NodeBase> &node_base) {\n  return std::dynamic_pointer_cast<Node>(node_base);\n}\n\nIndexPort::IndexPort() = default;\nIndexPort::IndexPort(std::string node, std::string port,\n                     const IndexPortType &type)\n    : node_name_(std::move(node)),\n      port_name_(std::move(port)),\n      port_type_(type) {}\nIndexPort::~IndexPort() = default;\n\nstd::string IndexPort::ToString() const {\n  return node_name_ + \".\" + port_name_ + \".\" + std::to_string(port_type_);\n}\n\nvoid IndexPort::SetNodeName(std::string node_name) {\n  node_name_ = std::move(node_name);\n}\n\nvoid IndexPort::SetPortName(std::string port_name) {\n  port_name_ = std::move(port_name);\n}\n\nvoid IndexPort::SetPortType(IndexPortType port_type) { port_type_ = port_type; }\n\nconst std::string &IndexPort::GetNodeName() const { return node_name_; }\n\nconst std::string &IndexPort::GetPortName() const { return port_name_; }\n\nconst IndexPortType &IndexPort::GetPortType() const { return port_type_; }\n\nvoid LeastCommonAncestor::InitMap() {\n  int index = 0;\n  for (auto 
&all_node : all_nodes_) {\n    index_name_map_[index] = all_node.second->GetName();\n    name_index_map_[all_node.second->GetName()] = index;\n    index++;\n  }\n}\n\nLeastCommonAncestor::LeastCommonAncestor(\n    std::unordered_map<std::string, std::shared_ptr<NodeBase>> all_nodes)\n    : all_nodes_(std::move(all_nodes)) {\n  InitMap();\n}\n\nLeastCommonAncestor::~LeastCommonAncestor() {\n  index_name_map_.clear();\n  name_index_map_.clear();\n}\n\nvoid LeastCommonAncestor::Update(\n    const std::vector<IndexPort> &values,\n    const std::unordered_map<std::string, std::string> &match_map) {\n  for (const auto &value : values) {\n    auto cur_name = value.GetNodeName();\n    auto index = name_index_map_[cur_name];\n    std::vector<int> path{index};\n    auto pre_name = match_map.at(cur_name);\n    while (pre_name != EXTERNAL) {\n      path.push_back(name_index_map_[pre_name]);\n      cur_name = pre_name;\n      pre_name = match_map.at(cur_name);\n    }\n\n    path.push_back(-1);\n    paths_[index] = path;\n  }\n}\n\nstd::string LeastCommonAncestor::GetMatchPortName(\n    const std::string &match_a_name, const std::string &match_b_name,\n    const std::string &match_node_name) {\n  std::shared_ptr<Node> node_a = CastNode(all_nodes_[match_a_name]);\n  std::shared_ptr<Node> node_b = CastNode(all_nodes_[match_b_name]);\n  std::shared_ptr<Node> match = CastNode(all_nodes_[match_node_name]);\n  std::shared_ptr<InputVirtualNode> match_virtual_node;\n  if (match == nullptr) {\n    match_virtual_node = std::dynamic_pointer_cast<InputVirtualNode>(\n        all_nodes_[match_node_name]);\n  }\n\n  auto input_port_a = node_a->GetInputPorts()[0];\n  auto input_port_b = node_b->GetInputPorts()[0];\n  std::string output_port_a =\n      input_port_a->GetAllOutPort()[0].lock()->GetName();\n  std::string output_port_b =\n      input_port_b->GetAllOutPort()[0].lock()->GetName();\n  if (output_port_a != output_port_b) {\n    std::vector<std::shared_ptr<InPort>> match_input_ports;\n   
 if (match == nullptr) {\n      match_input_ports = match_virtual_node->GetInputPorts();\n    } else {\n      match_input_ports = match->GetInputPorts();\n    }\n\n    if (match_input_ports.size() == 0) {\n      return EXTERNAL;\n    }\n    return match_input_ports[0]->GetName();\n  }\n\n  return output_port_a;\n}\n\nIndexPort LeastCommonAncestor::ProcessSameNode(const IndexPort &node_a,\n                                               const IndexPort &node_b) {\n  if (node_a.GetPortName() == node_b.GetPortName()) {\n    return IndexPort(node_a.GetNodeName(), node_a.GetPortName(),\n                     node_a.GetPortType());\n  }\n\n  std::string match_port_name;\n  auto input_nums = all_nodes_[node_a.GetNodeName()]->GetInputNum();\n  if (input_nums == 0) {\n    match_port_name = EXTERNAL;\n  } else {\n    match_port_name =\n        all_nodes_[node_a.GetNodeName()]->GetInputPorts()[0]->GetName();\n  }\n\n  return IndexPort(node_a.GetNodeName(), match_port_name, IndexPortType::INPUT);\n}\n\nvoid LeastCommonAncestor::FindMatchNode(const IndexPort &node_a,\n                                        const IndexPort &node_b,\n                                        std::string &match_a_name,\n                                        std::string &match_b_name,\n                                        std::string &match_node_name,\n                                        std::string &port_name) {\n  int index_a = name_index_map_[node_a.GetNodeName()];\n  int index_b = name_index_map_[node_b.GetNodeName()];\n  auto path_a = paths_[index_a];\n  auto path_b = paths_[index_b];\n  int res = -1;\n  bool swap_flag = false;\n  if (path_a.size() > path_b.size()) {\n    swap_flag = true;\n    std::swap(path_a, path_b);\n  }\n\n  int begin_b = path_b.size() - path_a.size();\n  size_t index = -1;\n  for (size_t i = 0; i < path_a.size(); ++i) {\n    if (path_a[i] != path_b[begin_b + i]) {\n      continue;\n    }\n\n    res = path_a[i];\n    index = i;\n    break;\n  }\n\n  if (res == -1) 
{\n    return;\n  }\n\n  match_node_name = index_name_map_[res];\n  if (index == 0) {\n    match_a_name = match_node_name;\n    match_b_name = index_name_map_[path_b[begin_b - 1]];\n    if (swap_flag) {\n      port_name = node_b.GetPortName();\n    } else {\n      port_name = node_a.GetPortName();\n    }\n    return;\n  }\n\n  match_a_name = index_name_map_[path_a[index - 1]];\n  match_b_name = index_name_map_[path_b[begin_b + index - 1]];\n}\n\nstd::string LeastCommonAncestor::GetMatchPortName(\n    const std::string &port_name, const std::string &match_a_name,\n    const std::string &match_b_name, const std::string &match_node_name) {\n  std::string match_port_name;\n  auto matching_node = all_nodes_[match_node_name];\n  auto input_ports = all_nodes_[match_b_name]->GetInputPorts();\n  for (auto &input_port : input_ports) {\n    auto connected_out_ports = input_port->GetAllOutPort();\n    for (auto &connected_out_port : connected_out_ports) {\n      if (connected_out_port.lock()->GetNode()->GetName() != match_node_name) {\n        continue;\n      }\n\n      if (connected_out_port.lock()->GetName() == port_name) {\n        match_port_name = port_name;\n        return match_port_name;\n      }\n    }\n  }\n\n  if (matching_node->GetInputPorts().size() == 0) {\n    match_port_name = EXTERNAL;\n  } else {\n    match_port_name = matching_node->GetInputPorts()[0]->GetName();\n  }\n\n  return match_port_name;\n}\n\nvoid LeastCommonAncestor::GetIndexPortType(const std::string &node_name,\n                                           const std::string &port_name,\n                                           IndexPortType &port_type) {\n  auto node = all_nodes_[node_name];\n  auto input_ports = node->GetInputPorts();\n  for (auto &input_port : input_ports) {\n    auto name = input_port->GetName();\n    if (name != port_name) {\n      continue;\n    }\n\n    port_type = IndexPortType::INPUT;\n    return;\n  }\n\n  auto output_ports = node->GetOutputPorts();\n  for (auto 
&output_port : output_ports) {\n    auto name = output_port->GetName();\n    if (name != port_name) {\n      continue;\n    }\n\n    port_type = IndexPortType::OUTPUT;\n    return;\n  }\n\n  port_type = IndexPortType::UNKNOWN;\n}\n\nIndexPort LeastCommonAncestor::Find(const IndexPort &node_a,\n                                    const IndexPort &node_b) {\n  if (node_a.GetNodeName() == node_b.GetNodeName()) {\n    return ProcessSameNode(node_a, node_b);\n  }\n\n  IndexPort ans;\n  std::string match_node_name;\n  std::string match_a_name;\n  std::string match_b_name;\n  std::string port_name;\n  FindMatchNode(node_a, node_b, match_a_name, match_b_name, match_node_name,\n                port_name);\n  if (match_node_name.empty()) {\n    return ans;\n  }\n\n  std::string match_port_name;\n  IndexPortType port_type;\n  if (match_node_name == match_a_name) {\n    match_port_name = GetMatchPortName(port_name, match_a_name, match_b_name,\n                                       match_node_name);\n    GetIndexPortType(match_node_name, match_port_name, port_type);\n    ans = IndexPort(match_node_name, match_port_name, port_type);\n    return ans;\n  }\n\n  match_port_name =\n      GetMatchPortName(match_a_name, match_b_name, match_node_name);\n  GetIndexPortType(match_node_name, match_port_name, port_type);\n  ans = IndexPort(match_node_name, match_port_name, port_type);\n  return ans;\n}\n\nOverHierarchyCheck::OverHierarchyCheck(\n    const std::unordered_map<std::string, std::shared_ptr<NodeBase>> &all_nodes,\n    std::set<std::shared_ptr<NodeBase>> start_nodes,\n    std::map<std::string, std::string> loop_links,\n    std::vector<std::vector<std::string>> loop_structures,\n    std::map<std::shared_ptr<OutPort>, std::set<std::shared_ptr<InPort>>> edges)\n    : all_nodes_(all_nodes),\n      start_nodes_(std::move(start_nodes)),\n      loop_links_(std::move(loop_links)),\n      loop_structures_(std::move(loop_structures)),\n      edges_(std::move(edges)) {\n  for (const auto 
&all_node : all_nodes) {\n    visited_[all_node.first] = false;\n  }\n}\n\nOverHierarchyCheck::~OverHierarchyCheck() {\n  all_nodes_.clear();\n  loop_links_.clear();\n  start_nodes_.clear();\n  edges_.clear();\n}\n\nvoid OverHierarchyCheck::InitFirstNode(const std::shared_ptr<Node> &node) {\n  auto node_name = node->GetName();\n  auto index_port =\n      std::make_shared<IndexPort>(node_name, EXTERNAL, IndexPortType::INPUT);\n  color_map_[index_port->ToString()] = {0};\n\n  for (auto &output_port : node->GetOutputPorts()) {\n    auto index_port = std::make_shared<IndexPort>(\n        node_name, output_port->GetName(), IndexPortType::OUTPUT);\n    color_map_[index_port->ToString()] = {0};\n  }\n}\n\nbool OverHierarchyCheck::CheckEndIfPort(\n    const std::shared_ptr<InPort> &input_port,\n    const std::shared_ptr<IndexPort> &index_port,\n    const std::unordered_map<std::string,\n                             std::unordered_map<std::string, std::string>>\n        &graph_single_port_match_map) {\n  auto connect_ports = input_port->GetAllOutPort();\n  if (connect_ports.size() <= 1) {\n    return false;\n  }\n\n  if (graph_single_port_match_map.find(index_port->GetNodeName()) ==\n      graph_single_port_match_map.end()) {\n    return false;\n  }\n\n  if (graph_single_port_match_map.at(index_port->GetNodeName())\n          .find(index_port->GetPortName()) ==\n      graph_single_port_match_map.at(index_port->GetNodeName()).end()) {\n    return false;\n  }\n\n  return true;\n}\n\nStatus OverHierarchyCheck::CheckInputPortsColorReady(\n    std::shared_ptr<IndexPort> &index_port,\n    const std::vector<std::shared_ptr<InPort>> &input_ports) {\n  for (const auto &input_port : input_ports) {\n    index_port->SetPortName(input_port->GetName());\n    index_port->SetPortType(IndexPortType::INPUT);\n    if (color_map_.find(index_port->ToString()) == color_map_.end()) {\n      return STATUS_NODATA;\n    }\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus 
OverHierarchyCheck::CheckInputPorts(\n    const std::shared_ptr<Node> &node,\n    const std::unordered_map<std::string,\n                             std::unordered_map<std::string, std::string>>\n        &graph_single_port_match_map) {\n  Status status{STATUS_OK};\n  auto input_ports = node->GetInputPorts();\n  std::vector<int> color;\n  std::shared_ptr<IndexPort> index_port = std::make_shared<IndexPort>();\n  index_port->SetNodeName(node->GetName());\n  status = CheckInputPortsColorReady(index_port, input_ports);\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  for (auto &input_port : input_ports) {\n    index_port->SetPortName(input_port->GetName());\n    std::vector<int> tmp_color(color_map_[index_port->ToString()]);\n    if (CheckEndIfPort(input_port, index_port, graph_single_port_match_map)) {\n      tmp_color.pop_back();\n    }\n\n    if (color.size() == 0) {\n      color = tmp_color;\n      continue;\n    }\n\n    if (color != tmp_color) {\n      status = {STATUS_BADCONF,\n                \"node:\" + node->GetName() +\n                    \" has different level links, pls check the input links.\"};\n      return status;\n    }\n  }\n\n  return status;\n}\n\nstd::shared_ptr<NodeBase> OverHierarchyCheck::FindLoopLinkNode(\n    const std::shared_ptr<Node> &node) {\n  auto node_name = node->GetName();\n  std::shared_ptr<NodeBase> res;\n  for (auto &loop : loop_structures_) {\n    for (size_t i = 0; i < loop.size(); ++i) {\n      if (loop[i] != node_name) {\n        continue;\n      }\n\n      res = all_nodes_[loop[i + 1]];\n      break;\n    }\n\n    if (res != nullptr) {\n      break;\n    }\n  }\n\n  return res;\n}\n\nbool OverHierarchyCheck::CheckEndIfNode(\n    const std::shared_ptr<Node> &node,\n    const std::unordered_map<std::string, std::string> &end_if_map) {\n  auto name = node->GetName();\n  if (end_if_map.find(name) == end_if_map.end()) {\n    return false;\n  }\n\n  auto input_ports = node->GetInputPorts();\n  for (auto &input_port 
: input_ports) {\n    if (input_port->GetConnectedPortNumber() == 1) {\n      return false;\n    }\n  }\n\n  auto match_node = CastNode(all_nodes_[end_if_map.at(name)]);\n  if (match_node == nullptr) {\n    return false;\n  }\n\n  if (match_node->GetConditionType() != ConditionType::IF_ELSE) {\n    return false;\n  }\n\n  return true;\n}\n\nvoid OverHierarchyCheck::GetColorMap(\n    const std::shared_ptr<Node> &node,\n    const std::vector<std::shared_ptr<OutPort>> &output_ports,\n    const std::unordered_map<std::string, std::string> &graph_match_map,\n    const std::unordered_map<std::string,\n                             std::unordered_map<std::string, std::string>>\n        &graph_single_port_match_map,\n    const std::unordered_map<std::string, std::string> &end_if_map) {\n  std::string node_name = node->GetName();\n  std::vector<int> new_color;\n  auto input_ports = node->GetInputPorts();\n  std::shared_ptr<IndexPort> input_index_port = std::make_shared<IndexPort>();\n  input_index_port->SetNodeName(node->GetName());\n  input_index_port->SetPortType(IndexPortType::INPUT);\n  if (input_ports.size() == 0) {\n    input_index_port->SetPortName(EXTERNAL);\n  } else {\n    input_index_port->SetPortName(input_ports[0]->GetName());\n  }\n\n  auto input_color = color_map_[input_index_port->ToString()];\n\n  new_color.assign(input_color.begin(), input_color.end());\n  if (node->GetConditionType() == ConditionType::IF_ELSE ||\n      node->GetOutputType() == FlowOutputType::EXPAND) {\n    if (CheckEndIfNode(node, end_if_map)) {\n      new_color.pop_back();\n    }\n\n    ++max_color_;\n    new_color.push_back(max_color_);\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  if (node->GetOutputType() == FlowOutputType::COLLAPSE) {\n    if (CheckEndIfNode(node, end_if_map)) {\n      new_color.pop_back();\n    }\n\n    new_color.pop_back();\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  if (node->GetLoopType() == 
LoopType::LOOP) {\n    auto link_node = FindLoopLinkNode(node);\n    if (link_node == node) {\n      SetOutPortColor(node, output_ports, new_color);\n      return;\n    }\n\n    for (const auto &out_port : output_ports) {\n      std::shared_ptr<IndexPort> index_output_port =\n          std::make_shared<IndexPort>();\n      index_output_port->SetNodeName(node_name);\n      index_output_port->SetPortName(out_port->GetName());\n      index_output_port->SetPortType(IndexPortType::OUTPUT);\n      auto inport = *out_port->GetConnectInPort().begin();\n      auto link_node_name = inport->GetNode()->GetName();\n      if (link_node->GetName() == link_node_name) {\n        new_color.push_back(++max_color_);\n        color_map_[index_output_port->ToString()] = new_color;\n        new_color.pop_back();\n      } else {\n        color_map_[index_output_port->ToString()] = new_color;\n      }\n    }\n    return;\n  }\n\n  auto match_node_name = graph_match_map.at(node->GetName());\n  if (match_node_name == EXTERNAL) {\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  auto pre_match_real_node = CastNode(all_nodes_.at(match_node_name));\n  if (pre_match_real_node == nullptr) {\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  for (auto &links : loop_links_) {\n    if (links.second == node->GetName()) {\n      for (const auto &out_port : output_ports) {\n        std::shared_ptr<IndexPort> index_output_port =\n            std::make_shared<IndexPort>();\n        index_output_port->SetNodeName(node_name);\n        index_output_port->SetPortName(out_port->GetName());\n        index_output_port->SetPortType(IndexPortType::OUTPUT);\n        auto inport = *out_port->GetConnectInPort().begin();\n        auto link_node_name = inport->GetNode()->GetName();\n        if (links.first == link_node_name) {\n          auto color = new_color[new_color.size() - 1];\n          new_color.pop_back();\n          color_map_[index_output_port->ToString()] 
= new_color;\n          new_color.push_back(color);\n        } else {\n          color_map_[index_output_port->ToString()] = new_color;\n        }\n      }\n      return;\n    }\n  }\n\n  bool single_port_has_multi_links = false;\n  std::string input_port_name;\n  for (auto &input_port : input_ports) {\n    auto outputs = input_port->GetAllOutPort();\n    if (outputs.size() > 1) {\n      single_port_has_multi_links = true;\n      input_port_name = input_port->GetName();\n      break;\n    }\n  }\n\n  if (!single_port_has_multi_links) {\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  if (pre_match_real_node->GetConditionType() == ConditionType::IF_ELSE) {\n    new_color.pop_back();\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  auto condition_match_node =\n      graph_single_port_match_map.at(node_name).at(input_port_name);\n  if (condition_match_node.empty()) {\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  auto condition_match_real_node =\n      CastNode(all_nodes_.at(condition_match_node));\n  if (condition_match_real_node != nullptr &&\n      condition_match_real_node->GetConditionType() == ConditionType::IF_ELSE) {\n    new_color.pop_back();\n    SetOutPortColor(node, output_ports, new_color);\n    return;\n  }\n\n  SetOutPortColor(node, output_ports, new_color);\n}\n\nvoid OverHierarchyCheck::SetOutPortColor(\n    const std::shared_ptr<Node> &node,\n    const std::vector<std::shared_ptr<OutPort>> &out_ports,\n    const std::vector<int> &new_color) {\n  auto node_name = node->GetName();\n  for (const auto &output_port : out_ports) {\n    std::shared_ptr<IndexPort> index_output_port =\n        std::make_shared<IndexPort>();\n    index_output_port->SetNodeName(node_name);\n    index_output_port->SetPortName(output_port->GetName());\n    index_output_port->SetPortType(IndexPortType::OUTPUT);\n    color_map_[index_output_port->ToString()] = new_color;\n  }\n}\n\nStatus 
OverHierarchyCheck::Check(\n    const std::unordered_map<std::string, std::string> &graph_match_map,\n    const std::unordered_map<std::string,\n                             std::unordered_map<std::string, std::string>>\n        &graph_single_port_match_map,\n    const std::unordered_map<std::string, std::string> &end_if_map) {\n  Status status{STATUS_OK};\n  for (const auto &start_node : start_nodes_) {\n    auto real_node = CastNode(start_node);\n    if (real_node == nullptr) {\n      continue;\n    }\n\n    std::queue<std::shared_ptr<Node>> queue;\n    queue.push(real_node);\n    InitFirstNode(real_node);\n\n    while (!queue.empty()) {\n      auto node = queue.front();\n      auto node_name = node->GetName();\n      queue.pop();\n      visited_[node_name] = true;\n      status = CheckInputPorts(node, graph_single_port_match_map);\n      if (status == STATUS_BADCONF) {\n        status = {STATUS_BADCONF, status.WrapErrormsgs()};\n        return status;\n      }\n\n      if (status == STATUS_NODATA) {\n        queue.push(node);\n        continue;\n      }\n\n      auto output_ports = node->GetOutputPorts();\n      GetColorMap(node, output_ports, graph_match_map,\n                  graph_single_port_match_map, end_if_map);\n\n      for (auto &output_port : output_ports) {\n        std::shared_ptr<IndexPort> index_output_port =\n            std::make_shared<IndexPort>();\n        index_output_port->SetNodeName(node_name);\n        index_output_port->SetPortName(output_port->GetName());\n        index_output_port->SetPortType(IndexPortType::OUTPUT);\n        auto input_ports = output_port->GetConnectInPort();\n        for (const auto &input_port : input_ports) {\n          std::shared_ptr<IndexPort> index_input_port =\n              std::make_shared<IndexPort>();\n          auto inport_node_name = input_port->GetNode()->GetName();\n          index_input_port->SetNodeName(inport_node_name);\n          index_input_port->SetPortName(input_port->GetName());\n          
index_input_port->SetPortType(IndexPortType::INPUT);\n\n          if (!visited_[inport_node_name]) {\n            auto connect_node = CastNode(all_nodes_[inport_node_name]);\n            if (connect_node != nullptr) {\n              queue.push(connect_node);\n              visited_[inport_node_name] = true;\n            }\n          }\n\n          if (color_map_.find(index_input_port->ToString()) ==\n                  color_map_.end() ||\n              color_map_[index_input_port->ToString()].empty()) {\n            color_map_[index_input_port->ToString()] =\n                color_map_[index_output_port->ToString()];\n            continue;\n          }\n\n          if (CheckEndIfPort(input_port, index_input_port,\n                             graph_single_port_match_map)) {\n            auto color_level = color_map_[index_output_port->ToString()];\n            if (color_level == color_map_[index_input_port->ToString()]) {\n              continue;\n            }\n\n            color_level.pop_back();\n            if (color_level == color_map_[index_input_port->ToString()]) {\n              continue;\n            }\n          }\n\n          if (color_map_[index_input_port->ToString()] !=\n              color_map_[index_output_port->ToString()]) {\n            status = {STATUS_BADCONF,\n                      index_output_port->GetNodeName() + \":\" +\n                          index_output_port->GetPortName() + \" links \" +\n                          index_input_port->GetNodeName() + \":\" +\n                          index_input_port->GetPortName() + \" failed. 
\"};\n            return status;\n          }\n        }\n      }\n    }\n  }\n  return status;\n}\n\nGraphChecker::GraphChecker(\n    const std::vector<std::shared_ptr<NodeBase>> &nodes,\n    const std::set<std::shared_ptr<NodeBase>> &start_nodes,\n    std::map<std::string, std::string> loop_links,\n    std::vector<std::vector<std::string>> loop_structures,\n    const std::map<std::shared_ptr<OutPort>, std::set<std::shared_ptr<InPort>>>\n        &edges)\n    : nodes_(nodes),\n      loop_links_(std::move(loop_links)),\n      loop_structures_(std::move(loop_structures)) {\n  for (const auto &node : nodes) {\n    all_nodes_[node->GetName()] = node;\n  }\n\n  lca_ = std::make_shared<LeastCommonAncestor>(all_nodes_);\n  ovc_ = std::make_shared<OverHierarchyCheck>(\n      all_nodes_, start_nodes, loop_links_, loop_structures_, edges);\n}\n\nGraphChecker::~GraphChecker() {\n  all_nodes_.clear();\n  lca_ = nullptr;\n  ovc_ = nullptr;\n}\n\nvoid GraphChecker::SetMatchNodes() {\n  for (auto &node : all_nodes_) {\n    auto real_node = CastNode(node.second);\n    if (real_node == nullptr) {\n      continue;\n    }\n\n    if (real_node->GetInputNum() < 1) {\n      continue;\n    }\n\n    auto match_node = CastNode(all_nodes_.at(graph_match_map_[node.first]));\n    if (match_node == nullptr) {\n      continue;\n    }\n\n    if (real_node->GetInputNum() == 1 &&\n        real_node->GetOutputType() != FlowOutputType::COLLAPSE &&\n        match_node->GetConditionType() != ConditionType::IF_ELSE) {\n      continue;\n    }\n\n    real_node->SetMatchNode(\"match_node\", match_node);\n\n    for (auto &single_node_match : graph_single_port_match_map_[node.first]) {\n      auto match_condition_node =\n          CastNode(all_nodes_.at(single_node_match.second));\n      if (match_condition_node == nullptr) {\n        Abort(\"cast match condition node failed.\");\n      }\n\n      real_node->SetMatchNode(single_node_match.first, match_condition_node);\n    }\n  }\n}\n\nvoid 
GraphChecker::ShowMatchNodes() {\n  for (auto &node : all_nodes_) {\n    auto real_node = CastNode(node.second);\n    if (real_node == nullptr) {\n      continue;\n    }\n\n    auto match_nodes = real_node->GetMatchNodes();\n    for (auto &match_node : match_nodes) {\n      MBLOG_INFO << \"node: \" << node.first << \", key: \" << match_node.first\n                 << \", value: \" << match_node.second->GetName();\n    }\n  }\n}\n\nStatus GraphChecker::Check() {\n  for (auto &check_node : nodes_) {\n    NodeStreamConnection node_stream_map;\n    auto status = CalNodeStreamMap(check_node, node_stream_map);\n    if (status != STATUS_SUCCESS) {\n      const auto *msg = \"caculate node stream map failed\";\n      MBLOG_ERROR << msg;\n      return {status, msg};\n    }\n\n    auto cur_real_node = CastNode(check_node);\n    // virtual node\n    if (cur_real_node == nullptr) {\n      continue;\n    }\n\n    auto name = cur_real_node->GetName();\n    status = CheckNodeMatch(cur_real_node, node_stream_map);\n    if (status != STATUS_SUCCESS) {\n      auto msg = \"check node \" + name + \" link connect failed.\";\n      MBLOG_ERROR << msg << \", \" << status.WrapErrormsgs();\n      return {status, msg};\n    }\n\n    status = CheckCollapseMatch(cur_real_node);\n    if (status != STATUS_SUCCESS) {\n      auto msg = \"check node \" + name + \"branch match CollapseMatch failed\";\n      MBLOG_ERROR << msg << \", \" << status.WrapErrormsgs();\n      return {status, msg};\n    }\n\n    node_stream_connection_map_[cur_real_node->GetName()] = node_stream_map;\n  }\n\n  auto status = CheckOverHierarchyMatch();\n  if (status != STATUS_SUCCESS) {\n    auto msg = \"check over hierarchy match failed, \" + status.WrapErrormsgs();\n    MBLOG_ERROR << msg;\n    return {status, msg};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus GraphChecker::CalNodeStreamMap(const std::shared_ptr<NodeBase> &node,\n                                      NodeStreamConnection &node_stream_map) {\n  Status 
status{STATUS_SUCCESS};\n  auto input_ports = node->GetInputPorts();\n\n  // no input\n  if (input_ports.empty()) {\n    auto external = IndexPort(EXTERNAL, EXTERNAL, IndexPortType::INPUT);\n    node_stream_map[\"p1\"] = {external};\n    graph_match_map_[node->GetName()] = EXTERNAL;\n    return status;\n  }\n\n  for (auto &input_port : input_ports) {\n    auto pre_output_ports = input_port->GetAllOutPort();\n    auto key = input_port->GetName();\n    for (auto &pre_output_port : pre_output_ports) {\n      std::string output_port_name = pre_output_port.lock()->GetName();\n      std::string pre_node_name = pre_output_port.lock()->GetNode()->GetName();\n      auto value =\n          IndexPort(pre_node_name, output_port_name, IndexPortType::OUTPUT);\n      node_stream_map[key].emplace_back(value);\n    }\n  }\n\n  if (node_stream_map.empty()) {\n    status = {STATUS_BADCONF, \"cal node stream connection failed.\"};\n  }\n\n  return status;\n}\n\nStatus GraphChecker::CheckBranchPathMatch(const std::string &start,\n                                          const std::string &end) {\n  Status status{STATUS_SUCCESS};\n  int expand_collapse_flag = 0;\n  if (end == start) {\n    auto end_node = CastNode(all_nodes_[end]);\n    if (end_node == nullptr) {\n      return status;\n    }\n\n    if (end_node->GetOutputType() == FlowOutputType::EXPAND) {\n      expands_++;\n    }\n\n    return status;\n  }\n\n  std::string tmp{start};\n  do {\n    auto tmp_node = CastNode(all_nodes_[tmp]);\n\n    if (tmp_node == nullptr) {\n      break;\n    }\n\n    if (tmp_node->GetOutputType() == FlowOutputType::COLLAPSE) {\n      expand_collapse_flag++;\n    }\n\n    if (tmp_node->GetOutputType() == FlowOutputType::EXPAND) {\n      expand_collapse_flag--;\n    }\n\n    tmp = graph_match_map_[tmp];\n  } while (tmp != graph_match_map_[end]);\n\n  auto end_node = CastNode(all_nodes_[end]);\n  // match node is virtual node\n  if (end_node == nullptr && expand_collapse_flag != 0) {\n    status = 
{STATUS_BADCONF, \"from node:\" + start + \" to node:\" + end +\n                                  \" has unmatched expand or collapse nodes\"};\n    return status;\n  }\n\n  // maybe the end expand node match at the checking node\n  if (expand_collapse_flag == -1 &&\n      end_node->GetOutputType() == FlowOutputType::EXPAND) {\n    expands_++;\n    return status;\n  }\n\n  // the end collapse node match at pre path.\n  if (expand_collapse_flag == 1 &&\n      end_node->GetOutputType() == FlowOutputType::COLLAPSE) {\n    return status;\n  }\n\n  if (expand_collapse_flag != 0) {\n    status = {STATUS_BADCONF, \"from node:\" + start + \" to node:\" + end +\n                                  \" has unmatched expand or collapse nodes\"};\n  }\n\n  return status;\n}\n\nbool GraphChecker::CheckPortMatch(const IndexPort &match_pair) {\n  std::shared_ptr<Node> node = CastNode(all_nodes_[match_pair.GetNodeName()]);\n  if (node == nullptr) {\n    if (match_pair.GetPortName() == EXTERNAL) {\n      return false;\n    }\n\n    return true;\n  }\n\n  auto port = node->GetOutputPort(match_pair.GetPortName());\n\n  // input port\n  if (port == nullptr) {\n    return false;\n  }\n\n  // output port\n  return true;\n}\n\nvoid GraphChecker::UpdateAncestorPath(const std::vector<IndexPort> &values) {\n  lca_->Update(values, graph_match_map_);\n}\n\nStatus GraphChecker::CheckUnmatchExpands(size_t size) {\n  if (expands_ == 0) {\n    return STATUS_OK;\n  }\n\n  if (expands_ == size) {\n    expands_ = 0;\n    return STATUS_OK;\n  }\n\n  expands_ = 0;\n  return {STATUS_BADCONF, \"unmatch expands are not the same.\"};\n}\n\nStatus GraphChecker::CheckNodeMatch(\n    const std::shared_ptr<Node> &node,\n    const NodeStreamConnection &node_stream_map) {\n  Status status{STATUS_SUCCESS};\n  auto node_name = node->GetName();\n  if (node->GetInputPorts().empty()) {\n    graph_match_map_[node_name] = EXTERNAL;\n    return status;\n  }\n\n  std::vector<IndexPort> single_match_result;\n  
std::unordered_map<std::string, std::string> single_port_match_map_;\n  for (const auto &iter : node_stream_map) {\n    auto values = iter.second;\n\n    // in: {d.output}\n    // one input port and one edge links the port\n    if (values.size() == 1) {\n      single_match_result.emplace_back(values[0]);\n      graph_match_map_[node_name] = values[0].GetNodeName();\n      UpdateAncestorPath(values);\n      continue;\n    }\n\n    // in: {d.output, e.output}\n    // one input port and multi edges link the port\n    if (node->GetLoopType() == LoopType::LOOP) {\n      if (values.size() != 2) {\n        status = {STATUS_BADCONF, \"loop node can only link 2 edges.\"};\n        return status;\n      }\n\n      for (auto &loop_link : loop_links_) {\n        if (loop_link.first != node_name) {\n          continue;\n        }\n\n        for (auto &value : values) {\n          if (value.GetNodeName() == loop_link.second) {\n            continue;\n          }\n\n          single_match_result.emplace_back(value);\n          graph_match_map_[node_name] = value.GetNodeName();\n          return status;\n        }\n      }\n    }\n\n    UpdateAncestorPath(values);\n    std::vector<IndexPort> lca_nodes;\n    auto status_single_lca =\n        CheckLeastCommonAncestorsAnyTwoNodes(values, lca_nodes);\n    if (status_single_lca != STATUS_SUCCESS) {\n      status = {STATUS_BADCONF,\n                node_name + \": \" + iter.first +\n                    \" port match failed, err: \" + status_single_lca.Errormsg()};\n      return status;\n    }\n\n    IndexPort single_match_node;\n    status_single_lca = LeastCommonAncestors(lca_nodes, single_match_node);\n    if (status_single_lca != STATUS_SUCCESS) {\n      status = {STATUS_BADCONF,\n                node_name + \": \" + iter.first +\n                    \" port match failed, err: \" + status_single_lca.Errormsg()};\n      return status;\n    }\n\n    // true: output port; false: input port\n    // scene 2)\n    if 
(CheckPortMatch(single_match_node)) {\n      status = {STATUS_BADCONF,\n                node_name + \": \" + iter.first + \" match at \" +\n                    single_match_node.GetNodeName() + \": \" +\n                    single_match_node.GetPortName() +\n                    \". One port links multi edges can not match at one port.\"};\n      return status;\n    }\n\n    // scene 4)\n    auto single_match_real_node =\n        CastNode(all_nodes_[single_match_node.GetNodeName()]);\n    if (single_match_real_node != nullptr &&\n        single_match_real_node->GetConditionType() != ConditionType::IF_ELSE) {\n      status = {STATUS_BADCONF,\n                node_name + \": \" + iter.first + \" match at \" +\n                    single_match_node.GetNodeName() + \": \" +\n                    single_match_node.GetPortName() +\n                    \". One port links multi edges can not match at multi ports \"\n                    \"when the match node is condition node.\"};\n      return status;\n    }\n    single_port_match_map_[iter.first] = single_match_node.GetNodeName();\n\n    for (auto &value : values) {\n      status = CheckBranchPathMatch(value.GetNodeName(),\n                                    single_match_node.GetNodeName());\n      if (status != STATUS_SUCCESS) {\n        return status;\n      }\n    }\n\n    if (!CheckUnmatchExpands(values.size())) {\n      status = {STATUS_BADCONF,\n                \"from \" + node_name + \" to \" + single_match_node.GetNodeName() +\n                    \" path branches have unmatched expand node.\"};\n      return status;\n    }\n\n    graph_match_map_[node_name] = single_match_real_node->GetName();\n    single_match_result.emplace_back(single_match_node);\n  }\n\n  // multi branch match at one node\n  if (single_match_result.size() == 1) {\n    graph_match_map_[node_name] = single_match_result[0].GetNodeName();\n    end_if_map_[node_name] = single_match_result[0].GetNodeName();\n    return status;\n  }\n\n  
UpdateAncestorPath(single_match_result);\n  IndexPort multi_match_node;\n  auto status_multi_lca =\n      LeastCommonAncestors(single_match_result, multi_match_node);\n  if (status_multi_lca != STATUS_SUCCESS) {\n    status = {STATUS_BADCONF, node_name + \" match failed at multi ports err: \" +\n                                  status_multi_lca.Errormsg()};\n    return status;\n  }\n\n  auto output_port = CheckPortMatch(multi_match_node);\n  auto multi_match_real_node =\n      CastNode(all_nodes_[multi_match_node.GetNodeName()]);\n\n  if (multi_match_real_node != nullptr) {\n    if (!output_port &&\n        multi_match_real_node->GetConditionType() == ConditionType::IF_ELSE) {\n      auto err_msg = node_name + \" match from multi ports at \" +\n                     multi_match_node.GetNodeName() + \":\" +\n                     multi_match_node.GetPortName() + \". \" +\n                     multi_match_node.GetNodeName() +\n                     \" can not be if-else node\";\n      status = {STATUS_BADCONF, err_msg};\n      return status;\n    }\n  }\n\n  for (auto &single_match : single_match_result) {\n    status = CheckBranchPathMatch(single_match.GetNodeName(),\n                                  multi_match_node.GetNodeName());\n    if (status != STATUS_SUCCESS) {\n      return status;\n    }\n  }\n\n  if (!CheckUnmatchExpands(single_match_result.size())) {\n    status = {STATUS_BADCONF, \"from \" + node_name + \" to \" +\n                                  multi_match_node.GetNodeName() +\n                                  \" path branches have unmatched expand node.\"};\n    return status;\n  }\n\n  // scene 5) 6) 7) 8)\n  graph_match_map_[node_name] =\n      all_nodes_[multi_match_node.GetNodeName()]->GetName();\n  graph_single_port_match_map_[node_name] = single_port_match_map_;\n  return status;\n}\n\nStatus GraphChecker::CheckOverHierarchyMatch() {\n  return ovc_->Check(graph_match_map_, graph_single_port_match_map_,\n                     
end_if_map_);\n}\n\nvoid GraphChecker::FindNearestNeighborMatchExpand(const std::string &node,\n                                                  std::string &match_node) {\n  int expand_collapse_flag = 1;\n  std::string tmp{node};\n  std::string pre_node_name;\n  std::shared_ptr<Node> pre_node;\n  while (true) {\n    pre_node_name = graph_match_map_[tmp];\n\n    if (!pre_node_name.empty() && pre_node_name == EXTERNAL) {\n      break;\n    }\n\n    pre_node = CastNode(all_nodes_[pre_node_name]);\n\n    if (pre_node->GetOutputType() == FlowOutputType::COLLAPSE) {\n      expand_collapse_flag++;\n    }\n\n    if (pre_node->GetOutputType() == FlowOutputType::EXPAND) {\n      expand_collapse_flag--;\n    }\n\n    if (expand_collapse_flag == 0) {\n      break;\n    }\n\n    tmp = pre_node_name;\n  };\n\n  if (expand_collapse_flag != 0) {\n    return;\n  }\n\n  match_node = pre_node_name;\n  graph_match_map_[node] = pre_node_name;\n}\n\nStatus GraphChecker::CheckCollapseMatch(const std::shared_ptr<Node> &node) {\n  Status status{STATUS_SUCCESS};\n  if (node->GetInputNum() == 0) {\n    return status;\n  }\n\n  if (node->GetOutputType() != FlowOutputType::COLLAPSE) {\n    return status;\n  }\n\n  std::string match_node;\n  FindNearestNeighborMatchExpand(node->GetName(), match_node);\n  if (match_node.empty()) {\n    status = {STATUS_BADCONF,\n              \"can't find a expand node for \" + node->GetName()};\n    return status;\n  }\n\n  return status;\n}\n\nstd::unordered_map<std::string, std::string> GraphChecker::GetGraphMatchMap() {\n  return graph_match_map_;\n}\n\nStatus GraphChecker::CheckLeastCommonAncestorsAnyTwoNodes(\n    const std::vector<IndexPort> &match_nodes,\n    std::vector<IndexPort> &res_nodes) {\n  Status status{STATUS_SUCCESS};\n  for (size_t i = 0; i < match_nodes.size(); ++i) {\n    auto first_node = match_nodes[i];\n    for (size_t j = i + 1; j < match_nodes.size(); ++j) {\n      auto second_node = match_nodes[j];\n      auto res = 
lca_->Find(first_node, second_node);\n      if (res.GetNodeName().empty() && res.GetPortName().empty()) {\n        std::string err_msg =\n            \"can not find LeastCommonAncestors node between \" +\n            first_node.GetNodeName() + \":\" + first_node.GetPortName() +\n            \" and \" + second_node.GetNodeName() + \":\" +\n            second_node.GetPortName();\n        status = {STATUS_BADCONF, err_msg};\n        return status;\n      }\n\n      if (CheckPortMatch(res)) {\n        status = {\n            STATUS_BADCONF,\n            first_node.GetNodeName() + \": \" + first_node.GetPortName() +\n                \" and \" + second_node.GetNodeName() + \":\" +\n                second_node.GetPortName() + \" match at \" + res.GetNodeName() +\n                \": \" + res.GetNodeName() +\n                \". One port links multi edges can not match at one port.\"};\n        return status;\n      }\n\n      if (j == i + 1) {\n        res_nodes.emplace_back(res);\n      }\n    }\n  }\n\n  return status;\n}\n\nStatus GraphChecker::LeastCommonAncestors(\n    const std::vector<IndexPort> &match_nodes, IndexPort &res_match_node) {\n  Status status{STATUS_OK};\n  res_match_node = match_nodes[0];\n  for (size_t i = 1; i < match_nodes.size(); ++i) {\n    auto res = lca_->Find(res_match_node, match_nodes[i]);\n    auto tmp_node = res_match_node;\n    res_match_node = res;\n    if (res.GetNodeName().empty() && res.GetPortName().empty()) {\n      std::string err_msg =\n          \"can not find LeastCommonAncestors node between \" +\n          tmp_node.GetNodeName() + \":\" + tmp_node.GetPortName() + \" and \" +\n          match_nodes[i].GetNodeName() + \":\" + match_nodes[i].GetPortName();\n      status = {STATUS_BADCONF, err_msg};\n      return status;\n    }\n  }\n\n  return status;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/graph.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/graph.h\"\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/uuid.h\"\n#include \"modelbox/external_data_map.h\"\n#include \"modelbox/graph_checker.h\"\n#include \"modelbox/profiler.h\"\n#include \"scheduler/flow_scheduler.h\"\n\nnamespace modelbox {\n\nconstexpr const char *GRAPH_NODE_TYPE = \"type\";\nconstexpr const char *GRAPH_NODE_FLOWUNIT = \"flowunit\";\nconstexpr const char *GRAPH_NODE_INPUT = \"input\";\nconstexpr const char *GRAPH_NODE_OUTPUT = \"output\";\nconstexpr const char *GRAPH_KEY_DEVICE = \"device\";\nconstexpr const char *GRAPH_KEY_DEVICE_ID = \"deviceid\";\nconstexpr const char *GRAPH_KEY_QUEUE_SIZE = \"queue_size\";\nconstexpr const char *GRAPH_KEY_BATCH_SIZE = \"batch_size\";\nconstexpr const char *GRAPH_KEY_CHECK_NODE_OUTPUT = \"need_check_output\";\n\nGraph::Graph() : scheduler_(nullptr) {}\n\nGraph::~Graph() {\n  if (!is_stop_) {\n    Shutdown();\n  }\n  src_to_dst_.clear();\n  dst_to_src_.clear();\n  topo_order_.clear();\n  nodes_.clear();\n  if (flow_stats_ != nullptr) {\n    flow_stats_->DelItem(id_);\n  }\n}\n\nStatus Graph::Initialize(const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n                         const std::shared_ptr<DeviceManager> &device_mgr,\n                         std::shared_ptr<Profiler> profiler,\n     
                    const std::shared_ptr<Configuration> &config) {\n  if (flowunit_mgr == nullptr || device_mgr == nullptr || config == nullptr) {\n    const auto *msg = \"argument is invalid\";\n    auto ret = Status(STATUS_INVALID, msg);\n    MBLOG_ERROR << ret.WrapErrormsgs();\n    return ret;\n  }\n\n  flowunit_mgr_ = flowunit_mgr;\n  device_mgr_ = device_mgr;\n  profiler_ = std::move(profiler);\n  flow_stats_ = Statistics::GetGlobalItem()->GetItem(STATISTICS_ITEM_FLOW);\n  config_ = config;\n  auto ret = GetUUID(&id_);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"Get uuid for graph failed\";\n    return ret;\n  }\n\n  if (flow_stats_ != nullptr) {\n    graph_stats_ = flow_stats_->AddItem(id_);\n    if (graph_stats_ == nullptr) {\n      MBLOG_ERROR << \"Get stats for graph \" << id_\n                  << \" failed, err: \" << StatusError.Errormsg();\n    }\n  }\n\n  return STATUS_OK;\n}\n\nstd::string Graph::GetId() const { return id_; }\n\nstd::string Graph::GetName() const { return name_; }\n\nStatus Graph::CheckLoopStructureNode() {\n  Status status{STATUS_OK};\n  for (auto &loop : loop_structures_) {\n    auto pos = std::find(loop.begin(), loop.end(), *(loop.end() - 1));\n    for (auto iter = pos; iter != loop.end(); iter++) {\n      auto real_node = std::dynamic_pointer_cast<Node>(GetNode(*iter));\n      if (real_node == nullptr) {\n        auto err_msg = \"invalid node \" + *iter;\n        status = {STATUS_FAULT, err_msg};\n        break;\n      }\n\n      if (real_node->GetFlowType() == STREAM) {\n        auto err_msg = \"loop node \" + *iter + \" can not be stream node.\";\n        status = {STATUS_FAULT, err_msg};\n        break;\n      }\n    }\n  }\n  return status;\n}\n\nvoid Graph::ShowGraphInfo(const std::shared_ptr<GCGraph> &g) {\n  name_ = g->GetGraphName();\n  MBLOG_INFO << \"Build graph name:\" << name_ << \", id:\" << id_;\n  g->ShowAllSubGraph();\n  g->ShowAllNode();\n  g->ShowAllEdge();\n}\n\nStatus Graph::CheckGraph() {\n  auto 
start_nodes = GetStartNodes();\n  auto graph_checker = std::make_shared<GraphChecker>(\n      topo_order_, start_nodes, loop_links_, loop_structures_, src_to_dst_);\n  Status res = graph_checker->Check();\n  if (!res) {\n    return res;\n  }\n\n  graph_checker->SetMatchNodes();\n  graph_checker->ShowMatchNodes();\n\n  return res;\n}\n\nStatus Graph::Build(const std::shared_ptr<GCGraph> &g) {\n  if (g == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  if (flowunit_mgr_ == nullptr || device_mgr_ == nullptr ||\n      config_ == nullptr) {\n    const auto *msg = \"graph is not initialized\";\n    auto ret = Status(STATUS_INVALID, msg);\n    return ret;\n  }\n\n  ShowGraphInfo(g);\n\n  // build node and add link\n  Status status = BuildGraph(g);\n  if (!status) {\n    const auto *msg = \"build graph failed, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = IsValidGraph();\n  if (!status) {\n    const auto *msg = \"invalid graph, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = FindLoopStructure();\n  if (!status) {\n    const auto *msg = \"loop node is illegal, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = GenerateTopology();\n  if (!status) {\n    const auto *msg = \"generate topology fail, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = UpdatePriority();\n  if (!status) {\n    const auto *msg = \"update proiority fail, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = InitPort();\n  if (!status) {\n    const auto *msg = \"init port fail, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = CheckLoopStructureNode();\n  if (!status) {\n    const auto *msg = \"check loop node fail, please check graph config.\";\n    auto ret = Status(status, msg);\n    
return ret;\n  }\n\n  status = CheckGraph();\n  if (!status) {\n    const auto *msg = \"check graph failed, please check graph config.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  status = InitScheduler();\n  if (!status) {\n    const auto *msg = \"init scheduler fail.\";\n    auto ret = Status(status, msg);\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::AddNode(const std::shared_ptr<NodeBase> &node) {\n  if (node == nullptr) {\n    const auto *msg = \"node is null pointer.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  auto ite = nodes_.find(node->GetName());\n  if (ite != nodes_.end()) {\n    auto msg = \"node is already exist. name: \" + node->GetName();\n    return {STATUS_INVALID, msg};\n  }\n\n  nodes_[node->GetName()] = node;\n  return STATUS_OK;\n}\n\nstd::shared_ptr<NodeBase> Graph::GetNode(const std::string &nodeName) const {\n  auto ite = nodes_.find(nodeName);\n  if (ite == nodes_.end()) {\n    return nullptr;\n  }\n\n  return ite->second;\n}\n\nstd::shared_ptr<InPort> Graph::GetInPort(const std::string &nodeName,\n                                         const std::string &portName) const {\n  auto ite = nodes_.find(nodeName);\n  if (ite == nodes_.end()) {\n    return nullptr;\n  }\n\n  auto node = ite->second;\n  if (node == nullptr) {\n    auto msg = \"node is null pointer, never here. name: \" + node->GetName();\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  return node->GetInputPort(portName);\n}\n\nstd::shared_ptr<OutPort> Graph::GetOutPort(const std::string &nodeName,\n                                           const std::string &portName) const {\n  auto ite = nodes_.find(nodeName);\n  if (ite == nodes_.end()) {\n    auto msg = \"node is not found, name: \" + nodeName;\n    StatusError = {STATUS_BADCONF, msg};\n    return nullptr;\n  }\n\n  auto node = ite->second;\n  if (node == nullptr) {\n    auto msg = \"node is null pointer, never here. 
name: \" + node->GetName();\n    MBLOG_ERROR << msg;\n    return nullptr;\n  }\n\n  return node->GetOutputPort(portName);\n}\n\nstd::unordered_map<std::shared_ptr<NodeBase>,\n                   std::vector<std::shared_ptr<IPort>>>\nGraph::GetNotifyPort() const {\n  std::unordered_map<std::shared_ptr<NodeBase>,\n                     std::vector<std::shared_ptr<IPort>>>\n      node_ports;\n  for (const auto &node : nodes_) {\n    std::vector<std::shared_ptr<IPort>> ports;\n    // add in ports\n    const auto &in_ports = node.second->GetInputPorts();\n    std::copy(in_ports.begin(), in_ports.end(), std::back_inserter(ports));\n\n    // add event port\n    const auto &event_port = node.second->GetEventPort();\n    ports.push_back(std::dynamic_pointer_cast<IPort>(event_port));\n\n    // add external port\n    const auto &external_ports = node.second->GetExternalPorts();\n    std::copy(external_ports.begin(), external_ports.end(),\n              std::back_inserter(ports));\n\n    node_ports.emplace(node.second, std::move(ports));\n  }\n\n  return node_ports;\n}\n\nStatus Graph::AddLink(const std::string &srcNodeName,\n                      const std::string &srcPortName,\n                      const std::string &dstNodeName,\n                      const std::string &dstPortName) {\n  auto srcPort = GetOutPort(srcNodeName, srcPortName);\n  if (srcPort == nullptr) {\n    auto msg =\n        \"src port is not exist. node: \" + srcNodeName + \" port: \" + srcPortName;\n    return {STATUS_BADCONF, msg};\n  }\n\n  auto dstPort = GetInPort(dstNodeName, dstPortName);\n  if (dstPort == nullptr) {\n    auto msg =\n        \"dst port is not exist. 
node: \" + dstNodeName + \" port: \" + dstPortName;\n    return {STATUS_BADCONF, msg};\n  }\n\n  return AddLink(srcPort, dstPort);\n}\n\nStatus Graph::AddLink(const std::shared_ptr<OutPort> &src,\n                      const std::shared_ptr<InPort> &dst) {\n  if (src == nullptr) {\n    const auto *msg = \"src port is null pointer.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  if (dst == nullptr) {\n    const auto *msg = \"dst port is null pointer.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  auto srcNode = src->GetNode();\n  if (srcNode == nullptr) {\n    const auto *msg = \"src node is null point.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  auto dstNode = dst->GetNode();\n  if (dstNode == nullptr) {\n    const auto *msg = \"dst node is null point.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  auto dstLinks = src_to_dst_.find(src);\n  if (dstLinks != src_to_dst_.end()) {\n    auto ite = dstLinks->second.find(dst);\n    if (ite != dstLinks->second.end()) {\n      auto msg = \"link is already exist. 
srcNode: \" + srcNode->GetName() +\n                 \" srcPort: \" + src->GetName() + \"->\" +\n                 \" dstNode: \" + dstNode->GetName() +\n                 \" dstPort: \" + dst->GetName();\n      return {STATUS_INVALID, msg};\n    }\n  }\n\n  src_to_dst_[src].insert(dst);\n  dst_to_src_[dst].insert(src);\n\n  auto msg = \"add link, \" + srcNode->GetName() + \":\" + src->GetName() + \" -> \" +\n             dstNode->GetName() + \":\" + dst->GetName();\n  MBLOG_INFO << msg;\n\n  return STATUS_OK;\n}\n\nstd::set<std::shared_ptr<InPort>> Graph::GetDstPortsByPort(\n    const std::shared_ptr<OutPort> &port) const {\n  std::set<std::shared_ptr<InPort>> ports;\n  if (port == nullptr) {\n    return ports;\n  }\n\n  auto ite = src_to_dst_.find(port);\n  if (ite == src_to_dst_.end()) {\n    return ports;\n  }\n\n  ports = ite->second;\n  return ports;\n}\n\nstd::set<std::shared_ptr<OutPort>> Graph::GetSrcPortsByPort(\n    const std::shared_ptr<InPort> &port) const {\n  std::set<std::shared_ptr<OutPort>> ports;\n  if (port == nullptr) {\n    return ports;\n  }\n\n  auto ite = dst_to_src_.find(port);\n  if (ite == dst_to_src_.end()) {\n    return ports;\n  }\n\n  ports = ite->second;\n  return ports;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetStartNodes() const {\n  std::set<std::shared_ptr<NodeBase>> startNode;\n  for (const auto &node : nodes_) {\n    auto inputNum = node.second->GetInputNum();\n    if (inputNum == 0) {\n      startNode.insert(node.second);\n    }\n  }\n\n  return startNode;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetEndNodes() const {\n  std::set<std::shared_ptr<NodeBase>> endNode;\n  for (const auto &node : nodes_) {\n    auto inputNum = node.second->GetInputNum();\n    if (inputNum == 0) {\n      endNode.insert(node.second);\n    }\n  }\n\n  return endNode;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetEndPointNodes() const {\n  std::set<std::shared_ptr<NodeBase>> endNode;\n  for (const auto &node : nodes_) {\n    auto 
outports = node.second->GetOutputPorts();\n    for (const auto &iter : outports) {\n      if (iter->GetConnectInPort().size() <= 0) {\n        endNode.insert(node.second);\n      }\n    }\n  }\n\n  return endNode;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetAllNodes() const {\n  std::set<std::shared_ptr<NodeBase>> allNode;\n  for (const auto &node : nodes_) {\n    allNode.insert(node.second);\n  }\n\n  return allNode;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetDstNodesByNode(\n    const std::string &nodeName) const {\n  std::set<std::shared_ptr<NodeBase>> nodes;\n  auto node = GetNode(nodeName);\n  if (node == nullptr) {\n    return nodes;\n  }\n\n  auto outports = node->GetOutputPorts();\n  for (const auto &port : outports) {\n    auto linkPorts = GetDstPortsByPort(port);\n    for (const auto &linkport : linkPorts) {\n      nodes.insert(linkport->GetNode());\n    }\n  }\n\n  return nodes;\n}\n\nstd::set<std::shared_ptr<NodeBase>> Graph::GetSrcNodesByNode(\n    const std::string &nodeName) const {\n  std::set<std::shared_ptr<NodeBase>> nodes;\n  auto node = GetNode(nodeName);\n  if (node == nullptr) {\n    return nodes;\n  }\n\n  auto inports = node->GetInputPorts();\n  for (const auto &port : inports) {\n    auto linkPorts = GetSrcPortsByPort(port);\n    for (const auto &linkport : linkPorts) {\n      nodes.insert(linkport->GetNode());\n    }\n  }\n\n  return nodes;\n}\n\nstd::shared_ptr<ExternalDataMap> Graph::CreateExternalDataMap() {\n  if (input_node_ == nullptr) {\n    MBLOG_ERROR << \"virtual input_node is nullptr\";\n    return nullptr;\n  }\n  auto session = session_manager_.CreateSession(graph_stats_);\n  auto init_stream = std::make_shared<Stream>(session);\n  auto extern_data =\n      std::make_shared<ExternalDataMapImpl>(input_node_, init_stream);\n  session->SetSessionIO(extern_data);\n  return extern_data;\n}\n\nStatus Graph::UpdateGraphConfigToNode(const std::shared_ptr<GCGraph> &g,\n                                      const 
std::shared_ptr<GCNode> &node) {\n  auto graph_config = g->GetConfiguration();\n  auto node_config = node->GetConfiguration();\n  auto update_node_config = [=](const std::string &key) {\n    if (node_config->Contain(key) == true) {\n      return;\n    }\n\n    if (graph_config->Contain(key) == false) {\n      return;\n    }\n\n    node_config->Copy(*graph_config, key);\n  };\n\n  update_node_config(GRAPH_KEY_BATCH_SIZE);\n  update_node_config(GRAPH_KEY_QUEUE_SIZE);\n  update_node_config(GRAPH_KEY_DEVICE_ID);\n  update_node_config(GRAPH_KEY_CHECK_NODE_OUTPUT);\n\n  return STATUS_OK;\n}\n\nStatus Graph::BuildFlowunitNode(const std::shared_ptr<GCGraph> &g,\n                                const std::shared_ptr<GCNode> &gcnode,\n                                bool strict) {\n  auto name = gcnode->GetNodeName();\n  auto node_config = gcnode->GetConfiguration();\n  auto device = node_config->GetString(GRAPH_KEY_DEVICE, \"\");\n  auto deviceid = node_config->GetString(GRAPH_KEY_DEVICE_ID, \"\");\n  auto flowunit = node_config->GetString(GRAPH_NODE_FLOWUNIT, \"\");\n  auto inports = gcnode->GetInputPorts();\n  auto outports = gcnode->GetOutputPorts();\n\n  if (flowunit.empty()) {\n    auto msg = \"node \" + name + \": flowunit name is empty.\";\n    return {STATUS_INVALID, msg};\n  }\n\n  if (inports->size() == 0 && outports->size() == 0) {\n    if (strict == false) {\n      MBLOG_INFO << \"skip orphan node: \" << name;\n      return STATUS_SUCCESS;\n    }\n\n    auto msg = \"orphan node: '\" + name +\n               \"', use graph.strict=false to disable orphan check\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  if (UpdateGraphConfigToNode(g, gcnode) == false) {\n    const auto *msg =\n        \"update node config failed, please check node config in graph scope\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  auto node = std::make_shared<Node>();\n  node->SetFlowUnitInfo(flowunit, device, deviceid, flowunit_mgr_);\n  node->SetProfiler(profiler_);\n  
node->SetStats(graph_stats_);\n  node->SetSessionManager(&session_manager_);\n  node->SetName(name);\n  auto status = InitNode(node, *inports, *outports, node_config);\n  if (!status) {\n    return status;\n  }\n\n  status = AddNode(node);\n  if (!status) {\n    auto msg = \"add node failed. name: '\" + name + \"'\";\n    return {status, msg};\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Graph::BuildInputNode(const std::shared_ptr<GCNode> &gcnode) {\n  auto name = gcnode->GetNodeName();\n  auto node_config = gcnode->GetConfiguration();\n  if (input_node_ports_.find(name) != input_node_ports_.end()) {\n    auto msg = \"virtual input port is already exist. name: '\" + name + \"'\";\n    return {STATUS_INVALID, msg};\n  }\n  input_node_ports_.insert(name);\n  input_node_config_map_.emplace(name, node_config);\n  return STATUS_SUCCESS;\n}\n\nStatus Graph::BuildOutputNode(const std::shared_ptr<GCNode> &gcnode) {\n  auto name = gcnode->GetNodeName();\n  auto node_config = gcnode->GetConfiguration();\n  if (output_node_ports_.find(name) != output_node_ports_.end()) {\n    auto msg = \"virtual out port is already exist. 
name: '\" + name + \"'\";\n    return {STATUS_INVALID, msg};\n  }\n  output_node_ports_.insert(name);\n  output_node_config_map_.emplace(name, node_config);\n  return STATUS_SUCCESS;\n}\n\nStatus Graph::BuildNode(const std::shared_ptr<GCGraph> &g,\n                        const std::shared_ptr<GCNode> &gcnode, bool strict) {\n  auto name = gcnode->GetNodeName();\n  auto node_config = gcnode->GetConfiguration();\n  auto type = node_config->GetString(GRAPH_NODE_TYPE, \"\");\n  auto flowunit = node_config->GetString(GRAPH_NODE_FLOWUNIT, \"\");\n\n  if (flowunit.length() > 0 && type.length() == 0) {\n    type = GRAPH_NODE_FLOWUNIT;\n  }\n\n  Status status = STATUS_SUCCESS;\n  if (type == GRAPH_NODE_FLOWUNIT) {\n    status = BuildFlowunitNode(g, gcnode, strict);\n  } else if (type == GRAPH_NODE_INPUT) {\n    status = BuildInputNode(gcnode);\n  } else if (type == GRAPH_NODE_OUTPUT) {\n    status = BuildOutputNode(gcnode);\n  } else {\n    if (strict) {\n      auto msg =\n          \"unsupport node type. 
name: '\" + name + \"' type: '\" + type + \"'\";\n      status = {STATUS_NOTSUPPORT, msg};\n    }\n  }\n\n  return status;\n}\n\nStatus Graph::BuildNodes(const std::shared_ptr<GCGraph> &g) {\n  auto strict = config_->GetBool(\"graph.strict\", true);\n\n  auto nodes = g->GetAllNodes();\n\n  for (auto &ite : nodes) {\n    auto gcnode = ite.second;\n    auto name = gcnode->GetNodeName();\n    MBLOG_INFO << \"begin build node \" << name;\n    auto status = BuildNode(g, gcnode, strict);\n    if (!status) {\n      MBLOG_ERROR << status;\n      return status;\n    }\n    MBLOG_INFO << \"build node \" << name << \" success\";\n  }\n  return STATUS_OK;\n}\n\nStatus Graph::BuildVirtualNodes(const std::shared_ptr<GCGraph> &g) {\n  if (!input_node_ports_.empty()) {\n    input_node_name_ = *input_node_ports_.begin();\n    input_node_ = std::make_shared<InputVirtualNode>(\"cpu\", \"0\", device_mgr_);\n    auto input_config = input_node_config_map_.begin()->second;\n    auto status = input_node_->Init({}, input_node_ports_, input_config);\n    if (!status) {\n      const auto *msg = \"init virtual input node failed.\";\n      return {status, msg};\n    }\n    input_node_->SetName(input_node_name_);\n    status = AddNode(input_node_);\n    if (!status) {\n      const auto *msg = \"add virtual input node failed.\";\n      return {status, msg};\n    }\n  }\n\n  if (!output_node_ports_.empty()) {\n    output_node_name_ = *output_node_ports_.begin();\n    auto output_config = output_node_config_map_.begin()->second;\n    auto output_type = output_config->GetString(\"output_type\", \"match\");\n    if (output_type == \"match\") {\n      output_node_ =\n          std::make_shared<OutputVirtualNode>(\"cpu\", \"0\", device_mgr_);\n    } else if (output_type == \"unmatch\") {\n      output_node_ =\n          std::make_shared<OutputUnmatchVirtualNode>(\"cpu\", \"0\", device_mgr_);\n    } else {\n      return {STATUS_INVALID, \"Invalid Output Type\"};\n    }\n\n    auto status = 
output_node_->Init(output_node_ports_, {}, output_config);\n    if (!status) {\n      const auto *msg = \"init virtual output node failed.\";\n      return {status, msg};\n    }\n    output_node_->SetName(output_node_name_);\n    status = AddNode(output_node_);\n    if (!status) {\n      const auto *msg = \"add virtual output node failed.\";\n      return {status, msg};\n    }\n  }\n  return STATUS_OK;\n}\n\nStatus Graph::BuildEdges(const std::shared_ptr<GCGraph> &g) {\n  auto edges = g->GetAllEdges();\n  for (auto &ite : edges) {\n    auto gcedge = ite.second;\n    auto srcNode = gcedge->GetHeadNode();\n    auto srcNodeName = srcNode->GetNodeName();\n    auto srcPortName = gcedge->GetHeadOutPort();\n    auto dstNode = gcedge->GetTailNode();\n    auto dstNodeName = dstNode->GetNodeName();\n    auto dstPortName = gcedge->GetTailInPort();\n\n    if (input_node_ports_.find(srcNodeName) != input_node_ports_.end()) {\n      srcPortName = srcNodeName;\n      srcNodeName = input_node_name_;\n    }\n\n    if (output_node_ports_.find(dstNodeName) != output_node_ports_.end()) {\n      dstPortName = dstNodeName;\n      dstNodeName = output_node_name_;\n    }\n\n    auto status = AddLink(srcNodeName, srcPortName, dstNodeName, dstPortName);\n    if (!status) {\n      const auto *msg = \"add link failed.\";\n      return {status, msg};\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::OpenNodes() {\n  ThreadPool pool(std::thread::hardware_concurrency());\n  pool.SetName(\"Node-Open\");\n  std::vector<std::future<Status>> result;\n  for (auto &itr : nodes_) {\n    auto node = itr.second;\n    auto ret = pool.Submit(node->GetName(), &NodeBase::Open, node.get());\n    result.push_back(std::move(ret));\n  }\n\n  for (auto &fut : result) {\n    const auto *msg = \"open node failed, please check log.\";\n    if (!fut.valid()) {\n      return {STATUS_FAULT, msg};\n    }\n\n    auto ret = fut.get();\n    if (!ret) {\n      return ret;\n    }\n  }\n\n  return STATUS_OK;\n}\n\nvoid 
Graph::CloseNodes() const {\n  ThreadPool pool(std::thread::hardware_concurrency());\n  pool.SetName(\"Node-Close\");\n\n  std::vector<std::future<void>> result;\n  for (const auto &itr : nodes_) {\n    auto node = itr.second;\n    auto ret =\n        pool.Submit(node->GetName() + \"_close\", &NodeBase::Close, node.get());\n    result.push_back(std::move(ret));\n  }\n\n  for (auto &fut : result) {\n    if (!fut.valid()) {\n      continue;\n    }\n\n    fut.get();\n  }\n}\n\nStatus Graph::BuildGraph(const std::shared_ptr<GCGraph> &g) {\n  auto status = BuildNodes(g);\n  if (!status) {\n    return status;\n  }\n\n  status = BuildVirtualNodes(g);\n  if (!status) {\n    return status;\n  }\n\n  status = BuildEdges(g);\n  if (!status) {\n    return status;\n  }\n\n  status = OpenNodes();\n  return status;\n}\n\nStatus Graph::IsValidGraph() const {\n  if (nodes_.empty()) {\n    const auto *msg = \"graph is empty, no node.\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  auto status = IsAllPortConnect();\n  if (!status) {\n    const auto *msg = \"not all port connect.\";\n    return {status, msg};\n  }\n\n  status = IsAllNodeConnect();\n  if (!status) {\n    const auto *msg = \"not all node connect.\";\n    return {status, msg};\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::IsAllPortConnect() const {\n  for (const auto &node : nodes_) {\n    // 某些输入port、输出port是可选的, TODO\n    auto inports = node.second->GetInputPorts();\n    for (const auto &port : inports) {\n      auto ite = dst_to_src_.find(port);\n      if (ite == dst_to_src_.end()) {\n        auto msg = \"in port is not connect. node: \" + node.second->GetName() +\n                   \" port: \" + port->GetName();\n        return {STATUS_BADCONF, msg};\n      }\n    }\n\n    auto outports = node.second->GetOutputPorts();\n    for (const auto &port : outports) {\n      auto ite = src_to_dst_.find(port);\n      if (ite == src_to_dst_.end()) {\n        auto msg = \"out port is not connect. 
node: \" + node.second->GetName() +\n                   \" port: \" + port->GetName();\n        return {STATUS_BADCONF, msg};\n      }\n    }\n  }\n  return STATUS_OK;\n}\n\nStatus Graph::IsAllNodeConnect() const {\n  std::map<std::string, int> nodeType;\n  int idx = 0;\n  for (const auto &node : nodes_) {\n    nodeType[node.first] = idx++;\n  }\n\n  for (const auto &link : src_to_dst_) {\n    for (const auto &linkport : link.second) {\n      auto srcNode = link.first->GetNode();\n      auto dstNode = linkport->GetNode();\n      auto srcType = nodeType[srcNode->GetName()];\n      auto dstType = nodeType[dstNode->GetName()];\n      if (srcType == dstType) {\n        continue;\n      }\n\n      auto msg = \"node. srcNode: \" + srcNode->GetName() + \" #\" +\n                 std::to_string(srcType) + \" dstNode: \" + dstNode->GetName() +\n                 \" #\" + std::to_string(dstType);\n      MBLOG_DEBUG << msg;\n\n      auto mergeType = srcType < dstType ? srcType : dstType;\n      for (const auto &node : nodeType) {\n        if (node.second == srcType || node.second == dstType) {\n          nodeType[node.first] = mergeType;\n          auto msg = \"merge. 
src: \" + node.first + \", \" +\n                     std::to_string(node.second) + \" -> \" +\n                     std::to_string(mergeType);\n          MBLOG_DEBUG << msg;\n        }\n      }\n    }\n  }\n\n  for (const auto &node : nodeType) {\n    auto msg = \"node: \" + node.first + \" #\" + std::to_string(node.second);\n    MBLOG_INFO << msg;\n  }\n  auto firstType = nodeType.begin();\n  for (const auto &node : nodeType) {\n    if (node.second != firstType->second) {\n      const auto *msg = \"not all node union.\";\n      return Status(STATUS_BADCONF, msg);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::UpdatePriority() {\n  auto callback = [](const std::shared_ptr<NodeBase> &node, int order) {\n    auto inports = node->GetInputPorts();\n    for (const auto &port : inports) {\n      port->SetPriority(order);\n      auto msg = \"set priority. node: \" + node->GetName() +\n                 \" port: \" + port->GetName() +\n                 \" priority: \" + std::to_string(order);\n      MBLOG_INFO << msg;\n    }\n    return true;\n  };\n\n  return Topology(callback);\n}\n\nStatus Graph::Topology(const std::function<bool(std::shared_ptr<NodeBase> node,\n                                                int order)> &callback) const {\n  int idx = 0;\n  for (const auto &node : topo_order_) {\n    auto ret = callback(node, idx);\n    if (!ret) {\n      auto msg = \"callback fail. topo idx: \" + std::to_string(idx) + \", \" +\n                 node->GetName();\n      MBLOG_WARN << msg;\n    } else {\n      auto msg = \"callback success. 
topo idx: \" + std::to_string(idx) + \", \" +\n                 node->GetName();\n      MBLOG_DEBUG << msg;\n    }\n    idx++;\n  }\n\n  return STATUS_OK;\n}\n\nvoid Graph::FindLoopSeq(std::shared_ptr<NodeBase> &root_node,\n                        std::vector<std::string> &vis) {\n  auto dstNodes = GetDstNodesByNode(root_node->GetName());\n  if (dstNodes.empty()) {\n    return;\n  }\n\n  for (auto dstNode : dstNodes) {\n    if (std::find(vis.begin(), vis.end(), dstNode->GetName()) != vis.end()) {\n      vis.push_back(dstNode->GetName());\n      loop_structures_.push_back(vis);\n      vis.pop_back();\n      return;\n    }\n\n    vis.push_back(dstNode->GetName());\n    FindLoopSeq(dstNode, vis);\n    vis.pop_back();\n  }\n}\n\nvoid Graph::FindLoopWithNode(std::shared_ptr<NodeBase> &root_node,\n                             std::vector<std::string> &vis) {\n  vis.push_back(root_node->GetName());\n  FindLoopSeq(root_node, vis);\n}\n\nvoid Graph::FillLoopLink() {\n  for (auto &loop : loop_structures_) {\n    auto loop_link_from = *(loop.end() - 2);\n    auto loop_link_to = *(loop.end() - 1);\n    loop_links_.insert(std::make_pair(loop_link_to, loop_link_from));\n  }\n}\n\nStatus Graph::CheckLoopNode() {\n  Status status{STATUS_EOF};\n  for (auto &node : nodes_) {\n    auto tmp_node = std::dynamic_pointer_cast<Node>(node.second);\n    // virtual node\n    if (tmp_node == nullptr) {\n      continue;\n    }\n\n    if (tmp_node->GetLoopType() != LOOP) {\n      continue;\n    }\n\n    if (tmp_node->GetInputNum() != 1) {\n      return {STATUS_FAULT, \"loop node input should be one.\"};\n    }\n\n    if (tmp_node->GetOutputNum() != 2) {\n      return {STATUS_FAULT, \"loop node output shoulde be two.\"};\n    }\n\n    status = STATUS_OK;\n  }\n\n  return status;\n}\n\nStatus Graph::FindLoopStructure() {\n  auto status = CheckLoopNode();\n  if (status == STATUS_FAULT) {\n    return status;\n  }\n\n  if (status == STATUS_EOF) {\n    MBLOG_DEBUG << \"there is no loop node.\";\n    
return STATUS_OK;\n  }\n\n  auto connectNode = GetStartNodes();\n  if (connectNode.empty()) {\n    const auto *msg = \"start node is not exist.\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  std::vector<std::string> vis;\n  while (!connectNode.empty()) {\n    auto nodeIte = connectNode.begin();\n    auto node = *nodeIte;\n    connectNode.erase(nodeIte);\n\n    FindLoopWithNode(node, vis);\n    vis.clear();\n  }\n\n  for (auto &loop : loop_structures_) {\n    for (auto &item : loop) {\n      MBLOG_INFO << \"item: \" << item;\n    }\n  }\n\n  FillLoopLink();\n  for (auto &loop : loop_links_) {\n    MBLOG_INFO << loop.first << \", \" << loop.second;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::GenerateTopology() {\n  std::vector<std::shared_ptr<NodeBase>> topoNode;\n  auto connectNode = GetStartNodes();\n  if (connectNode.empty()) {\n    const auto *msg = \"start node is not exist.\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  while (!connectNode.empty()) {\n    auto nodeIte = connectNode.begin();\n    auto node = *nodeIte;\n    connectNode.erase(nodeIte);\n    auto ite = std::find(topoNode.begin(), topoNode.end(), node);\n    if (ite != topoNode.end()) {\n      continue;\n    }\n\n    // all inport is in topoNode set\n    bool topo = true;\n    auto srcNodes = GetSrcNodesByNode(node->GetName());\n    for (const auto &srcNode : srcNodes) {\n      if (loop_links_.find(node->GetName()) != loop_links_.end() &&\n          loop_links_[node->GetName()] == srcNode->GetName()) {\n        continue;\n      }\n\n      auto ite = std::find(topoNode.begin(), topoNode.end(), srcNode);\n      if (ite == topoNode.end()) {\n        auto msg = \"srcNode not topnode. srcNode: \" + node->GetName() +\n                   \" node: \" + node->GetName();\n        MBLOG_DEBUG << msg;\n        topo = false;\n      }\n    }\n\n    if (topo) {\n      auto msg = \"add new topnode. 
node: \" + node->GetName();\n      MBLOG_DEBUG << msg;\n      topoNode.push_back(node);\n      auto dstNodes = GetDstNodesByNode(node->GetName());\n      for (const auto &dstNode : dstNodes) {\n        connectNode.insert(dstNode);\n        auto msg = \"add connect node. \" + node->GetName() + \" -> \" +\n                   dstNode->GetName();\n        MBLOG_DEBUG << msg;\n      }\n    }\n  }\n\n  auto i = 0;\n  for (const auto &node : topoNode) {\n    auto msg = \"topo index: \" + std::to_string(i) + \", \" + node->GetName();\n    MBLOG_INFO << msg;\n    ++i;\n  }\n\n  if (topoNode.size() != nodes_.size()) {\n    const auto *msg = \"not all node connect.\";\n    return {STATUS_BADCONF, msg};\n  }\n\n  topo_order_ = topoNode;\n\n  return STATUS_OK;\n}\n\nStatus Graph::InitPort() {\n  for (auto &portIte : dst_to_src_) {\n    auto inport = portIte.first;\n    for (const auto &outport : portIte.second) {\n      auto msg = \"port connect, \" + outport->GetNode()->GetName() + \":\" +\n                 outport->GetName() + \" -> \" + inport->GetNode()->GetName() +\n                 \":\" + inport->GetName();\n      MBLOG_INFO << msg;\n      outport->ConnectPort(inport);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::InitNode(std::shared_ptr<Node> &node,\n                       const std::set<std::string> &input_port_names,\n                       const std::set<std::string> &output_port_names,\n                       std::shared_ptr<Configuration> &config) {\n  auto status = node->Init(input_port_names, output_port_names, config);\n  return status;\n}\n\nStatus Graph::InitScheduler() {\n  scheduler_ = std::make_shared<FlowScheduler>();\n  size_t thread_num = nodes_.size() * 2;\n  if (thread_num < std::thread::hardware_concurrency()) {\n    thread_num = std::thread::hardware_concurrency();\n  }\n\n  if (!config_->Contain(\"graph.thread-num\")) {\n    config_->SetProperty(\"graph.thread-num\", thread_num);\n  }\n\n  if (!config_->Contain(\"graph.thread-num\")) {\n   
 config_->SetProperty(\"graph.max-thread-num\", thread_num * 4);\n  }\n\n  auto schedule_state = graph_stats_->AddItem(\"scheduler\");\n  auto status = scheduler_->Init(config_, schedule_state);\n  if (!status) {\n    const auto *msg = \"init scheduler failed.\";\n    MBLOG_FATAL << msg;\n    return {status, msg};\n  }\n\n  status = scheduler_->Build(*this);\n  if (!status) {\n    const auto *msg = \"build scheduler failed.\";\n    MBLOG_FATAL << msg;\n    return {status, msg};\n  }\n\n  return STATUS_OK;\n}\n\nStatus Graph::Run() {\n  if (scheduler_ == nullptr) {\n    const auto *message = \"scheduler is not initialized.\";\n    return {STATUS_SHUTDOWN, message};\n  }\n\n  if (profiler_ != nullptr) {\n    profiler_->Start();\n  }\n\n  return scheduler_->Run();\n}\n\nvoid Graph::RunAsync() {\n  if (scheduler_ == nullptr) {\n    const auto *message = \"scheduler is not initialized.\";\n    StatusError = {STATUS_SHUTDOWN, message};\n    return;\n  }\n\n  if (profiler_ != nullptr) {\n    profiler_->Start();\n  }\n\n  scheduler_->RunAsync();\n}\n\nStatus Graph::Wait(int64_t milliseconds, Status *ret_val) {\n  if (scheduler_ == nullptr) {\n    const auto *message = \"scheduler is not initialized.\";\n    return {STATUS_SHUTDOWN, message};\n  }\n\n  auto status = scheduler_->Wait(milliseconds, ret_val);\n  return status;\n}\n\nStatus Graph::Shutdown() {\n  if (scheduler_ != nullptr) {\n    scheduler_->Shutdown();\n    scheduler_ = nullptr;\n  }\n  if (profiler_ != nullptr) {\n    profiler_->Stop();\n  }\n\n  CloseNodes();\n\n  is_stop_ = true;\n\n  return STATUS_OK;\n}\n\nDynamicGraph::DynamicGraph() : Graph() {}\nDynamicGraph::~DynamicGraph() { Shutdown(); }\nStatus DynamicGraph::Shutdown() { return STATUS_OK; }\nStatus DynamicGraph::IsValidGraph() const { return STATUS_OK; }\nStatus DynamicGraph::InitScheduler() { return STATUS_OK; }\n\nStatus DynamicGraph::InitNode(std::shared_ptr<Node> &node,\n                              const std::set<std::string> 
&input_port_names,\n                              const std::set<std::string> &output_port_names,\n                              std::shared_ptr<Configuration> &config) {\n  auto status = node->Init(input_port_names, output_port_names, config);\n  return status;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/inner_event.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <utility>\n\n#include \"modelbox/inner_event.h\"\n\nnamespace modelbox {\n\nFlowUnitEvent::FlowUnitEvent() = default;\n\nFlowUnitEvent::~FlowUnitEvent() = default;\n\nvoid FlowUnitEvent::SetPrivate(const std::string &key,\n                               const std::shared_ptr<void> &private_content) {\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    private_map_.emplace(key, private_content);\n  }\n}\nstd::shared_ptr<void> FlowUnitEvent::GetPrivate(const std::string &key) {\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    return nullptr;\n  }\n  return private_map_[key];\n}\n\nFlowUnitInnerEvent::FlowUnitInnerEvent(EventCode code) : code_(code), match_key_(nullptr){};\n\nFlowUnitInnerEvent::~FlowUnitInnerEvent() = default;\n\nint FlowUnitInnerEvent::GetPriority() { return priority_; }\n\nvoid FlowUnitInnerEvent::SetDataCtxMatchKey(MatchKey *match_key) {\n  match_key_ = match_key;\n}\n\nMatchKey *FlowUnitInnerEvent::GetDataCtxMatchKey() { return match_key_; }\n\nFlowUnitInnerEvent::EventCode FlowUnitInnerEvent::GetEventCode() {\n  return code_;\n}\n\nstd::shared_ptr<FlowUnitEvent> FlowUnitInnerEvent::GetUserEvent() {\n  return user_event_;\n}\nvoid FlowUnitInnerEvent::SetUserEvent(std::shared_ptr<FlowUnitEvent> event) {\n  user_event_ = 
std::move(event);\n}\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/match_stream.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <utility>\n\n#include \"modelbox/match_stream.h\"\n\n#include \"modelbox/buffer_index_info.h\"\n#include \"modelbox/port.h\"\n#include \"modelbox/session.h\"\n\nnamespace modelbox {\nMatchKey* MatchKey::AsKey(BufferIndexInfo* match_at_buffer) {\n  return (MatchKey*)match_at_buffer;\n}\n\nMatchKey* MatchKey::AsKey(Stream* match_at_stream) {\n  return (MatchKey*)match_at_stream;\n}\n\nMatchStreamData::MatchStreamData() = default;\n\nMatchStreamData::~MatchStreamData() = default;\n\nvoid MatchStreamData::SetStreamMatchKey(MatchKey* match_at) {\n  match_at_ = match_at;\n}\n\nMatchKey* MatchStreamData::GetStreamMatchKey() { return match_at_; }\n\nvoid MatchStreamData::SetSession(std::shared_ptr<Session> session) {\n  session_ = std::move(session);\n}\n\nstd::shared_ptr<Session> MatchStreamData::GetSession() { return session_; }\n\nvoid MatchStreamData::SetEvent(std::shared_ptr<FlowUnitInnerEvent>& event) {\n  event_ = event;\n}\n\nstd::shared_ptr<FlowUnitInnerEvent> MatchStreamData::GetEvent() {\n  return event_;\n}\n\nvoid MatchStreamData::SetBufferList(std::shared_ptr<PortDataMap> port_buffers) {\n  port_to_stream_data_ = std::move(port_buffers);\n}\n\nstd::shared_ptr<PortDataMap> MatchStreamData::GetBufferList() const {\n  return port_to_stream_data_;\n}\n\nsize_t MatchStreamData::GetDataCount() const 
{\n  auto& first_port_stream_data = port_to_stream_data_->begin()->second;\n  return first_port_stream_data.size();\n}\n\n\nMatchBufferCache::MatchBufferCache(\n    size_t port_count,\n    std::unordered_map<std::string, size_t>* stream_count_each_port)\n    : port_count_(port_count),\n      stream_count_each_port_(stream_count_each_port) {}\n\nMatchBufferCache::~MatchBufferCache() = default;\n\nStatus MatchBufferCache::CacheBuffer(const std::string& port_name,\n                                     std::shared_ptr<Buffer>& buffer) {\n  // Check state\n  auto buffer_index = BufferManageView::GetIndexInfo(buffer);\n  if (!buffer_cache_.empty()) {\n    // multi port stream length not equal\n    if (is_end_flag_ && !buffer_index->IsEndFlag()) {\n      MBLOG_ERROR << \"port \" << port_name\n                  << \" mismatch, still has data when other port received end\";\n      return STATUS_FAULT;\n    }\n\n    if (!is_end_flag_ && buffer_index->IsEndFlag()) {\n      MBLOG_ERROR << \"port \" << port_name\n                  << \" mismatch, received end when other port still has data\";\n      return STATUS_FAULT;\n    }\n  }\n\n  buffer_cache_[port_name] = buffer;\n  is_end_flag_ = buffer_index->IsEndFlag();\n  auto& buffer_count_at_port = cur_buffer_count_each_port_[port_name];\n  ++buffer_count_at_port;\n\n  if (buffer_index->IsPlaceholder()) {\n    is_placeholder_ = true;\n  }\n\n  // port data already exist, in case if-else combine end flag\n  if (buffer_count_at_port > 1) {\n    return STATUS_EXIST;\n  }\n\n  return STATUS_OK;\n}\n\nbool MatchBufferCache::IsMatched() const {\n  if (port_count_ != buffer_cache_.size()) {\n    return false;\n  }\n\n  if (!is_end_flag_) {\n    return true;\n  }\n\n  if (stream_count_each_port_->empty()) {\n    // has no input port, no need to check\n    return true;\n  }\n\n  // if combine condition node output, will have multi end_flag received\n  for (const auto& buffer_count_item : cur_buffer_count_each_port_) {\n    const auto& 
port_name = buffer_count_item.first;\n    auto buffer_count = buffer_count_item.second;\n    auto max_buffer_count = (*stream_count_each_port_)[port_name];\n    if (buffer_count < max_buffer_count) {\n      return false;\n    }\n  }\n\n  // all end flag received\n  return true;\n}\n\nbool MatchBufferCache::IsEndFlag() const { return is_end_flag_; }\n\nconst std::unordered_map<std::string, std::shared_ptr<Buffer>>&\nMatchBufferCache::GetBuffers() const {\n  if (!is_placeholder_) {\n    return buffer_cache_;\n  }\n\n  // match at placeholder, all buffer mark as placeholder\n  for (const auto& item : buffer_cache_) {\n    const auto& buffer = item.second;\n    auto index_info = BufferManageView::GetIndexInfo(buffer);\n    index_info->MarkAsPlaceholder();\n  }\n  return buffer_cache_;\n}\n\nvoid InPortStreamInfo::ReceiveBuffer(std::shared_ptr<Buffer>& buffer) {}\n\nsize_t InPortStreamInfo::GetReceivedBufferCount() { return 0; }\n\nsize_t InPortStreamInfo::GetReceivedStreamCount() { return 0; }\n\nbool InPortStreamInfo::ReachEnd() { return false; }\n\nMatchStreamCache::MatchStreamCache(\n    std::string node_name, size_t port_count,\n    std::unordered_map<std::string, size_t>* stream_count_each_port)\n    : node_name_(std::move(node_name)),\n      port_count_(port_count),\n      stream_count_each_port_(stream_count_each_port) {}\n\nMatchStreamCache::~MatchStreamCache() = default;\n\nStatus MatchStreamCache::CacheBuffer(const std::string& port_name,\n                                     std::shared_ptr<Buffer>& buffer) {\n  std::shared_ptr<MatchBufferCache> match_buffer;\n  auto buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n  size_t buffer_index = 0;\n\n  if ((*stream_count_each_port_)[port_name] > 1) {\n    // combine condition result, rewrite input index info\n    auto buffer_before_condition =\n        buffer_index_info->GetInheritInfo()->GetInheritFrom();\n    // should not change origin input, need copy buffer\n    buffer = buffer->Copy();\n    
BufferManageView::SetIndexInfo(buffer, buffer_before_condition);\n    buffer_index = buffer_before_condition->GetIndex();\n  } else {\n    buffer_index = buffer_index_info->GetIndex();\n  }\n\n  auto item = match_buffers_.find(buffer_index);\n  if (item == match_buffers_.end()) {\n    match_buffer = std::make_shared<MatchBufferCache>(port_count_,\n                                                      stream_count_each_port_);\n    match_buffers_[buffer_index] = match_buffer;\n  } else {\n    match_buffer = item->second;\n  }\n\n  auto ret = match_buffer->CacheBuffer(port_name, buffer);\n  if (ret == STATUS_FAULT) {\n    MBLOG_ERROR << \"node \" << node_name_ << \" match port \" << port_name\n                << \" failed\";\n    return ret;\n  }\n  // ret could be exist, success\n\n  if (!match_buffer->IsMatched()) {\n    return ret;\n  }\n\n  // move to ready cache\n  ready_match_buffers_[buffer_index] = match_buffer;\n  match_buffers_.erase(buffer_index);\n  ++cur_input_count_in_stream_;\n\n  // check stream end\n  if (match_buffer->IsEndFlag()) {\n    end_flag_received_ = true;\n    total_input_count_in_stream_ = buffer_index + 1;\n  }\n\n  return ret;\n}\n\nstd::shared_ptr<PortDataMap> MatchStreamCache::PopReadyMatchBuffers(\n    bool in_order, bool gather_all) {\n  std::list<std::shared_ptr<MatchBufferCache>> pop_match_buffers;\n  if (gather_all && !(end_flag_received_ && ready_match_buffers_.size() ==\n                                                total_input_count_in_stream_)) {\n    // not all received\n    return nullptr;\n  }\n\n  for (auto match_buffer_iter = ready_match_buffers_.begin();\n       match_buffer_iter != ready_match_buffers_.end();) {\n    const auto& buffer_index = match_buffer_iter->first;\n    auto& match_buffer = match_buffer_iter->second;\n\n    if (in_order && buffer_index != index_in_order_) {\n      break;\n    }\n\n    pop_match_buffers.push_back(match_buffer);\n    match_buffer_iter = 
ready_match_buffers_.erase(match_buffer_iter);\n    ++index_in_order_;\n  }\n\n  auto ready_port_buffers = std::make_shared<PortDataMap>();\n  size_t match_buffer_count = pop_match_buffers.size();\n  for (auto& match_buffer_cache : pop_match_buffers) {\n    auto port_buffer_map = match_buffer_cache->GetBuffers();\n    for (auto& item : port_buffer_map) {\n      const auto& port_name = item.first;\n      auto& buffer = item.second;\n      auto& buffer_list = (*ready_port_buffers)[port_name];\n      if (buffer_list.empty()) {\n        buffer_list.reserve(match_buffer_count);\n      }\n      (*ready_port_buffers)[port_name].push_back(buffer);\n    }\n  }\n\n  return ready_port_buffers;\n}\n\nvoid MatchStreamCache::SetSession(std::shared_ptr<Session> session) {\n  session_ = std::move(session);\n}\n\nstd::shared_ptr<Session> MatchStreamCache::GetSession() { return session_; }\n\nbool MatchStreamCache::IsStreamEnd() {\n  return total_input_count_in_stream_ != 0 &&\n         cur_input_count_in_stream_ == total_input_count_in_stream_;\n}\n\nsize_t MatchStreamCache::TotalInputCount() {\n  return total_input_count_in_stream_;\n}\n\nvoid MatchStreamCache::UpdateInputStreamInfo(const std::string& port_name,\n                                             std::shared_ptr<Buffer>& buffer) {\n  // will use this info to analysis app problem and performance\n  auto item = in_port_stream_info_map_.find(port_name);\n  std::shared_ptr<InPortStreamInfo> stream_info;\n  if (item == in_port_stream_info_map_.end()) {\n    stream_info = std::make_shared<InPortStreamInfo>();\n    in_port_stream_info_map_[port_name] = stream_info;\n  } else {\n    stream_info = item->second;\n  }\n\n  stream_info->ReceiveBuffer(buffer);\n}\n\nInputMatchStreamManager::InputMatchStreamManager(std::string node_name,\n                                                 size_t queue_size,\n                                                 size_t port_count)\n    : node_name_(std::move(node_name)),\n      
queue_size_(queue_size),\n      port_count_(port_count) {}\n\nInputMatchStreamManager::~InputMatchStreamManager() = default;\n\nsize_t InputMatchStreamManager::GetInputStreamCount() {\n  return match_stream_cache_map_.size();\n}\n\nvoid InputMatchStreamManager::SetInputBufferInOrder(bool is_input_in_order) {\n  is_input_in_order_ = is_input_in_order;\n}\n\nvoid InputMatchStreamManager::SetInputStreamGatherAll(bool need_gather_all) {\n  need_gather_all_ = need_gather_all;\n}\n\nvoid InputMatchStreamManager::UpdateStreamCountEachPort(\n    std::unordered_map<std::string, size_t>&& stream_count_each_port) {\n  stream_count_each_port_ = stream_count_each_port;\n}\n\nStatus InputMatchStreamManager::LoadData(\n    std::vector<std::shared_ptr<InPort>>& data_ports,\n    const std::function<bool(std::shared_ptr<Buffer>)>& drop_filter) {\n  if (port_inherit_backward_level_.empty() &&\n      !InitInheritBackwardLevel(data_ports)) {\n    // can not process data\n    return STATUS_OK;\n  }\n\n  bool has_read_data = false;\n  for (auto& data_port : data_ports) {\n    const auto& port_name = data_port->GetName();\n    auto read_count = GetReadCount(port_name);\n    if (read_count == 0) {\n      // too much cache for this port, stop read this port\n      data_port->SetActiveState(false);\n      continue;\n    }\n\n    std::vector<std::shared_ptr<Buffer>> buffer_list;\n    data_port->Recv(buffer_list, read_count);\n    if (!buffer_list.empty()) {\n      has_read_data = true;\n    }\n    auto backward_level = port_inherit_backward_level_[port_name];\n    for (auto& buffer : buffer_list) {\n      if (drop_filter && drop_filter(buffer)) {\n        continue;\n      }\n\n      auto ret = CacheBuffer(data_port->GetName(), buffer, backward_level);\n      if (!ret) {\n        return {STATUS_FAULT, \"port \" + port_name + \" match stream failed\"};\n      }\n    }\n  }\n\n  if (has_read_data) {\n    // cache changed, we need activate all port to trigger node run\n    for (auto& port : 
data_ports) {\n      port->SetActiveState(true);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus InputMatchStreamManager::GenMatchStreamData(\n    std::list<std::shared_ptr<MatchStreamData>>& match_stream_list) {\n  for (auto cache_iter = match_stream_cache_map_.begin();\n       cache_iter != match_stream_cache_map_.end();) {\n    auto* match_key = cache_iter->first;\n    auto& match_stream_cache = cache_iter->second;\n    auto ready_port_buffers = match_stream_cache->PopReadyMatchBuffers(\n        is_input_in_order_, need_gather_all_);\n\n    if (ready_port_buffers != nullptr && !ready_port_buffers->empty()) {\n      auto match_stream_data = std::make_shared<MatchStreamData>();\n      match_stream_data->SetBufferList(ready_port_buffers);\n      match_stream_data->SetStreamMatchKey(match_key);\n      match_stream_data->SetSession(match_stream_cache->GetSession());\n      match_stream_list.push_back(match_stream_data);\n\n      DecreaseAllPortBufferCount(match_stream_data->GetDataCount());\n    }\n\n    if (match_stream_cache->IsStreamEnd()) {\n      MBLOG_DEBUG\n          << \"node \" << node_name_ << \", stop input match stream \" << match_key\n          << \", total input count \" << match_stream_cache->TotalInputCount()\n          << \", id \"\n          << match_stream_cache->GetSession()->GetSessionCtx()->GetSessionId();\n      cache_iter = match_stream_cache_map_.erase(cache_iter);\n      continue;\n    }\n\n    ++cache_iter;\n  }\n\n  return STATUS_OK;\n}\n\nStatus InputMatchStreamManager::CacheBuffer(const std::string& port_name,\n                                            std::shared_ptr<Buffer>& buffer,\n                                            size_t backward_level) {\n  auto buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n  auto stream = buffer_index_info->GetStream();\n  if (stream->GetSession()->IsAbort()) {\n    // no need to cache buffer, session is abort\n    return STATUS_OK;\n  }\n  // Match different port\n  auto* 
stream_match_key =\n      GetInputStreamMatchKey(buffer_index_info, backward_level);\n  auto match_stream_cache_item = match_stream_cache_map_.find(stream_match_key);\n  std::shared_ptr<MatchStreamCache> match_stream_cache;\n  if (match_stream_cache_item == match_stream_cache_map_.end()) {\n    match_stream_cache = std::make_shared<MatchStreamCache>(\n        node_name_, port_count_, &stream_count_each_port_);\n    match_stream_cache->SetSession(\n        buffer_index_info->GetStream()->GetSession());\n    match_stream_cache_map_[stream_match_key] = match_stream_cache;\n    MBLOG_DEBUG << \"node \" << node_name_ << \", start input match stream \"\n                << stream_match_key << \", id \"\n                << stream->GetSession()->GetSessionCtx()->GetSessionId();\n  } else {\n    match_stream_cache = match_stream_cache_item->second;\n  }\n\n  auto ret = match_stream_cache->CacheBuffer(port_name, buffer);\n  if (ret == STATUS_SUCCESS) {\n    // in case exist and fault, will not record buffer count\n    IncreaseOnePortBufferCount(port_name);\n  } else if (ret == STATUS_FAULT) {\n    return ret;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nMatchKey* InputMatchStreamManager::GetInputStreamMatchKey(\n    const std::shared_ptr<BufferIndexInfo>& index_info, size_t backward_level) {\n  MatchKey* stream_match_key = nullptr;\n  // go back to find same level inherit info\n  auto cur_buffer_index = index_info;\n  std::shared_ptr<BufferInheritInfo> cur_inherit_info =\n      index_info->GetInheritInfo();\n  for (size_t i = 0; i < backward_level; ++i) {\n    cur_buffer_index = cur_inherit_info->GetInheritFrom();\n    cur_inherit_info = cur_buffer_index->GetInheritInfo();\n  }\n\n  auto inherit_buffer = cur_inherit_info->GetInheritFrom();\n  if (cur_inherit_info->GetType() == BufferProcessType::EXPAND) {\n    /**\n     * 1.match at expand level, one buffer will expand a stream,\n     * so child stream match at parent buffer\n     * in port1: buffer1, buffer2     out port1: 
stream1, stream2\n     * in port2: buffer1, buffer2 ==> out port2: stream1, stream2\n     * in port3: buffer1, buffer2     out port3: stream1, stream2\n     **/\n    stream_match_key = MatchKey::AsKey(inherit_buffer.get());\n  } else if (cur_inherit_info->GetType() ==\n             BufferProcessType::CONDITION_START) {\n    /**\n     * 1.match at if-else level, one stream will divide into two streams,\n     * so child stream match at parent stream\n     **/\n    stream_match_key = MatchKey::AsKey(inherit_buffer->GetStream().get());\n  } else {\n    MBLOG_ERROR << \"node \" << node_name_\n                << \", get input stream match key failed, wrong inherit type \"\n                << (size_t)(cur_inherit_info->GetType());\n    return nullptr;\n  }\n\n  return stream_match_key;\n}\n\nbool InputMatchStreamManager::InitInheritBackwardLevel(\n    std::vector<std::shared_ptr<InPort>>& data_ports) {\n  size_t min_deepth = SIZE_MAX;\n  std::unordered_map<std::string, size_t> port_inherit_deepth_map;\n  auto all_port_has_data = true;\n  std::vector<std::shared_ptr<InPort>> valid_ports;\n  valid_ports.reserve(data_ports.size());\n  for (auto& port : data_ports) {\n    std::shared_ptr<Buffer> first_buffer;\n    auto get_result = port->GetQueue()->Front(&first_buffer);\n    if (!get_result) {\n      // this port has no data, can not test match relationship\n      all_port_has_data = false;\n      continue;\n    }\n\n    auto buffer_index = BufferManageView::GetIndexInfo(first_buffer);\n    auto inherit_info = buffer_index->GetInheritInfo();\n\n    auto deepth = inherit_info->GetDeepth();\n    min_deepth = std::min(min_deepth, deepth);\n    port_inherit_deepth_map[port->GetName()] = deepth;\n\n    valid_ports.push_back(port);\n  }\n\n  if (!all_port_has_data) {\n    // we need suppress the valid port, only invalid port has data can trigger\n    // this node run\n    for (auto& valid_port : valid_ports) {\n      valid_port->SetActiveState(false);\n    }\n\n    return false;\n 
 }\n\n  // all match to min deepth\n  for (auto& port_deepth_item : port_inherit_deepth_map) {\n    const auto& port_name = port_deepth_item.first;\n    auto& deepth = port_deepth_item.second;\n    auto backward_level = deepth - min_deepth;\n    port_inherit_backward_level_[port_name] = backward_level;\n    MBLOG_INFO << \"node \" << node_name_ << \", port \" << port_name\n               << \", inherit backward level \" << backward_level;\n  }\n\n  return true;\n}\n\nvoid InputMatchStreamManager::IncreaseOnePortBufferCount(\n    const std::string& port_name, size_t count) {\n  port_cache_count_map_[port_name] += count;\n}\n\nvoid InputMatchStreamManager::DecreaseAllPortBufferCount(size_t count) {\n  for (auto& count_item : port_cache_count_map_) {\n    count_item.second -= count;\n  }\n}\n\nsize_t InputMatchStreamManager::GetReadCount(const std::string& port_name) {\n  auto cur_cache_count = port_cache_count_map_[port_name];\n  if (cur_cache_count >= max_cache_count_) {\n    MBLOG_WARN << \"node \" << node_name_ << \", port \" << port_name\n               << \", cache count \" << cur_cache_count << \" is great than max \"\n               << max_cache_count_;\n    return 0;\n  }\n\n  auto read_count = max_cache_count_ - cur_cache_count;\n  if (read_count > queue_size_) {\n    read_count = queue_size_;\n  }\n\n  // find shortest port\n  const std::string* shortest_port_name = nullptr;\n  size_t shortest_port_length = SIZE_MAX;\n  for (auto& port_cache_count_item : port_cache_count_map_) {\n    if (port_cache_count_item.second < shortest_port_length) {\n      shortest_port_length = port_cache_count_item.second;\n      shortest_port_name = &port_cache_count_item.first;\n    }\n  }\n\n  // no shortest or current port is shortest, read queue_size\n  if (shortest_port_name == nullptr || *shortest_port_name == port_name) {\n    return read_count;\n  }\n\n  // this port has too much cache, wait shortest port\n  if (cur_cache_count + read_count - shortest_port_length > 2 * 
queue_size_) {\n    MBLOG_DEBUG << \"node \" << node_name_ << \", port \" << port_name\n                << \" cache count \" << cur_cache_count + read_count\n                << \" is great than min port \" << *shortest_port_name\n                << \" cache count \" << shortest_port_length;\n    return 0;\n  }\n\n  return read_count;\n}\n\nvoid InputMatchStreamManager::Clean() {\n  for (auto iter = match_stream_cache_map_.begin();\n       iter != match_stream_cache_map_.end();) {\n    auto& match_cache = iter->second;\n    if (match_cache->GetSession()->IsAbort()) {\n      // session abort\n      iter = match_stream_cache_map_.erase(iter);\n      continue;\n    }\n\n    ++iter;\n  }\n}\n\nvoid OutputMatchStream::SetSession(std::shared_ptr<Session> session) {\n  session_ = std::move(session);\n}\n\nstd::shared_ptr<Session> OutputMatchStream::GetSession() { return session_; }\n\nsize_t OutputMatchStream::Size() { return port_stream_map_.size(); }\n\nbool OutputMatchStream::Empty() { return port_stream_map_.empty(); }\n\nstd::shared_ptr<Stream> OutputMatchStream::GetStream(\n    const std::string& port_name) {\n  auto item = port_stream_map_.find(port_name);\n  if (item == port_stream_map_.end()) {\n    return nullptr;\n  }\n\n  return item->second;\n}\n\nstd::shared_ptr<Stream> OutputMatchStream::CreateStream(\n    const std::string& port_name) {\n  auto stream = std::make_shared<Stream>(session_);\n  port_stream_map_[port_name] = stream;\n  return stream;\n}\n\nOutputMatchStreamManager::OutputMatchStreamManager(\n    std::string node_name, std::set<std::string>&& output_port_names)\n    : node_name_(std::move(node_name)), output_port_names_(output_port_names) {}\n\nOutputMatchStreamManager::~OutputMatchStreamManager() = default;\n\nsize_t OutputMatchStreamManager::GetOutputStreamCount() {\n  return output_stream_map_.size();\n}\n\nvoid OutputMatchStreamManager::SetNeedNewIndex(bool need_new_index) {\n  need_new_index_ = need_new_index;\n}\n\nStatus 
OutputMatchStreamManager::UpdateStreamInfo(\n    const std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>&\n        stream_data_map,\n    const std::unordered_map<std::string, std::shared_ptr<DataMeta>>&\n        port_stream_meta,\n    const std::shared_ptr<Session>& session) {\n  if (stream_data_map.empty() || stream_data_map.begin()->second.empty()) {\n    // no data to process\n    return STATUS_OK;\n  }\n  auto* match_key = GetOutputStreamMatchKey(stream_data_map);\n  if (match_key == nullptr) {\n    MBLOG_ERROR << \"node \" << node_name_\n                << \" get output stream match key failed\";\n    return STATUS_FAULT;\n  }\n  auto& output_match_stream = output_stream_map_[match_key];\n  if (output_match_stream.Empty()) {\n    MBLOG_DEBUG << \"node \" << node_name_ << \", start output match stream \"\n                << match_key << \", id \"\n                << session->GetSessionCtx()->GetSessionId();\n    output_match_stream.SetSession(session);\n    GenerateOutputStream(output_match_stream, stream_data_map, port_stream_meta,\n                         session);\n  }\n\n  size_t end_stream_count = 0;\n  std::stringstream stream_count_stats;\n  for (const auto& stream_data : stream_data_map) {\n    const auto& port_name = stream_data.first;\n    const auto& port_data_list = stream_data.second;\n    auto stream = output_match_stream.GetStream(port_name);\n    if (stream == nullptr) {\n      MBLOG_ERROR << \"port [\" << port_name\n                  << \"] in output data is not defined in node\";\n      return STATUS_FAULT;\n    }\n    for (const auto& port_data : port_data_list) {\n      if (port_data == nullptr) {\n        // if-else empty output, drop it\n        continue;\n      }\n      auto buffer_index = BufferManageView::GetIndexInfo(port_data);\n      buffer_index->SetStream(stream);\n      SetIndexInStream(buffer_index, stream);\n      if (buffer_index->IsEndFlag()) {\n        // output index will not great than end_flag index\n  
      stream->SetMaxBufferCount(buffer_index->GetIndex() + 1);\n      }\n    }\n\n    if (stream->ReachEnd()) {  // output stream is over\n      ++end_stream_count;\n      stream_count_stats << \", port \" << port_name << \", out \";\n      stream_count_stats << stream->GetBufferCount();\n    }\n  }\n\n  if (end_stream_count == 0) {\n    return STATUS_OK;\n  }\n\n  if (end_stream_count != stream_data_map.size()) {\n    MBLOG_ERROR << \"node \" << node_name_\n                << \", all output port stream should finish togather\";\n    return STATUS_FAULT;\n  }\n\n  output_stream_map_.erase(match_key);\n  MBLOG_DEBUG << \"node \" << node_name_ << \", stop output match stream \"\n              << match_key << stream_count_stats.str() << \", id \"\n              << session->GetSessionCtx()->GetSessionId();\n  return STATUS_OK;\n}\n\nMatchKey* OutputMatchStreamManager::GetOutputStreamMatchKey(\n    const std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>&\n        stream_data_map) {\n  std::shared_ptr<Buffer> not_null_output_buffer;\n  for (const auto& port_iter : stream_data_map) {\n    const auto& port_data_list = port_iter.second;\n    not_null_output_buffer = port_data_list.front();\n    if (not_null_output_buffer != nullptr) {\n      break;\n    }\n  }\n\n  if (not_null_output_buffer == nullptr) {\n    MBLOG_ERROR << \"node \" << node_name_ << \", all port output buffer is null\";\n    return nullptr;\n  }\n\n  auto output_buffer_index_info =\n      BufferManageView::GetIndexInfo(not_null_output_buffer);\n  auto output_inherit_info = output_buffer_index_info->GetInheritInfo();\n  auto inherit_from_buffer = output_inherit_info->GetInheritFrom();\n  if (output_inherit_info->GetType() == BufferProcessType::EXPAND) {\n    // output match at expand buffer\n    return MatchKey::AsKey(inherit_from_buffer.get());\n  }\n\n  // output match at condition stream\n  return MatchKey::AsKey(inherit_from_buffer->GetStream().get());\n}\n\nvoid 
OutputMatchStreamManager::GenerateOutputStream(\n    OutputMatchStream& output_match_stream,\n    const std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>&\n        stream_data_map,\n    const std::unordered_map<std::string, std::shared_ptr<DataMeta>>&\n        port_stream_meta,\n    const std::shared_ptr<Session>& session) {\n  // visit input stream, for collapse, will visit expand input\n  std::shared_ptr<Buffer> not_null_output_buffer;\n  for (const auto& port_iter : stream_data_map) {\n    const auto& port_data_list = port_iter.second;\n    not_null_output_buffer = port_data_list.front();\n    if (not_null_output_buffer != nullptr) {\n      break;\n    }\n  }\n  auto out_buffer_index =\n      BufferManageView::GetIndexInfo(not_null_output_buffer);\n  std::shared_ptr<StreamOrder> stream_order;\n  size_t input_buffer_index = 0;\n  auto inherit_stream_meta = std::make_shared<DataMeta>();\n  const auto& input_stream_data_map =\n      out_buffer_index->GetProcessInfo()->GetParentBuffers();\n  for (const auto& in_port_data_item : input_stream_data_map) {\n    const auto& port_data_list = in_port_data_item.second;\n    const auto& first_in_buffer = port_data_list.front();\n    auto in_port_stream = first_in_buffer->GetStream();\n    // combine all input port stream meta\n    auto stream_meta = in_port_stream->GetStreamMeta();\n    if (stream_meta != nullptr) {\n      auto metas = stream_meta->GetMetas();\n      for (auto& meta_item : metas) {\n        inherit_stream_meta->SetMeta(meta_item.first, meta_item.second);\n      }\n    }\n    // all input stream order is same, only get one\n    if (stream_order == nullptr) {\n      stream_order = in_port_stream->GetStreamOrder()->Copy();\n      input_buffer_index = first_in_buffer->GetIndex();\n    }\n  }\n\n  // modify stream order\n  auto process_info = out_buffer_index->GetProcessInfo();\n  if (process_info->GetType() == BufferProcessType::EXPAND) {\n    // need record expand at which buffer\n    
stream_order->Expand(input_buffer_index);\n  } else if (process_info->GetType() == BufferProcessType::COLLAPSE) {\n    stream_order->Collapse();\n  }\n\n  // generate output stream\n  for (const auto& output_port_name : output_port_names_) {\n    if (stream_data_map.find(output_port_name) == stream_data_map.end()) {\n      // output port has no data, no need to create output stream\n      continue;\n    }\n\n    auto new_stream = output_match_stream.CreateStream(output_port_name);\n    // write stream meta\n    auto new_stream_meta = std::make_shared<DataMeta>(*inherit_stream_meta);\n    auto port_stream_meta_item = port_stream_meta.find(output_port_name);\n    if (port_stream_meta_item != port_stream_meta.end()) {\n      const auto& stream_meta = port_stream_meta_item->second;\n      for (auto& meta_item : stream_meta->GetMetas()) {\n        new_stream_meta->SetMeta(meta_item.first, meta_item.second);\n      }\n    }\n    new_stream->SetStreamMeta(new_stream_meta);\n    // write stream order\n    new_stream->SetStreamOrder(stream_order);\n  }\n}\n\nvoid OutputMatchStreamManager::SetIndexInStream(\n    const std::shared_ptr<BufferIndexInfo>& buffer_index,\n    const std::shared_ptr<Stream>& stream) {\n  if (need_new_index_) {\n    buffer_index->SetIndex(stream->GetBufferCount());\n  }\n\n  stream->IncreaseBufferCount();\n}\n\nvoid OutputMatchStreamManager::Clean() {\n  for (auto iter = output_stream_map_.begin();\n       iter != output_stream_map_.end();) {\n    auto& output_match_stream = iter->second;\n    if (output_match_stream.GetSession()->IsAbort()) {\n      iter = output_stream_map_.erase(iter);\n      continue;\n    }\n\n    ++iter;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/node.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"){}\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/node.h\"\n\n#include <functional>\n#include <utility>\n\n#include \"modelbox/port.h\"\n#include \"modelbox/session.h\"\n\nnamespace modelbox {\n\n#define ReturnPortNames(port_list)     \\\n  std::set<std::string> name_list;     \\\n  for (auto& port : (port_list)) {     \\\n    name_list.insert(port->GetName()); \\\n  }                                    \\\n  return name_list;\n\n#define ReturnPort(port_list, target_name)  \\\n  for (auto& port : (port_list)) {          \\\n    if (port->GetName() == (target_name)) { \\\n      return port;                          \\\n    }                                       \\\n  }                                         \\\n  return nullptr;\n\nNodeBase::NodeBase() = default;\n\nNodeBase::~NodeBase() = default;\n\nvoid NodeBase::SetName(const std::string& name) { name_ = name; }\n\nstd::string NodeBase::GetName() const { return name_; }\n\nvoid NodeBase::Close(){};\n\nvoid NodeBase::SetPriority(int32_t priortity) { priority_ = priortity; }\n\nint32_t NodeBase::GetPriority() const { return priority_; }\n\nvoid NodeBase::SetQueueSize(int32_t queue_size) { queue_size_ = queue_size; }\n\nint32_t NodeBase::GetQueueSize() const { return queue_size_; }\n\nstd::shared_ptr<EventPort> NodeBase::GetEventPort() { return event_port_; }\n\nStatus 
NodeBase::Init(const std::set<std::string>& input_port_names,\n                      const std::set<std::string>& output_port_names,\n                      const std::shared_ptr<Configuration>& config) {\n  config_ = config;\n  queue_size_ = config_->GetUint64(\"queue_size\", DEFAULT_QUEUE_SIZE);\n  if (0 == queue_size_) {\n    MBLOG_ERROR << \"queue size config is zero\";\n    return STATUS_INVALID;\n  }\n  event_queue_size_ = DEFAULT_QUEUE_EVENT;\n  return InitPorts(input_port_names, output_port_names, config);\n}\n\nStatus NodeBase::InitPorts(const std::set<std::string>& input_port_names,\n                           const std::set<std::string>& output_port_names,\n                           const std::shared_ptr<Configuration>& config) {\n  // create event port\n  event_port_ = std::make_shared<EventPort>(EVENT_PORT_NAME, shared_from_this(),\n                                            GetPriority(), event_queue_size_);\n  // create input port\n  input_ports_.clear();\n  input_ports_.reserve(input_port_names.size());\n  for (const auto& input_port_name : input_port_names) {\n    auto port_queue_size =\n        config->GetUint64(\"queue_size_\" + input_port_name, queue_size_);\n    if (0 == port_queue_size) {\n      MBLOG_ERROR << \"queue size in zero for input \" << input_port_name;\n      return STATUS_INVALID;\n    }\n\n    input_ports_.push_back(std::make_shared<InPort>(\n        input_port_name, shared_from_this(), GetPriority(), port_queue_size));\n  }\n  // create default external port if node has no input port\n  if (input_port_names.empty()) {\n    auto extern_queue_size =\n        config_->GetUint64(\"queue_size_external\", DEFAULT_QUEUE_SIZE_EXTERNAL);\n    if (extern_queue_size == 0) {\n      MBLOG_ERROR << \"queue_size_external config is zero\";\n      return STATUS_INVALID;\n    }\n    extern_ports_.push_back(\n        std::make_shared<InPort>(EXTERNAL_PORT_NAME, shared_from_this(),\n                                 GetPriority(), 
extern_queue_size));\n  }\n  // create output port\n  output_ports_.clear();\n  output_ports_.reserve(output_port_names.size());\n  for (const auto& output_port_name : output_port_names) {\n    auto output_port =\n        std::make_shared<OutPort>(output_port_name, shared_from_this());\n    output_ports_.push_back(output_port);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nsize_t NodeBase::GetInputNum() { return input_ports_.size(); }\n\nsize_t NodeBase::GetExternNum() { return extern_ports_.size(); }\n\nsize_t NodeBase::GetOutputNum() { return output_ports_.size(); }\n\nstd::set<std::string> NodeBase::GetInputNames() {\n  ReturnPortNames(input_ports_);\n}\n\nstd::set<std::string> NodeBase::GetExternNames() {\n  ReturnPortNames(extern_ports_);\n}\n\nstd::set<std::string> NodeBase::GetOutputNames() {\n  ReturnPortNames(output_ports_);\n}\n\nstd::vector<std::shared_ptr<InPort>> NodeBase::GetInputPorts() const {\n  return input_ports_;\n}\n\nstd::vector<std::shared_ptr<OutPort>> NodeBase::GetOutputPorts() const {\n  return output_ports_;\n}\n\nstd::vector<std::shared_ptr<InPort>> NodeBase::GetExternalPorts() const {\n  return extern_ports_;\n}\n\nstd::shared_ptr<InPort> NodeBase::GetInputPort(const std::string& port_name) {\n  ReturnPort(input_ports_, port_name);\n}\n\nstd::shared_ptr<InPort> NodeBase::GetExternalPort(\n    const std::string& port_name) {\n  ReturnPort(extern_ports_, port_name);\n}\n\nstd::shared_ptr<OutPort> NodeBase::GetOutputPort(const std::string& port_name) {\n  ReturnPort(output_ports_, port_name);\n}\n\nvoid NodeBase::SetAllInportActivated(bool flag) {\n  for (auto& port : input_ports_) {\n    port->SetActiveState(flag);\n  }\n}\n\nStatus NodeBase::SendBatchEvent(\n    std::vector<std::shared_ptr<FlowUnitInnerEvent>>& event_list,\n    bool update_active_time) {\n  if (!event_port_) {\n    MBLOG_ERROR << \"Event port in null\";\n    return STATUS_FAULT;\n  }\n\n  auto status = event_port_->SendBatch(event_list);\n  if (!status) {\n    return status;\n  
}\n\n  event_port_->NotifyPushEvent(update_active_time);\n  return STATUS_SUCCESS;\n}\n\nStatus NodeBase::SendEvent(std::shared_ptr<FlowUnitInnerEvent>& event,\n                           bool update_active_time) {\n  if (!event_port_) {\n    MBLOG_ERROR << \"Event port in null\";\n    return STATUS_FAULT;\n  }\n\n  auto status = event_port_->Send(event);\n  if (!status) {\n    return status;\n  }\n\n  event_port_->NotifyPushEvent(update_active_time);\n  return STATUS_SUCCESS;\n}\n\nvoid NodeBase::Shutdown() {\n  for (auto& port : input_ports_) {\n    port->Shutdown();\n  }\n\n  for (auto& port : extern_ports_) {\n    port->Shutdown();\n  }\n\n  event_port_->Shutdown();\n}\n\nNode::Node() = default;\n\nNode::~Node() = default;\n\nStatus Node::Init(const std::set<std::string>& input_port_names,\n                  const std::set<std::string>& output_port_names,\n                  const std::shared_ptr<Configuration>& config) {\n  auto ret = NodeBase::Init(input_port_names, output_port_names, config);\n  if (!ret) {\n    return ret;\n  }\n\n  flowunit_group_ = std::make_shared<FlowUnitGroup>(\n      flowunit_name_, flowunit_type_, flowunit_device_id_, config, profiler_);\n  if (flowunit_group_ == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  ret = flowunit_group_->Init(input_port_names, output_port_names,\n                              flowunit_manager_);\n  if (!ret) {\n    return ret;\n  }\n\n  flowunit_group_->SetNode(std::dynamic_pointer_cast<Node>(shared_from_this()));\n\n  auto port_count = GetInputNum();\n  if (port_count == 0) {\n    port_count = GetExternNum();\n  }\n\n  input_match_stream_mgr_ =\n      std::make_shared<InputMatchStreamManager>(name_, queue_size_, port_count);\n  output_match_stream_mgr_ =\n      std::make_shared<OutputMatchStreamManager>(name_, GetOutputNames());\n  ret = InitNodeProperties();\n  if (!ret) {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nstd::unordered_map<std::string, std::shared_ptr<Node>> Node::GetMatchNodes() {\n  
return match_node_;\n}\n\nvoid Node::SetMatchNode(const std::string& name,\n                        std::shared_ptr<Node> match_node) {\n  match_node_[name] = std::move(match_node);\n}\n\nstd::shared_ptr<Node> Node::GetMatchNode() { return match_node_[\"match_node\"]; }\n\nstd::shared_ptr<Node> Node::GetMatchNode(const std::string& port_name) {\n  return match_node_[port_name];\n}\n\nstd::shared_ptr<FlowUnitDesc> Node::GetFlowUnitDesc() {\n  return flowunit_group_->GetExecutorUnit()->GetFlowUnitDesc();\n}\n\nStatus Node::InitNodeProperties() {\n  // read flowunit desc\n  auto flowunit_desc = flowunit_group_->GetExecutorUnit()->GetFlowUnitDesc();\n\n  SetExceptionVisible(flowunit_desc->IsExceptionVisible());\n  SetInputContiguous(flowunit_desc->IsInputContiguous());\n\n  SetFlowType(flowunit_desc->GetFlowType());\n  SetOutputType(flowunit_desc->GetOutputType());\n  SetConditionType(flowunit_desc->GetConditionType());\n  SetLoopType(flowunit_desc->GetLoopType());\n\n  input_match_stream_mgr_->SetInputStreamGatherAll(\n      GetOutputType() == COLLAPSE && flowunit_desc->IsCollapseAll());\n\n  // update constrain\n  UpdatePropConstrain(flowunit_desc);\n\n  // Set input & output stream options\n  if (GetFlowType() == STREAM || GetOutputType() == COLLAPSE) {\n    input_match_stream_mgr_->SetInputBufferInOrder(true);\n  }\n\n  if (GetConditionType() != ConditionType::NONE) {\n    input_match_stream_mgr_->SetInputBufferInOrder(true);\n  }\n\n  if (GetLoopType() != LoopType::NOT_LOOP) {\n    input_match_stream_mgr_->SetInputStreamGatherAll(true);\n  }\n\n  output_match_stream_mgr_->SetNeedNewIndex(NeedNewIndex());\n  return STATUS_OK;\n}\n\nvoid Node::UpdatePropConstrain(\n    const std::shared_ptr<FlowUnitDesc>& flowunit_desc) {\n  /**\n   * constrain, Take effect by order\n   * 1. expand: default normal\n   * 2. collapse: default normal\n   * 3. condition: only normal\n   * 4. loop: only normal\n   * 5. 
origin: default stream\n   **/\n  // constrain expand & collapse, not recommand to set flow type when use expand\n  // & collapse\n  auto output_type = GetOutputType();\n  if (output_type != FlowOutputType::ORIGIN) {\n    SetConditionType(ConditionType::NONE);\n    SetLoopType(LoopType::NOT_LOOP);\n    if (!flowunit_desc->IsUserSetFlowType()) {\n      SetFlowType(FlowType::NORMAL);\n    }\n    return;\n  }\n\n  // constrain condition\n  if (GetConditionType() != ConditionType::NONE) {\n    SetFlowType(NORMAL);\n    SetLoopType(LoopType::NOT_LOOP);\n    return;\n  }\n\n  // constrain loop\n  if (GetLoopType() != LoopType::NOT_LOOP) {\n    SetFlowType(NORMAL);\n    return;\n  }\n\n  // constrain origin\n  if (!flowunit_desc->IsUserSetFlowType()) {\n    SetFlowType(FlowType::STREAM);\n  }\n}\n\nvoid Node::SetFlowUnitInfo(const std::string& flowunit_name,\n                           const std::string& flowunit_type,\n                           const std::string& flowunit_device_id,\n                           std::shared_ptr<FlowUnitManager> flowunit_manager) {\n  flowunit_name_ = flowunit_name;\n  flowunit_type_ = flowunit_type;\n  flowunit_device_id_ = flowunit_device_id;\n  flowunit_manager_ = std::move(flowunit_manager);\n}\n\nstd::shared_ptr<FlowUnitGroup> Node::GetFlowUnitGroup() {\n  return flowunit_group_;\n}\n\nvoid Node::SetProfiler(std::shared_ptr<Profiler> profiler) {\n  profiler_ = std::move(profiler);\n}\n\nvoid Node::SetStats(std::shared_ptr<StatisticsItem> graph_stats) {\n  graph_stats_ = std::move(graph_stats);\n}\n\nstd::shared_ptr<ExternalData> Node::CreateExternalData(\n    const std::shared_ptr<Device>& device) {\n  if (session_mgr_ == nullptr) {\n    MBLOG_ERROR << \"session manager is null\";\n    return nullptr;\n  }\n\n  auto port = GetExternalPort(EXTERNAL_PORT_NAME);\n  if (!port) {\n    MBLOG_WARN << \"node has no external port\";\n    return nullptr;\n  }\n\n  auto session = session_mgr_->CreateSession(graph_stats_);\n  auto init_stream = 
std::make_shared<Stream>(session);\n  return std::make_shared<ExternalDataImpl>(port, device, init_stream);\n}\n\nbool Node::NeedNewIndex() {\n  if (GetOutputType() == EXPAND ||\n      (GetOutputType() == ORIGIN &&\n       (GetFlowType() == STREAM || GetConditionType() == IF_ELSE))) {\n    return true;\n  }\n\n  return false;\n}\n\nstd::unordered_map<std::string, size_t> Node::GetStreamCountEachPort() {\n  std::unordered_map<std::string, size_t> stream_count_each_port;\n  for (auto& in_port : input_ports_) {\n    auto port_count = in_port->GetConnectedPortNumber();\n    if (port_count == 0) {\n      continue;\n    }\n\n    stream_count_each_port[in_port->GetName()] = port_count;\n    if (GetLoopType() == LOOP) {\n      stream_count_each_port[in_port->GetName()] = 1;\n    }\n  }\n  return stream_count_each_port;\n}\n\nStatus Node::Open() {\n  auto external_data_create_func =\n      std::bind(&Node::CreateExternalData, this, std::placeholders::_1);\n  auto ret = flowunit_group_->Open(external_data_create_func);\n  if (!ret) {\n    MBLOG_ERROR << \"open flowunit \" << flowunit_name_ << \" failed\";\n    return ret;\n  }\n\n  is_flowunit_opened_ = true;\n  return STATUS_SUCCESS;\n}\n\nvoid Node::Close() {\n  if (flowunit_group_ == nullptr || !is_flowunit_opened_) {\n    return;\n  }\n\n  is_flowunit_opened_ = false;\n  auto ret = flowunit_group_->Close();\n  if (!ret) {\n    MBLOG_ERROR << \"close flowunit \" << flowunit_name_ << \" failed, error \"\n                << ret;\n  }\n}\n\nStatus Node::GenDataContextList(\n    std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list,\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  // one data context only generate one output match stream at time\n  std::set<std::shared_ptr<FlowUnitDataContext>> data_ctx_set;\n  for (auto& match_stream_data : match_stream_data_list) {\n    Status ret = STATUS_SUCCESS;\n    if (match_stream_data->GetEvent() != nullptr) {\n      ret = 
AppendDataContextByEvent(match_stream_data, data_ctx_set);\n    } else {\n      ret = AppendDataContextByData(match_stream_data, data_ctx_set);\n    }\n\n    if (!ret) {\n      MBLOG_ERROR << \"append data context failed, err: \" << ret;\n      return STATUS_FAULT;\n    }\n  }\n\n  data_ctx_list.assign(data_ctx_set.begin(), data_ctx_set.end());\n  return STATUS_SUCCESS;\n}\n\nStatus Node::AppendDataContextByEvent(\n    const std::shared_ptr<MatchStreamData>& match_stream_data,\n    std::set<std::shared_ptr<FlowUnitDataContext>>& data_ctx_set) {\n  auto event = match_stream_data->GetEvent();\n  auto* data_ctx_match_key = event->GetDataCtxMatchKey();\n  auto data_ctx_item = data_ctx_map_.find(data_ctx_match_key);\n  if (data_ctx_item == data_ctx_map_.end()) {\n    // might be finished\n    return STATUS_OK;\n  }\n\n  auto data_ctx = data_ctx_item->second;\n  data_ctx_set.insert(data_ctx);\n  if (GetOutputType() == COLLAPSE) {\n    // collapse sub streams to one stream, each sub stream collpase to one\n    // buffer stream collapse node process one sub stream at one process\n    if (event->GetEventCode() != FlowUnitInnerEvent::COLLAPSE_NEXT_STREAM) {\n      return {\n          STATUS_INVALID,\n          \"only support collpase next stream event at collapse node \" + name_};\n    }\n\n    auto stream_collapse_ctx =\n        std::dynamic_pointer_cast<StreamCollapseFlowUnitDataContext>(data_ctx);\n    stream_collapse_ctx->CollapseNextStream();\n  } else if (GetOutputType() == EXPAND) {\n    if (event->GetEventCode() == FlowUnitInnerEvent::EXPAND_UNFINISH_DATA) {\n      // one buffer expand to one stream, the stream still has data\n      // sent by flowunit developer\n      data_ctx->SetEvent(event->GetUserEvent());\n    } else if (event->GetEventCode() ==\n               FlowUnitInnerEvent::EXPAND_NEXT_STREAM) {\n      // expand next buffer\n      // sent by stream expand node\n      auto stream_expand_ctx =\n          
std::dynamic_pointer_cast<StreamExpandFlowUnitDataContext>(data_ctx);\n      stream_expand_ctx->ExpandNextBuffer();\n    } else {\n      return {STATUS_INVALID, \"not support event \" +\n                                  std::to_string(event->GetEventCode()) +\n                                  \" at expand node \" + name_};\n    }\n  } else {\n    // usecase: notify flowunit to process last data again\n    if (event->GetEventCode() != FlowUnitInnerEvent::EXPAND_UNFINISH_DATA) {\n      return {STATUS_INVALID, \"only support user event at node \" + name_};\n    }\n\n    if (GetFlowType() != STREAM) {\n      return {\n          STATUS_INVALID,\n          \"only support user event at stream node, not normal node \" + name_};\n    }\n\n    data_ctx->SetEvent(event->GetUserEvent());\n  }\n  return STATUS_OK;\n}\n\nStatus Node::AppendDataContextByData(\n    const std::shared_ptr<MatchStreamData>& match_stream_data,\n    std::set<std::shared_ptr<FlowUnitDataContext>>& data_ctx_set) {\n  MatchKey* data_ctx_match_key = nullptr;\n  if (GetFlowType() == STREAM) {\n    if (GetOutputType() == COLLAPSE) {\n      // collapse will match at expand, child stream after expand match at one\n      // buffer in parent stream, we need parent stream info to gather all child\n      // stream\n      auto* match_at_ancestor_buffer =\n          (BufferIndexInfo*)match_stream_data->GetStreamMatchKey();\n      auto ancestor_stream = match_at_ancestor_buffer->GetStream();\n      data_ctx_match_key = MatchKey::AsKey(ancestor_stream.get());\n    } else {  // EXPAND, ORIGIN\n      /**\n       * expand: will expand one buffer for each node run, other data left in\n       * ctx\n       * origin: one match input to one match output\n       **/\n      data_ctx_match_key = match_stream_data->GetStreamMatchKey();\n    }\n  } else {  // NORMAL\n    if (GetOutputType() == EXPAND) {\n      /** expand buffer concurrently, will generate multi output\n       * match_stream\n       **/\n      auto data_count = 
match_stream_data->GetDataCount();\n      auto first_port_data =\n          match_stream_data->GetBufferList()->begin()->second;\n      for (size_t i = 0; i < data_count; ++i) {\n        auto& buffer = first_port_data[i];\n        data_ctx_match_key =\n            MatchKey::AsKey(BufferManageView::GetIndexInfo(buffer).get());\n        auto data_ctx = AppendDataToDataContext(data_ctx_match_key,\n                                                match_stream_data, true, i);\n        data_ctx_set.insert(data_ctx);\n      }\n      return STATUS_OK;\n    }\n\n    /**\n     * collapse: collapse dirrerent match_stream concurrently\n     * origin: one match input to one match output\n     **/\n    data_ctx_match_key = match_stream_data->GetStreamMatchKey();\n  }\n\n  auto data_ctx =\n      AppendDataToDataContext(data_ctx_match_key, match_stream_data);\n  data_ctx_set.insert(data_ctx);\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<FlowUnitDataContext> Node::GetDataContext(MatchKey* key) {\n  auto item = data_ctx_map_.find(key);\n  if (item != data_ctx_map_.end()) {\n    return item->second;\n  }\n\n  return nullptr;\n}\n\nstd::shared_ptr<FlowUnitDataContext> Node::CreateDataContext(\n    MatchKey* key, const std::shared_ptr<Session>& session) {\n  std::shared_ptr<FlowUnitDataContext> data_ctx;\n  if (GetFlowType() == STREAM) {\n    if (GetOutputType() == EXPAND) {\n      data_ctx =\n          std::make_shared<StreamExpandFlowUnitDataContext>(this, key, session);\n    } else if (GetOutputType() == COLLAPSE) {\n      data_ctx = std::make_shared<StreamCollapseFlowUnitDataContext>(this, key,\n                                                                     session);\n    } else {\n      data_ctx =\n          std::make_shared<StreamFlowUnitDataContext>(this, key, session);\n      session->AddStateListener(data_ctx);\n    }\n  } else {  // NORMAL\n    if (GetOutputType() == EXPAND) {\n      data_ctx =\n          std::make_shared<NormalExpandFlowUnitDataContext>(this, key, 
session);\n    } else if (GetOutputType() == COLLAPSE) {\n      data_ctx = std::make_shared<NormalCollapseFlowUnitDataContext>(this, key,\n                                                                     session);\n    } else if (GetLoopType() == LOOP) {\n      data_ctx =\n          std::make_shared<LoopNormalFlowUnitDataContext>(this, key, session);\n    } else {\n      data_ctx =\n          std::make_shared<NormalFlowUnitDataContext>(this, key, session);\n    }\n  }\n\n  data_ctx_map_[key] = data_ctx;\n  return data_ctx;\n}\n\nstd::shared_ptr<FlowUnitDataContext> Node::AppendDataToDataContext(\n    MatchKey* key, const std::shared_ptr<MatchStreamData>& match_stream_data,\n    bool append_single_buffer, size_t buffer_index) {\n  auto data_ctx = GetDataContext(key);\n  if (data_ctx == nullptr) {\n    data_ctx = CreateDataContext(key, match_stream_data->GetSession());\n  }\n\n  auto stream_data_map = match_stream_data->GetBufferList();\n  if (append_single_buffer == false) {\n    data_ctx->WriteInputData(stream_data_map);\n    return data_ctx;\n  }\n\n  auto split_stream_data_map = std::make_shared<PortDataMap>();\n  for (auto& port_data_item : *stream_data_map) {\n    const auto& port_name = port_data_item.first;\n    auto& data_list = port_data_item.second;\n    (*split_stream_data_map)[port_name].push_back(data_list[buffer_index]);\n  }\n\n  data_ctx->WriteInputData(split_stream_data_map);\n  return data_ctx;\n}\n\nStatus Node::Recv(\n    RunType type,\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;\n  auto ret = GenInputMatchStreamData(type, match_stream_data_list);\n  if (!ret) {\n    MBLOG_ERROR << \"node \" << name_ << \" generate match stream failed, error \"\n                << ret;\n    return ret;\n  }\n\n  if (match_stream_data_list.empty()) {\n    return STATUS_SUCCESS;\n  }\n\n  ret = GenDataContextList(match_stream_data_list, data_ctx_list);\n  if (!ret) 
{\n    return ret;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Node::Process(\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  auto ret = flowunit_group_->Run(data_ctx_list);\n  if (!ret) {\n    MBLOG_ERROR << \"node \" << name_ << \" run flowunit group failed, error \"\n                << ret;\n    return ret;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Node::Send(\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  for (auto& data_ctx : data_ctx_list) {\n    std::unordered_map<std::string, BufferPtrList> stream_data_map;\n    data_ctx->PopOutputData(stream_data_map);\n    auto ret = output_match_stream_mgr_->UpdateStreamInfo(\n        stream_data_map, data_ctx->GetOutputPortStreamMeta(),\n        data_ctx->GetSession());\n    if (!ret) {\n      return ret;\n    }\n\n    for (auto& output_port : output_ports_) {\n      const auto& port_name = output_port->GetName();\n      auto item = stream_data_map.find(port_name);\n      if (item == stream_data_map.end()) {\n        if (GetLoopType() == LoopType::LOOP) {\n          // only one port has data for loop node\n          continue;\n        }\n\n        MBLOG_ERROR << \"node \" << name_ << \", missing output for port \"\n                    << port_name;\n        return STATUS_FAULT;\n      }\n\n      auto& output_datas = item->second;\n      std::vector<std::shared_ptr<Buffer>> valid_output;\n      valid_output.reserve(output_datas.size());\n      for (auto& buffer : output_datas) {\n        if (buffer == nullptr) {\n          continue;\n        }\n\n        valid_output.push_back(buffer);\n      }\n      output_port->Send(valid_output);\n    }\n  }\n  return STATUS_SUCCESS;\n}\n\nvoid Node::Clean(\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  // clear data for this run\n  for (auto& data_ctx : data_ctx_list) {\n    data_ctx->ClearData();\n  }\n\n  input_match_stream_mgr_->Clean();\n  CleanDataContext();\n  
output_match_stream_mgr_->Clean();\n\n  MBLOG_DEBUG << \"node: \" << name_\n              << \", resource state after run, input stream \"\n              << input_match_stream_mgr_->GetInputStreamCount() << \", data ctx \"\n              << data_ctx_map_.size() << \", output stream \"\n              << output_match_stream_mgr_->GetOutputStreamCount();\n}\n\nvoid Node::CleanDataContext() {\n  // remove finished & closed data for this node\n  for (auto data_ctx_iter = data_ctx_map_.begin();\n       data_ctx_iter != data_ctx_map_.end();) {\n    auto& data_ctx = data_ctx_iter->second;\n    if (!data_ctx->IsFinished() && !data_ctx->GetSession()->IsAbort()) {\n      ++data_ctx_iter;\n      continue;\n    }\n\n    auto sess_ctx = data_ctx->GetSessionContext();\n    if (GetFlowType() == STREAM && sess_ctx != nullptr) {\n      MBLOG_INFO << \"node: \" << name_\n                 << \", sess id: \" << sess_ctx->GetSessionId()\n                 << \", data ctx finished\";\n    }\n\n    data_ctx->Dispose();\n    data_ctx_iter = data_ctx_map_.erase(data_ctx_iter);\n  }\n}\n\nStatus Node::Run(RunType type) {\n  std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n  size_t process_count = 0;\n  auto ret = Recv(type, data_ctx_list);\n\n  if (!ret) {\n    return ret;\n  }\n\n  std::list<std::shared_ptr<FlowUnitDataContext>> process_ctx_list;\n\n  auto output_names_is_empty = GetOutputNames().empty();\n\n  for (auto& ctx : data_ctx_list) {\n    // process data according to batch size\n    process_count++;\n    process_ctx_list.push_back(ctx);\n\n    if (process_ctx_list.size() < flowunit_group_->GetBatchSize()) {\n      if (process_count < data_ctx_list.size()) {\n        continue;\n      }\n    }\n\n    ret = Process(process_ctx_list);\n    if (!ret) {\n      return ret;\n    }\n\n    if (!output_names_is_empty) {\n      ret = Send(process_ctx_list);\n      if (!ret) {\n        return ret;\n      }\n    } else {\n      SetLastError(process_ctx_list);\n    }\n\n    
process_ctx_list.clear();\n  }\n\n  Clean(data_ctx_list);\n  return STATUS_SUCCESS;\n}\n\nvoid Node::SetLastError(\n    std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) {\n  for (auto& data_ctx : data_ctx_list) {\n    auto sess = data_ctx->GetSession();\n    auto last_status = data_ctx->GetLastStatus();\n    if (last_status != modelbox::STATUS_OK &&\n        last_status != modelbox::STATUS_CONTINUE) {\n      sess->SetError(std::make_shared<FlowUnitError>(last_status.Errormsg()));\n      continue;\n    }\n\n    for (const auto& input_map : data_ctx->GetErrorInputs()) {\n      auto error_buffer_list = input_map.second;\n      if (!error_buffer_list.empty()) {\n        sess->SetError(std::make_shared<FlowUnitError>(\n            error_buffer_list[0]->GetErrorMsg()));\n      }\n    }\n  }\n}\n\nStatus Node::GenInputMatchStreamData(\n    RunType type,\n    std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list) {\n  switch (type) {\n    case RunType::DATA:\n      if (GetInputNum() == 0) {\n        return GenMatchStreamFromDataPorts(extern_ports_,\n                                           match_stream_data_list);\n      }\n\n      std::call_once(input_stream_count_update_flag_, [this]() {\n        input_match_stream_mgr_->UpdateStreamCountEachPort(\n            GetStreamCountEachPort());\n      });\n      return GenMatchStreamFromDataPorts(input_ports_, match_stream_data_list);\n\n    case RunType::EVENT:\n      return GenMatchStreamFromEventPorts(match_stream_data_list);\n\n    default:\n      MBLOG_ERROR << \"Invalid node run type \" << type;\n      return STATUS_INVALID;\n  }\n}\n\nStatus Node::GenMatchStreamFromDataPorts(\n    std::vector<std::shared_ptr<InPort>>& data_ports,\n    std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list) {\n  auto ret = input_match_stream_mgr_->LoadData(data_ports);\n  if (!ret) {\n    return ret;\n  }\n\n  return 
input_match_stream_mgr_->GenMatchStreamData(match_stream_data_list);\n}\n\nStatus Node::GenMatchStreamFromEventPorts(\n    std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list) {\n  FlowunitEventList events;\n  auto status = event_port_->Recv(events);\n  if (!events || events->empty()) {\n    return STATUS_SUCCESS;\n  }\n\n  event_port_->NotifyPopEvent();\n  for (auto& event : *events) {\n    auto match_stream = std::make_shared<MatchStreamData>();\n    match_stream->SetEvent(event);\n    match_stream_data_list.push_back(match_stream);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid Node::SetOutputType(FlowOutputType type) { output_type_ = type; }\n\nvoid Node::SetFlowType(FlowType type) { flow_type_ = type; }\n\nvoid Node::SetConditionType(ConditionType type) { condition_type_ = type; }\n\nvoid Node::SetLoopType(LoopType type) { loop_type_ = type; }\n\nvoid Node::SetInputContiguous(bool is_input_contiguous) {\n  is_input_contiguous_ = is_input_contiguous;\n}\n\nvoid Node::SetExceptionVisible(bool is_exception_visible) {\n  is_exception_visible_ = is_exception_visible;\n}\n\nFlowOutputType Node::GetOutputType() { return output_type_; }\n\nFlowType Node::GetFlowType() { return flow_type_; }\n\nConditionType Node::GetConditionType() { return condition_type_; }\n\nLoopType Node::GetLoopType() { return loop_type_; }\n\nbool Node::IsInputContiguous() { return is_input_contiguous_; }\n\nbool Node::IsExceptionVisible() { return is_exception_visible_; }\n\nvoid Node::SetSessionManager(SessionManager* session_mgr) {\n  session_mgr_ = session_mgr;\n}\n\nvoid Node::SetLoopOutPortName(const std::string& port_name) {\n  loop_out_port_name_ = port_name;\n}\n\nstd::string Node::GetLoopOutPortName() { return loop_out_port_name_; }\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/port.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/port.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nIPort::IPort(const std::string& name, const std::shared_ptr<NodeBase>& node)\n    : Port(name, node){};\n\nIPort::~IPort() = default;\n\nPort::Port(std::string name, const std::shared_ptr<NodeBase>& node)\n    : name_(std::move(name)), node_(node) {}\n\nPort::~Port() = default;\n\nconst std::string& Port::GetName() { return name_; }\nstd::shared_ptr<NodeBase> Port::GetNode() {\n  auto node = node_.lock();\n  return node;\n}\n\nvoid Port::Shutdown() {}\n\nInPort::InPort(const std::string& name, const std::shared_ptr<NodeBase>& node,\n               uint32_t priority, size_t event_capacity)\n    : NotifyPort(name, node, priority, event_capacity) {}\n\nInPort::~InPort() = default;\n\nStatus InPort::Init() {\n  auto node = node_.lock();\n  if (node == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid InPort::Recv(std::vector<std::shared_ptr<Buffer>>& buffer_vector,\n                  uint32_t left_buffer_num) {\n  if (left_buffer_num == 0) {\n    if (queue_->RemainCapacity() == 0) {\n      SetActiveState(false);\n    }\n    return;\n  }\n  queue_->PopBatch(&buffer_vector, -1, left_buffer_num);\n\n  if (!buffer_vector.empty()) {\n    NotifyPopEvent();\n  }\n}\n\nbool InPort::SetOutputPort(const 
std::shared_ptr<OutPort>& output_port) {\n  for (const auto& output_exist_port : output_ports) {\n    if (output_port == output_exist_port.lock()) {\n      return false;\n    }\n  }\n\n  output_ports.push_back(output_port);\n  return true;\n}\n\nsize_t InPort::GetConnectedPortNumber() { return output_ports.size(); }\n\nstd::vector<std::weak_ptr<OutPort>> InPort::GetAllOutPort() {\n  return output_ports;\n}\n\nOutPort::OutPort(const std::string& name, const std::shared_ptr<NodeBase>& node)\n    : Port(name, node) {}\n\nOutPort::~OutPort() = default;\n\nStatus OutPort::Init() {\n  auto node = node_.lock();\n  if (node == nullptr) {\n    return STATUS_INVALID;\n  }\n  return STATUS_SUCCESS;\n}\n\nStatus OutPort::Send(std::vector<std::shared_ptr<Buffer>>& buffers) {\n  bool loop;\n  auto real_node = std::dynamic_pointer_cast<Node>(GetNode());\n  LoopType loop_type{NOT_LOOP};\n  if (real_node != nullptr) {\n    loop_type = real_node->GetLoopType();\n  }\n\n  for (const auto& input_port : connected_input_ports_) {\n    loop = false;\n    auto queue = input_port->GetQueue();\n    auto priority = input_port->GetPriority();\n    std::vector<std::shared_ptr<Buffer>> port_buffers;\n    port_buffers.reserve(buffers.size());\n    for (auto& origin_buffer : buffers) {\n      auto buffer = origin_buffer->Copy();\n      BufferManageView::SetIndexInfo(\n          buffer, BufferManageView::GetIndexInfo(origin_buffer));\n      BufferManageView::SetPriority(buffer, real_node->GetPriority());\n      port_buffers.push_back(buffer);\n      // only loop flowunit itself in the loop structure\n      auto buffer_priority = BufferManageView::GetPriority(buffer);\n      if (loop_type == LOOP) {\n        BufferManageView::SetPriority(buffer, buffer_priority + 1);\n        continue;\n      }\n\n      if (buffer_priority < priority) {\n        BufferManageView::SetPriority(buffer, priority);\n        continue;\n      }\n\n      // during loop\n      loop = true;\n    }\n\n    while 
(port_buffers.size() > 0) {\n      if (loop_type == LOOP || loop) {\n        if (queue->PushBatchForce(&port_buffers, false, 0) == 0) {\n          break;\n        }\n      } else {\n        if (0 == queue->PushBatchForce(&port_buffers, true, 0)) {\n          break;\n        }\n      }\n\n      if (port_buffers.size() > 0) {\n        input_port->NotifyPushEvent();\n      }\n    }\n  }\n\n  for (const auto& input_port : connected_input_ports_) {\n    input_port->NotifyPushEvent();\n  }\n\n  return STATUS_SUCCESS;\n}\n\nbool OutPort::ConnectPort(const std::shared_ptr<InPort>& inport) {\n  if (inport == nullptr) {\n    return false;\n  }\n\n  if (!inport->SetOutputPort(shared_from_this())) {\n    return false;\n  }\n\n  auto pair = connected_input_ports_.emplace(inport);\n  return pair.second;\n}\n\nvoid OutPort::Shutdown() {\n  for (const auto& inport : connected_input_ports_) {\n    inport->Shutdown();\n  }\n}\n\nstd::set<std::shared_ptr<InPort>> OutPort::GetConnectInPort() {\n  return connected_input_ports_;\n}\n\nEventPort::EventPort(const std::string& name,\n                     const std::shared_ptr<NodeBase>& node, uint32_t priority,\n                     size_t event_capacity)\n    : NotifyPort(name, node, priority, event_capacity){};\n\nEventPort::~EventPort() = default;\n\nStatus EventPort::Init() { return STATUS_OK; };\n\nStatus EventPort::SendBatch(\n    std::vector<std::shared_ptr<FlowUnitInnerEvent>>& event_list) {\n  queue_->PushBatchForce(&event_list, true, 0);\n  return STATUS_SUCCESS;\n}\n\nStatus EventPort::Send(std::shared_ptr<FlowUnitInnerEvent>& event) {\n  queue_->PushForce(event);\n  return STATUS_SUCCESS;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/scheduler/flow_scheduler.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"flow_scheduler.h\"\n\n#include <modelbox/base/os.h>\n\nnamespace modelbox {\n\nconstexpr const char* TASK_FLOW_SCHEDUER_NAME = \"Flow-Scheduler\";\nconstexpr const char* TASK_FLOW_POOL_NAME = \"Flow-Workers\";\n\nSchedulerCommand::SchedulerCommand(SchedulerCommandType type,\n                                   std::shared_ptr<PriorityPort> port)\n    : type_(type), port_(std::move(port)) {}\n\nSchedulerCommand::~SchedulerCommand() = default;\n\nSchedulerCommandType SchedulerCommand::GetType() { return type_; }\n\nstd::shared_ptr<PriorityPort> SchedulerCommand::GetPort() { return port_; }\n\nint SchedulerCommand::GetPriority() { return 0; }\n\nSchedulerPort::SchedulerPort(const std::string& name)\n    : SchedulerPort(name, SIZE_MAX) {}\n\nSchedulerPort::SchedulerPort(const std::string& name, size_t event_capacity)\n    : NotifyPort(name, nullptr, 0, event_capacity) {\n  queue_ = std::make_shared<SchedulerQueue>(event_capacity);\n}\n\nSchedulerPort::~SchedulerPort() = default;\n\nStatus SchedulerPort::Init() { return STATUS_OK; };\n\nFlowScheduler::FlowScheduler() = default;\n\nFlowScheduler::~FlowScheduler() {\n  if (tp_) {\n    tp_ = nullptr;\n  }\n}\n\nStatus FlowScheduler::Init(std::shared_ptr<Configuration> config,\n                           std::shared_ptr<StatisticsItem> stats,\n                 
          std::shared_ptr<ThreadPool> thread_pool) {\n  stats_ = stats;\n  if (stats) {\n    stats_status_ = stats_->AddItem(\"status\", std::string(\"initial\", true));\n  } else {\n    stats_status_ = std::make_shared<StatisticsItem>();\n  }\n\n  if (thread_pool == nullptr) {\n    auto threads = config->GetUint32(\"graph.thread-num\",\n                                     std::thread::hardware_concurrency() * 2);\n    auto max_threads = config->GetUint32(\"graph.max-thread-num\", threads * 32);\n    auto thread_pool = std::make_shared<ThreadPool>(threads, max_threads);\n    thread_pool->SetName(TASK_FLOW_POOL_NAME);\n    tp_ = thread_pool;\n    thread_create_ = true;\n\n    if (data_hub_ == nullptr) {\n      data_hub_ = std::make_shared<DefaultDataHub>();\n    }\n\n    if (scheduler_event_port_ == nullptr) {\n      scheduler_event_port_ =\n          std::make_shared<SchedulerPort>(\"_Scheduler_Event\");\n      scheduler_event_port_->Init();\n      auto priority_port =\n          std::make_shared<PriorityPort>(scheduler_event_port_);\n      if (!(data_hub_->AddPort(priority_port))) {\n        MBLOG_ERROR << \"failed to add port to data hub\";\n        return STATUS_FAULT;\n      }\n    }\n\n    MBLOG_INFO << \"init scheduler with \" << threads << \" threads, max \"\n               << max_threads;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowScheduler::Build(const Graph& graph) {\n  if (data_hub_ == nullptr || tp_ == nullptr) {\n    return {STATUS_SHUTDOWN, \"Scheduler not init.\"};\n  }\n\n  auto node_port_list = graph.GetNotifyPort();\n  if (node_port_list.empty()) {\n    MBLOG_ERROR << \"graph has no flow unit group\";\n    return STATUS_FAULT;\n  }\n\n  for (const auto& iter_pair : node_port_list) {\n    std::vector<std::shared_ptr<PriorityPort>> priority_ports;\n    priority_ports.reserve(iter_pair.second.size());\n    for (const auto& port : iter_pair.second) {\n      if (!port) {\n        MBLOG_ERROR << \"port must not be nullptr.\";\n        return 
STATUS_FAULT;\n      }\n\n      auto priority_port = std::make_shared<PriorityPort>(port);\n      if (!(data_hub_->AddPort(priority_port))) {\n        MBLOG_ERROR << \"failed to add port to data hub\";\n        return STATUS_FAULT;\n      }\n\n      priority_ports.emplace_back(priority_port);\n    }\n\n    node_port_map_.emplace(iter_pair.first, std::move(priority_ports));\n  }\n\n  MBLOG_DEBUG << \"flow scheduler build.\";\n  return STATUS_OK;\n}\n\nvoid FlowScheduler::RunAsync() {\n  if (tp_ == nullptr) {\n    return;\n  }\n\n  mode_ = ASYNC;\n  is_stop_ = false;\n  run_fut_ =\n      tp_->Submit(TASK_FLOW_SCHEDUER_NAME, &FlowScheduler::RunImpl, this);\n  MBLOG_DEBUG << \"flow scheduler is running.\";\n}\n\nStatus FlowScheduler::Run() {\n  if (tp_ == nullptr) {\n    return {STATUS_SHUTDOWN, \"Scheduler not init.\"};\n  }\n\n  mode_ = SYNC;\n  is_stop_ = false;\n  MBLOG_DEBUG << \"flow scheduler is running.\";\n  return RunImpl();\n}\n\nvoid FlowScheduler::EnableActivePort(const std::shared_ptr<NodeBase>& node) {\n  auto iter = node_port_map_.find(node);\n  if (iter != node_port_map_.end()) {\n    data_hub_->AddToActivePort(iter->second);\n  }\n\n  std::unique_lock<std::mutex> lock(status_mutex_);\n  nodes_runing_status_[node] = false;\n}\nvoid FlowScheduler::DisableActivePort(const std::shared_ptr<NodeBase>& node) {\n  auto iter = node_port_map_.find(node);\n  if (iter != node_port_map_.end()) {\n    data_hub_->RemoveFromActivePort(iter->second);\n  }\n\n  std::unique_lock<std::mutex> lock(status_mutex_);\n  nodes_runing_status_[node] = true;\n}\n\nvoid FlowScheduler::SendSchedulerCommand(\n    SchedulerCommandType type, const std::shared_ptr<PriorityPort>& port) {\n  auto cmd = std::make_shared<SchedulerCommand>(type, port);\n  scheduler_event_port_->Send(cmd);\n  scheduler_event_port_->NotifyPushEvent();\n}\n\nvoid FlowScheduler::RunWapper(\n    const std::shared_ptr<NodeBase>& node, RunType type,\n    const std::shared_ptr<PriorityPort>& active_port) {\n  
Status status = STATUS_FAULT;\n  try {\n    MBLOG_DEBUG << \"run \" << node->GetName() << \" begin\";\n    status = node->Run(type);\n    MBLOG_DEBUG << \"run \" << node->GetName() << \" end\";\n  } catch (std::exception& e) {\n    MBLOG_WARN << \"node \" << node->GetName()\n               << \" run exception caught: \" << e.what();\n    status = {STATUS_FAULT, e.what()};\n  }\n\n  if (!status) {\n    MBLOG_ERROR << \"node (\" << node->GetName()\n                << \") run return: \" << status.WrapErrormsgs();\n    auto cmd_type = (status == STATUS_STOP)\n                        ? SchedulerCommandType::COMMAND_STOP\n                        : SchedulerCommandType::COMMAND_ERROR;\n    SendSchedulerCommand(cmd_type, active_port);\n  }\n\n  EnableActivePort(node);\n  std::unique_lock<std::mutex> lock(notify_mutex_);\n  running_node_count_--;\n  if (is_wait_stop_) {\n    cv_.notify_one();\n  }\n}\n\nStatus FlowScheduler::RunNode(\n    const std::shared_ptr<PriorityPort>& active_port) {\n  if (tp_ == nullptr) {\n    return {STATUS_FAULT, \"scheduler not init.\"};\n  }\n\n  if (!active_port) {\n    return {STATUS_INVALID, \"active port must not be nullptr.\"};\n  }\n\n  auto port = active_port->GetPort();\n  if (!port) {\n    return {STATUS_INVALID,\n            \"unexcept! port must not be nullptr, flow scheduler will be \"\n            \"shutdown.\"};\n  }\n\n  auto node = active_port->GetNode();\n  if (!node) {\n    return {STATUS_INVALID,\n            \"unexcept! can not find node in graph, flow scheduler will be \"\n            \"shutdown.\"};\n  }\n\n  DisableActivePort(node);\n\n  auto type = (typeid(*port) == typeid(EventPort) ? 
EVENT : DATA);\n  MBLOG_DEBUG << \"begin run node \" << node->GetName() << \" for type: \" << type;\n  running_node_count_++;\n  auto fut = tp_->Submit(node->GetName(), &FlowScheduler::RunWapper, this, node,\n                         type, active_port);\n  if (!fut.valid()) {\n    MBLOG_ERROR << \"Submit task \" << node->GetName() << \"failed.\";\n    EnableActivePort(node);\n    running_node_count_--;\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowScheduler::RunCommand(\n    const std::shared_ptr<PriorityPort>& active_port) {\n  auto port = active_port->GetPort();\n  if (!port) {\n    return {STATUS_INVALID,\n            \"unexcept! port must not be nullptr, flow scheduler will be \"\n            \"shutdown.\"};\n  }\n\n  auto scheduler_port = std::dynamic_pointer_cast<SchedulerPort>(port);\n  auto command = scheduler_port->Recv();\n  scheduler_event_port_->NotifyPopEvent();\n  data_hub_->AddToActivePort(active_port);\n  if (!command) {\n    return STATUS_OK;\n  }\n\n  switch (command->GetType()) {\n    case SchedulerCommandType::COMMAND_STOP:\n      return STATUS_STOP;\n    case SchedulerCommandType::COMMAND_ERROR:\n      return STATUS_FAULT;\n    default:\n      return {STATUS_INVALID, \"invalid scheduler command type.\"};\n  }\n\n  return STATUS_OK;\n}\n\nbool FlowScheduler::IsSchedulerCommand(\n    const std::shared_ptr<PriorityPort>& active_port) {\n  return scheduler_event_port_ == active_port->GetPort();\n}\n\nvoid FlowScheduler::WaitNodeFinish() {\n  auto pred = [this] { return running_node_count_ == 0; };\n  std::unique_lock<std::mutex> lock(notify_mutex_);\n  is_wait_stop_ = true;\n\n  cv_.wait(lock, pred);\n}\n\nvoid FlowScheduler::CheckScheduleStatus(const bool& printlog) {\n  // If the current status is normal, no information is printed. The logic for\n  // determining abnormalities is as follows:\n  // 1. If the node is running, it indicates that the current node has been\n  // running for 60 seconds and no response is returned. 
In this case, an\n  // exception may occur. It is probably blocked in the Send function.\n  // 2. The node is not in the running state, but the port contains data. The\n  // possible cause is that the threads in the thread pool are exhausted.\n  bool is_print_threadpool = false;\n  bool is_no_response = false;\n  for (const auto& iter : node_port_map_) {\n    auto node = iter.first;\n    for (const auto& port_iter : iter.second) {\n      if (!port_iter->GetPort()) {\n        continue;\n      }\n\n      std::unique_lock<std::mutex> lock(status_mutex_);\n      auto node_status = nodes_runing_status_.find(node);\n      // node is not running\n      if (node_status == nodes_runing_status_.end() ||\n          node_status->second == false) {\n        if (port_iter->GetPort()->Empty()) {\n          continue;\n        }\n\n        is_no_response = true;\n\n        if (printlog) {\n          MBLOG_WARN << \"node:\" << node->GetName()\n                     << \" is not running, but port:\"\n                     << port_iter->GetPort()->GetName()\n                     << \" has data:\" << port_iter->GetPort()->GetDataCount()\n                     << \" priority:\" << port_iter->GetPriority()\n                     << \", scheduler may be blocking.\";\n          is_print_threadpool = true;\n        }\n      } else {\n        is_no_response = true;\n\n        // node is running\n        if (printlog == false) {\n          continue;\n        }\n        MBLOG_WARN << \"node:\" << node->GetName()\n                   << \" running long time, and port:\"\n                   << port_iter->GetPort()->GetName()\n                   << \" has data:\" << port_iter->GetPort()->GetDataCount()\n                   << \" priority:\" << port_iter->GetPriority()\n                   << \", node may be blocking \"\n                   << ((port_iter->GetPort()->GetDataCount() > 0)\n                           ? 
\"\"\n                           : \"in Send function \")\n                   << \"or thread pool is busy.\";\n        is_print_threadpool = true;\n      }\n    }\n  }\n\n  // If no exception occurs, do not print any information to prevent excessive\n  // information from being printed when the HTTP server is used.\n  if (is_print_threadpool) {\n    MBLOG_INFO << \"Thread Pool Status:\";\n    MBLOG_INFO << \"                    max thread size: \"\n               << tp_->GetMaxThreadsNum();\n    MBLOG_INFO << \"                    worker thread size: \"\n               << tp_->GetThreadsNum();\n    MBLOG_INFO << \"                    wating work count: \"\n               << tp_->GetWaitingWorkCount();\n\n    MBLOG_INFO << \"running_node_count: \" << running_node_count_;\n  }\n\n  is_no_response_ = is_no_response;\n  check_count_++;\n}\n\nStatus FlowScheduler::RunImpl() {\n  MBLOG_DEBUG << \"flow schedule is begin run.\";\n  os->Thread->SetName(\"Flow-Scheduler\");\n  std::shared_ptr<PriorityPort> active_port = nullptr;\n  Status status = STATUS_OK;\n  is_stop_ = false;\n  is_wait_stop_ = false;\n  bool has_print = false;\n  is_no_response_ = false;\n  int timeout_count = 0;\n  time_t last_check_time = 0;\n  stats_status_->SetValue(std::string(\"running\"));\n\n  while (!is_stop_) {\n    if (is_no_response_) {\n      time_t currtime;\n      time(&currtime);\n      if (last_check_time != currtime) {\n        CheckScheduleStatus(false);\n        last_check_time = currtime;\n      }\n    }\n\n    status = data_hub_->SelectActivePort(&active_port, check_timeout_);\n    if (status == STATUS_TIMEDOUT) {\n      // The system displays the current status information every 60 seconds if\n      // the system is idle.\n      if (!has_print && timeout_count >= max_check_timeout_count_) {\n        CheckScheduleStatus(!has_print);\n        has_print = true;\n        timeout_count = 0;\n        if (is_no_response_) {\n          stats_status_->SetValue(std::string(\"blocking\"));\n  
      }\n      } else if (is_no_response_ == false) {\n        stats_status_->SetValue(std::string(\"idle\"));\n      }\n\n      timeout_count++;\n      continue;\n    }\n\n    if (status == STATUS_NODATA) {\n      timeout_count = 0;\n      continue;\n    }\n\n    if (timeout_count > 0 && is_no_response_ == false) {\n      stats_status_->SetValue(std::string(\"running\"));\n    }\n\n    has_print = false;\n    timeout_count = 0;\n    status = IsSchedulerCommand(active_port) ? RunCommand(active_port)\n                                             : RunNode(active_port);\n    if (!status) {\n      break;\n    }\n  }\n\n  is_stop_ = true;\n\n  if (!status) {\n    MBLOG_ERROR << \"the scheduler caught an error : \" << status;\n  }\n\n  ShutdownNodes();\n  WaitNodeFinish();\n  stats_status_->SetValue(std::string(\"shutdown\"));\n  return status;\n}\n\nvoid FlowScheduler::ShutdownNodes() {\n  for (auto& iter : node_port_map_) {\n    iter.first->Shutdown();\n  }\n}\n\nvoid FlowScheduler::Shutdown() {\n  is_stop_ = true;\n  if (tp_ == nullptr) {\n    return;\n  }\n\n  MBLOG_INFO << \"shutdown flow scheduler.\";\n  SendSchedulerCommand(SchedulerCommandType::COMMAND_STOP, nullptr);\n  if (run_fut_.valid()) {\n    run_fut_.wait();\n  }\n  if (thread_create_) {\n    tp_->Shutdown();\n    thread_create_ = false;\n  }\n\n  tp_ = nullptr;\n}\n\nStatus FlowScheduler::Wait(int64_t milliseconds, Status* ret_val) {\n  if (is_stop_ == true || tp_ == nullptr) {\n    return STATUS_SHUTDOWN;\n  }\n\n  if (!run_fut_.valid()) {\n    MBLOG_WARN << \"async run future is invalid.\";\n    return STATUS_FAULT;\n  }\n\n  if (0 == milliseconds) {\n    run_fut_.wait();\n  } else if (milliseconds > 0) {\n    auto status = run_fut_.wait_for(std::chrono::milliseconds(milliseconds));\n    if (status != std::future_status::ready) {\n      return STATUS_TIMEDOUT;\n    }\n  }\n\n  if (is_no_response_) {\n    return STATUS_NORESPONSE;\n  }\n\n  if (is_stop_ == false && milliseconds < 0) {\n    return 
STATUS_BUSY;\n  }\n\n  auto ret = run_fut_.get();\n  if (ret_val != nullptr) {\n    *ret_val = ret;\n  }\n\n  // TODO thread pool should provide force stop api\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/scheduler/flow_scheduler.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_PIPELINE_SCHEDULER_H_\n#define MODELBOX_PIPELINE_SCHEDULER_H_\n\n#include <modelbox/base/thread_pool.h>\n#include <modelbox/graph.h>\n\n#include <atomic>\n#include <thread>\n#include <utility>\n\n#include \"../common/data_hub.h\"\n#include \"modelbox/scheduler.h\"\n\nnamespace modelbox {\n\nenum class SchedulerCommandType {\n  COMMAND_STOP = 1,\n  COMMAND_ERROR = 2,\n};\n\nconstexpr const int SCHED_CHECK_TIMEOUT_MS = 1000;\nconstexpr const int SCHED_MAX_CHECK_TIMEOUT_COUNT = 60;\n\nclass SchedulerCommand {\n public:\n  SchedulerCommand(SchedulerCommandType type,\n                   std::shared_ptr<PriorityPort> port);\n  virtual ~SchedulerCommand();\n\n  SchedulerCommandType GetType();\n\n  std::shared_ptr<PriorityPort> GetPort();\n\n  int GetPriority();\n\n private:\n  SchedulerCommandType type_;\n  std::shared_ptr<PriorityPort> port_;\n};\n\nstruct SchedulerCommandCompare {\n  auto operator()(std::shared_ptr<SchedulerCommand> const& a,\n                  std::shared_ptr<SchedulerCommand> const& b) const -> bool {\n    if (nullptr == a->GetPort() && b->GetPort()) {\n      return false;\n    }\n\n    if (a->GetPort() && nullptr == b->GetPort()) {\n      return true;\n    }\n\n    if (nullptr == a->GetPort() && nullptr == b->GetPort()) {\n      return true;\n    }\n\n    return 
a->GetPort()->GetPriority() < b->GetPort()->GetPriority();\n  }\n};\n\nusing SchedulerQueue = PriorityBlockingQueue<std::shared_ptr<SchedulerCommand>,\n                                             SchedulerCommandCompare>;\n\nclass SchedulerPort\n    : public NotifyPort<SchedulerCommand, SchedulerCommandCompare> {\n public:\n  SchedulerPort(const std::string& name);\n  SchedulerPort(const std::string& name, size_t event_capacity);\n\n  ~SchedulerPort() override;\n\n  Status Init() override;\n};\n\nclass FlowScheduler : public Scheduler {\n public:\n  FlowScheduler();\n  ~FlowScheduler() override;\n  Status Init(std::shared_ptr<Configuration> config,\n              std::shared_ptr<StatisticsItem> stats = nullptr,\n              std::shared_ptr<ThreadPool> thread_pool = nullptr) override;\n  Status Build(const Graph& graph) override;\n  Status Run() override;\n  void RunAsync() override;\n  void Shutdown() override;\n\n  Status Wait(int64_t milliseconds, Status* ret_val = nullptr) override;\n\n  void SetCheckTimeout(int timeout) { check_timeout_ = timeout; }\n  void SetMaxCheckTimeoutCount(int max_timeout_count) {\n    max_check_timeout_count_ = max_timeout_count;\n  }\n  int64_t GetCheckCount() const { return check_count_; }\n\n private:\n  std::shared_ptr<DataHub> data_hub_;\n  std::shared_ptr<ThreadPool> tp_;\n  bool thread_create_ = false;\n\n  std::atomic<bool> is_stop_{false};\n  std::atomic<bool> is_built_{false};\n\n  std::future<Status> run_fut_;\n  std::atomic<int> mode_{SYNC};\n\n  std::shared_ptr<SchedulerPort> scheduler_event_port_;\n  std::unordered_map<std::shared_ptr<NodeBase>,\n                     std::vector<std::shared_ptr<PriorityPort>>>\n      node_port_map_;\n\n  std::atomic<uint32_t> running_node_count_{0};\n  bool is_wait_stop_{false};\n  bool is_no_response_{false};\n  std::mutex notify_mutex_;\n  std::condition_variable cv_;\n\n  std::shared_ptr<StatisticsItem> stats_;\n  std::shared_ptr<StatisticsItem> stats_status_;\n\n  std::mutex 
status_mutex_;\n  std::unordered_map<std::shared_ptr<NodeBase>, bool> nodes_runing_status_;\n\n  int check_timeout_{SCHED_CHECK_TIMEOUT_MS};\n  int max_check_timeout_count_{SCHED_MAX_CHECK_TIMEOUT_COUNT};\n  std::atomic<int64_t> check_count_{0};\n\n  Status RunImpl();\n  void RunWapper(const std::shared_ptr<NodeBase>& node, RunType type,\n                 const std::shared_ptr<PriorityPort>& active_port);\n\n  Status RunNode(const std::shared_ptr<PriorityPort>& active_port);\n  Status RunCommand(const std::shared_ptr<PriorityPort>& active_port);\n  void SendSchedulerCommand(SchedulerCommandType type,\n                            const std::shared_ptr<PriorityPort>& port);\n  bool IsSchedulerCommand(const std::shared_ptr<PriorityPort>& active_port);\n\n  void EnableActivePort(const std::shared_ptr<NodeBase>& node);\n  void DisableActivePort(const std::shared_ptr<NodeBase>& node);\n  void WaitNodeFinish();\n  void ShutdownNodes();\n  void CheckScheduleStatus(const bool &printlog);\n};\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/engine/session.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/session.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nSessionIO::SessionIO() = default;\n\nSessionIO::~SessionIO() = default;\n\nSessionStateListener::SessionStateListener() = default;\n\nSessionStateListener::~SessionStateListener() = default;\n\nvoid SessionStateListener::NotifySessionClose(){};\n\nSession::Session(const std::shared_ptr<StatisticsItem> &graph_stats)\n    : ctx_(std::make_shared<SessionContext>(graph_stats)) {}\n\nSession::~Session() {\n  auto io = io_handle_.lock();\n  if (io == nullptr) {\n    return;\n  }\n\n  io->SessionEnd(error_);\n}\n\nvoid Session::AddStateListener(\n    const std::shared_ptr<SessionStateListener> &listener) {\n  std::lock_guard<std::mutex> lock(state_listener_list_lock_);\n  state_listener_list_.push_back(listener);\n}\n\nvoid Session::SetSessionIO(const std::shared_ptr<SessionIO> &io_handle) {\n  io_handle_ = io_handle;\n  has_io_ = true;\n}\n\nstd::shared_ptr<SessionIO> Session::GetSessionIO() { return io_handle_.lock(); }\n\nbool Session::HasSessionIO() { return has_io_; }\n\nstd::shared_ptr<SessionContext> Session::GetSessionCtx() { return ctx_; }\n\n/**\n * @brief will cause session end after current data in engine processed over\n **/\nvoid Session::Close() {\n  std::lock_guard<std::mutex> state_lock(state_lock_);\n  if (closed_) {\n   
 return;\n  }\n\n  closed_ = true;\n  error_ = std::make_shared<FlowUnitError>(\"EOF\");\n\n  std::lock_guard<std::mutex> lock(state_listener_list_lock_);\n  for (auto &state_listener : state_listener_list_) {\n    auto listener = state_listener.lock();\n    if (listener == nullptr) {\n      continue;\n    }\n\n    listener->NotifySessionClose();\n  }\n}\n\nbool Session::IsClosed() { return closed_; }\n\n/**\n * @brief abort session imediately\n **/\nvoid Session::Abort() { abort_ = true; }\n\nbool Session::IsAbort() { return abort_; }\n\nvoid Session::SetError(std::shared_ptr<FlowUnitError> error) {\n  error_ = std::move(error);\n}\n\nstd::shared_ptr<FlowUnitError> Session::GetError() { return error_; }\n\nSessionManager::SessionManager() = default;\n\nSessionManager::~SessionManager() = default;\n\nstd::shared_ptr<Session> SessionManager::CreateSession(\n    const std::shared_ptr<StatisticsItem> &graph_stats) {\n  auto *session = new Session(graph_stats);\n  auto session_id = session->GetSessionCtx()->GetSessionId();\n  auto session_ptr =\n      std::shared_ptr<Session>(session, [session_id, this](Session *ptr) {\n        DeleteSession(session_id);\n        delete ptr;\n      });\n  std::lock_guard<std::mutex> lock(sessions_lock_);\n  sessions_[session_id] = session_ptr;\n  return session_ptr;\n}\n\nvoid SessionManager::DeleteSession(const SessionId &id) {\n  std::lock_guard<std::mutex> lock(sessions_lock_);\n  sessions_.erase(id);\n  MBLOG_INFO << \"session \" << id << \" is over, running session count \"\n             << sessions_.size();\n}\n\nstd::unordered_map<SessionId, std::weak_ptr<Session>>\nSessionManager::GetSessions() {\n  std::lock_guard<std::mutex> lock(sessions_lock_);\n  return sessions_;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/session_context.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <utility>\n\n#include \"modelbox/session_context.h\"\n\n#include \"modelbox/base/uuid.h\"\n#include \"modelbox/profiler.h\"\n#include \"modelbox/virtual_node.h\"\nnamespace modelbox {\n\nSessionContext::SessionContext(\n    const std::shared_ptr<StatisticsItem> &graph_stats) {\n  ConfigurationBuilder config_builder;\n  config_ = config_builder.Build();\n  auto ret = GetUUID(&session_id_);\n  if (ret != STATUS_OK) {\n    MBLOG_WARN << \"Get uuid failed, set session id to timestamp\";\n    session_id_ = std::to_string(GetCurrentTime());\n  }\n\n  if (graph_stats != nullptr) {\n    graph_stats_ = graph_stats;\n    graph_session_stats_ = graph_stats_->AddItem(session_id_);\n  }\n  MBLOG_INFO << \"session context start se id:\" << GetSessionId();\n}\n\nSessionContext::~SessionContext() {\n  MBLOG_INFO << \"session context finish se id:\" << GetSessionId();\n  if (graph_stats_ != nullptr) {\n    graph_stats_->DelItem(session_id_);\n  }\n}\n\nvoid SessionContext::SetPrivate(const std::string &key,\n                                std::shared_ptr<void> private_content,\n                                std::size_t type_id) {\n  std::lock_guard<std::mutex> lock(private_map_lock_);\n  private_map_[key] = std::move(private_content);\n  private_map_type_[key] = type_id;\n}\n\nvoid SessionContext::SetSessionId(const 
std::string &session_id) {\n  session_id_ = session_id;\n}\n\nstd::string SessionContext::GetSessionId() { return session_id_; }\n\nstd::shared_ptr<Configuration> SessionContext::GetConfig() { return config_; }\n\nvoid SessionContext::SetError(std::shared_ptr<FlowUnitError> error) {\n  error_ = std::move(error);\n}\n\nstd::shared_ptr<FlowUnitError> SessionContext::GetError() { return error_; }\n\nstd::shared_ptr<StatisticsItem> SessionContext::GetStatistics(\n    SessionContexStatsType type) {\n  switch (type) {\n    case SessionContexStatsType::SESSION:\n      return graph_session_stats_;\n\n    case SessionContexStatsType::GRAPH:\n      return graph_stats_;\n\n    default:\n      return nullptr;\n  }\n}\n\nstd::shared_ptr<void> SessionContext::GetPrivate(const std::string &key) {\n  std::lock_guard<std::mutex> lock(private_map_lock_);\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    return nullptr;\n  }\n\n  return private_map_[key];\n}\n\nstd::size_t SessionContext::GetPrivateType(const std::string &key) {\n  std::lock_guard<std::mutex> lock(private_map_lock_);\n  auto iter = private_map_type_.find(key);\n  if (iter == private_map_type_.end()) {\n    return 0;\n  }\n\n  return private_map_type_[key];\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/single_node.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <utility>\n\n#include \"modelbox/single_node.h\"\n\nnamespace modelbox {\n#define DEFAULT_SINGLE_NODE_QUEUE_SIZE 8192\nSingleNode::SingleNode(const std::string& unit_name,\n                       const std::string& unit_type,\n                       const std::string& unit_device_id,\n                       std::shared_ptr<FlowUnitManager> flowunit_mgr,\n                       std::shared_ptr<Configuration> config,\n                       std::shared_ptr<Profiler> profiler,\n                       std::shared_ptr<StatisticsItem> graph_stats)\n    : config_(std::move(config)) {\n  SetFlowUnitInfo(unit_name, unit_type, unit_device_id,\n                  std::move(flowunit_mgr));\n  SetProfiler(std::move(profiler));\n  SetStats(std::move(graph_stats));\n}\n\nStatus SingleNode::Init() {\n  flowunit_group_ = std::make_shared<FlowUnitGroup>(\n      flowunit_name_, flowunit_type_, flowunit_device_id_, config_, profiler_);\n  std::set<std::string> input_port_names;\n  auto input_ports =\n      flowunit_manager_->GetFlowUnitDesc(flowunit_type_, flowunit_name_)\n          ->GetFlowUnitInput();\n\n  for (auto& input_port : input_ports) {\n    auto input_port_name = input_port.GetPortName();\n    queue_size_ = config_->GetUint64(\"queue_size\", DEFAULT_SINGLE_NODE_QUEUE_SIZE);\n    if (0 == queue_size_) {\n      
return {STATUS_INVALID, \"invalid queue_size config: 0\"};\n    }\n    auto in_queue_size =\n        config_->GetUint64(\"queue_size_\" + input_port_name, queue_size_);\n    if (0 == in_queue_size) {\n      return {STATUS_INVALID,\n              \"invalid queue_size_\" + input_port_name + \" config: 0\"};\n    }\n\n    input_ports_.emplace_back(std::make_shared<InPort>(\n        input_port_name,\n        std::dynamic_pointer_cast<NodeBase>(shared_from_this()), GetPriority(),\n        in_queue_size));\n  }\n\n  auto out_ports =\n      flowunit_manager_->GetFlowUnitDesc(flowunit_type_, flowunit_name_)\n          ->GetFlowUnitOutput();\n\n  for (auto& output_port : out_ports) {\n    auto output_port_name = output_port.GetPortName();\n    output_ports_.emplace_back(\n        std::make_shared<OutPort>(output_port_name, shared_from_this()));\n  }\n  std::set<std::string> input_ports_name;\n  std::set<std::string> output_ports_name;\n  auto status = flowunit_group_->Init(input_ports_name, output_ports_name,\n                                      flowunit_manager_, false);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"failed init flowunit group\";\n    return status;\n  }\n\n  flowunit_group_->SetNode(std::dynamic_pointer_cast<Node>(shared_from_this()));\n  return STATUS_OK;\n}\n\nstd::shared_ptr<FlowUnitDataContext> SingleNode::CreateDataContext() {\n  auto flowunit_desc =\n      flowunit_manager_->GetFlowUnitDesc(flowunit_type_, flowunit_name_);\n  if (flowunit_desc->GetFlowType() == NORMAL) {\n    data_context_ =\n        std::make_shared<NormalFlowUnitDataContext>(this, nullptr, nullptr);\n  } else {\n    MBLOG_ERROR << \"flowunit type is stream, return null\";\n  }\n  return data_context_;\n}\n\nStatus SingleNode::RecvData(const std::shared_ptr<DataHandler>& data) {\n  auto input_ports = GetInputPorts();\n  auto data_map = std::make_shared<PortDataMap>();\n\n  for (auto& iter : input_ports) {\n    auto name = iter->GetName();\n    if (input_ports.size() == 1) 
{\n      name = DEFAULT_PORT_NAME;\n    }\n\n    auto bufferlist = data->GetBufferList(name);\n    if (!bufferlist) {\n      MBLOG_ERROR << \"bufferlist is nullptr, RecvData error \";\n      return STATUS_INVALID;\n    }\n\n    bufferlist->Swap(data_map->at(name));\n  }\n\n  if (data_context_ == nullptr) {\n    data_context_ =\n        std::make_shared<NormalFlowUnitDataContext>(this, nullptr, nullptr);\n  }\n\n  auto data_ctx =\n      std::static_pointer_cast<NormalFlowUnitDataContext>(data_context_);\n  data_ctx->WriteInputData(data_map);\n  return STATUS_OK;\n}\n\nStatus SingleNode::Process() {\n  if (!flowunit_group_) {\n    MBLOG_ERROR << \"flowunit_group not created . \";\n    return STATUS_INVALID;\n  }\n  std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n  data_ctx_list.push_back(data_context_);\n  auto status = flowunit_group_->Run(data_ctx_list);\n  if (status != STATUS_OK) {\n    return STATUS_FAULT;\n  }\n  return STATUS_OK;\n}\n\nStatus SingleNode::PushDataToDataHandler(\n    std::shared_ptr<DataHandler>& data_handler) {\n  if (data_context_ == nullptr || data_handler == nullptr) {\n    return STATUS_INVALID;\n  }\n  PortDataMap port_data_map;\n  data_context_->PopOutputData(port_data_map);\n  if (port_data_map.size() == 0) {\n    return STATUS_NODATA;\n  }\n  for (auto& iter : port_data_map) {\n    std::string port_name = iter.first;\n\n    for (auto buffer : iter.second) {\n      data_handler->PushData(buffer, port_name);\n    }\n  }\n\n  data_context_->ClearData();\n  return STATUS_OK;\n}\n\nvoid SingleNode::Run(const std::shared_ptr<DataHandler>& data) {\n  auto status = RecvData(data);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"failed recv data ...\";\n    return;\n  }\n\n  status = Process();\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"process data failed ...\";\n    return;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/stream.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/stream.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nDataMeta::DataMeta() = default;\n\nDataMeta::DataMeta(const DataMeta &other) { private_map_ = other.private_map_; }\n\nDataMeta::~DataMeta() { private_map_.clear(); }\n\nvoid DataMeta::SetMeta(const std::string &key, std::shared_ptr<void> meta) {\n  private_map_[key] = std::move(meta);\n}\n\nstd::shared_ptr<void> DataMeta::GetMeta(const std::string &key) {\n  auto iter = private_map_.find(key);\n  if (iter == private_map_.end()) {\n    return nullptr;\n  }\n  return private_map_[key];\n}\n\nstd::unordered_map<std::string, std::shared_ptr<void>> DataMeta::GetMetas() {\n  return private_map_;\n}\n\nStreamOrder::StreamOrder() { index_at_each_expand_level_.push_back(0); }\n\nbool StreamOrder::operator<(const StreamOrder &other_stream_order) {\n  auto this_index = index_at_each_expand_level_.begin();\n  auto other_index = other_stream_order.index_at_each_expand_level_.begin();\n  while (true) {\n    if (other_index == other_stream_order.index_at_each_expand_level_.end()) {\n      // not short than this\n      return false;\n    }\n\n    if (this_index == index_at_each_expand_level_.end()) {\n      // short than other\n      return true;\n    }\n\n    if (*this_index < *other_index) {\n      return true;\n    }\n\n    if (*this_index > 
*other_index) {\n      return false;\n    }\n\n    // this level is same, compare next level\n    ++this_index;\n    ++other_index;\n  }\n}\n\nstd::shared_ptr<StreamOrder> StreamOrder::Copy() {\n  auto stream_order = std::make_shared<StreamOrder>();\n  stream_order->index_at_each_expand_level_ = index_at_each_expand_level_;\n  return stream_order;\n}\n\nvoid StreamOrder::Expand(size_t index_in_this_level) {\n  index_at_each_expand_level_.push_back(index_in_this_level);\n}\n\nvoid StreamOrder::Collapse() { index_at_each_expand_level_.pop_back(); }\n\nStream::Stream(std::shared_ptr<Session> session)\n    : session_(std::move(session)) {}\n\nStream::~Stream() = default;\n\nstd::shared_ptr<Session> Stream::GetSession() { return session_; }\n\nvoid Stream::SetMaxBufferCount(size_t max_buffer_count) {\n  max_buffer_count_ = max_buffer_count;\n}\n\nbool Stream::ReachEnd() {\n  if (max_buffer_count_ == 0) {\n    return false;\n  }\n\n  return max_buffer_count_ <= cur_buffer_count_;\n}\n\nsize_t Stream::GetBufferCount() { return cur_buffer_count_; }\n\nvoid Stream::IncreaseBufferCount() { ++cur_buffer_count_; }\n\nvoid Stream::SetStreamMeta(std::shared_ptr<DataMeta> data_meta) {\n  data_meta_ = std::move(data_meta);\n}\n\nstd::shared_ptr<DataMeta> Stream::GetStreamMeta() { return data_meta_; }\n\nstd::shared_ptr<StreamOrder> Stream::GetStreamOrder() { return stream_order_; }\n\nvoid Stream::SetStreamOrder(std::shared_ptr<StreamOrder> stream_order) {\n  stream_order_ = std::move(stream_order);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/engine/tensor.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/tensor.h>\n\nnamespace modelbox {\n\nTensorBuffer::TensorBuffer() = default;\n\nTensorBuffer::TensorBuffer(const std::shared_ptr<Device>& device)\n    : Buffer(device) {}\nTensorBuffer::TensorBuffer(const std::shared_ptr<DeviceMemory>& dev_mem)\n    : Buffer(dev_mem) {}\n\nTensorBuffer::TensorBuffer(const TensorBuffer& other) = default;\n\nTensorBuffer::~TensorBuffer() = default;\n\nconst std::vector<size_t>& TensorBuffer::Shape() const { return shape_; }\n\nvoid TensorBuffer::SetType(ModelBoxDataType type) { type_ = type; }\n\nModelBoxDataType TensorBuffer::GetType() { return type_; }\n\nstd::shared_ptr<Buffer> TensorBuffer::Copy() const {\n  return std::make_shared<TensorBuffer>(*this);\n}\n\nstd::shared_ptr<Buffer> TensorBuffer::DeepCopy() const {\n  auto tensor = std::make_shared<TensorBuffer>();\n  auto status = tensor->DeepCopy(*this);\n  if (!status) {\n    MBLOG_WARN << \"TensorBuffer DeepCopy failed: \" << status;\n    return nullptr;\n  }\n\n  tensor->shape_ = shape_;\n  tensor->type_ = type_;\n  return tensor;\n}\n\nStatus TensorBuffer::DeepCopy(const TensorBuffer& other) {\n  const auto* other_buffer = dynamic_cast<const Buffer*>(&other);\n  if (other_buffer == nullptr) {\n    return {STATUS_INVALID, \"tensor buffer is invalid.\"};\n  }\n\n  auto status = 
Buffer::DeepCopy(*(other_buffer));\n  if (!status) {\n    return status;\n  }\n\n  shape_ = other.shape_;\n  type_ = other.type_;\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/tensor_list.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/tensor_list.h>\n\nnamespace modelbox {\nvoid TensorList::SetType(ModelBoxDataType type) {\n  for (auto& buffer : bl_->buffer_list_) {\n    auto tensor = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n    tensor->SetType(type);\n  }\n}\n\nstd::vector<std::vector<size_t>> TensorList::GetShape() const {\n  std::vector<std::vector<size_t>> shapes(bl_->buffer_list_.size());\n  size_t i = 0;\n  for (auto& buffer : bl_->buffer_list_) {\n    if (buffer) {\n      auto tensor = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n      const auto& shape = tensor->Shape();\n      shapes[i++] = shape;\n    }\n  }\n\n  return shapes;\n}\n\nsize_t TensorList::Size() const { return bl_->Size(); }\n\nsize_t TensorList::GetBytes() const { return bl_->GetBytes(); }\n\nstd::shared_ptr<TensorBuffer> TensorList::operator[](size_t pos) {\n  if (!bl_->At(pos)) {\n    return nullptr;\n  }\n\n  return std::dynamic_pointer_cast<TensorBuffer>(bl_->At(pos));\n}\n\nstd::shared_ptr<const TensorBuffer> TensorList::operator[](size_t pos) const {\n  if (!bl_->At(pos)) {\n    return nullptr;\n  }\n\n  return std::dynamic_pointer_cast<TensorBuffer>(bl_->At(pos));\n}\n\nstd::shared_ptr<TensorBuffer> TensorList::At(size_t idx) {\n  if (!bl_->At(idx)) {\n    return nullptr;\n  }\n\n  return 
std::dynamic_pointer_cast<TensorBuffer>(bl_->At(idx));\n}\n\nstd::shared_ptr<const TensorBuffer> TensorList::At(size_t idx) const {\n  if (!bl_->At(idx)) {\n    return nullptr;\n  }\n\n  return std::dynamic_pointer_cast<TensorBuffer>(bl_->At(idx));\n}\n\nvoid TensorList::PushBack(const std::shared_ptr<TensorBuffer>& buf) {\n  bl_->PushBack(buf);\n}\n\nStatus TensorList::CopyMeta(const std::shared_ptr<TensorList>& tl,\n                            bool is_override) {\n  if (!tl || Size() != tl->Size()) {\n    return STATUS_FAULT;\n  }\n\n  return bl_->CopyMeta(tl->bl_, is_override);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/type.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/type.h>\n\nnamespace modelbox {\n\nsize_t GetDataTypeSize(ModelBoxDataType type) {\n#define CASE(T) \\\n  case T:       \\\n    return DataTypeSize<T>::Size;\n  switch (type) {\n    CASE(MODELBOX_FLOAT);\n    CASE(MODELBOX_DOUBLE);\n    CASE(MODELBOX_INT32);\n    CASE(MODELBOX_UINT32);\n    CASE(MODELBOX_UINT16);\n    CASE(MODELBOX_UINT8);\n    CASE(MODELBOX_INT16);\n    CASE(MODELBOX_INT8);\n    CASE(MODELBOX_STRING);\n    CASE(MODELBOX_INT64);\n    CASE(MODELBOX_UINT64);\n    CASE(MODELBOX_BOOL);\n    CASE(MODELBOX_HALF);\n    default:\n      return 0;\n  }\n#undef CASE\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/engine/virtual_node.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/virtual_node.h\"\n\n#include <modelbox/session.h>\n#include <modelbox/session_context.h>\n#include <stdint.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nInputVirtualNode::InputVirtualNode(\n    std::string device_name, std::string device_id,\n    std::shared_ptr<DeviceManager> device_manager)\n    : device_name_(std::move(device_name)), device_id_(std::move(device_id)) {\n  queue_size_ = -1;\n  priority_ = 0;\n  device_mgr_ = std::move(device_manager);\n}\n\nInputVirtualNode::~InputVirtualNode() = default;\n\nStatus InputVirtualNode::Init(const std::set<std::string>& input_port_names,\n                              const std::set<std::string>& output_port_names,\n                              const std::shared_ptr<Configuration>& config) {\n  // NOLINTNEXTLINE\n  auto status = NodeBase::Init(input_port_names, output_port_names, config);\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  extern_ports_.clear();\n  auto ext_queue_size =\n      config->GetUint64(\"queue_size_external\", DEFAULT_QUEUE_SIZE_EXTERNAL);\n  for (const auto& output_port_name : output_port_names) {\n    auto port = std::make_shared<InPort>(\n        output_port_name,\n        std::dynamic_pointer_cast<NodeBase>(shared_from_this()), GetPriority(),\n        ext_queue_size);\n    
extern_ports_.emplace_back(port);\n  }\n\n  for (auto& port : extern_ports_) {\n    port->Init();\n  }\n\n  return STATUS_OK;\n}\n\nStatus InputVirtualNode::Open() { return STATUS_OK; }\n\nstd::shared_ptr<Device> InputVirtualNode::GetDevice() {\n  if (device_mgr_ == nullptr) {\n    MBLOG_ERROR << \"device_mgr is nullptr \";\n    return nullptr;\n  }\n\n  auto device = device_mgr_->CreateDevice(device_name_, device_id_);\n  if (device == nullptr) {\n    MBLOG_ERROR << \"device is nullptr.\"\n                << \" device_name: \" << device_name_\n                << \" device_id_: \" << device_id_;\n    return nullptr;\n  }\n  return device;\n}\n\nStatus InputVirtualNode::Run(RunType type) {\n  // data from ExternalDataMap has already set inherit info, and match is this\n  // input virtual node, we could simply send data to output\n  std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      ports_data_cache;\n  // recv port data\n  for (auto& port : extern_ports_) {\n    auto& data_cache = ports_data_cache[port->GetName()];\n    port->Recv(data_cache, -1);\n  }\n  // send to output port\n  for (auto& port : output_ports_) {\n    auto& data_cache = ports_data_cache[port->GetName()];\n    if (data_cache.empty()) {\n      continue;\n    }\n\n    port->Send(data_cache);\n  }\n  return STATUS_SUCCESS;\n}\n\nOutputVirtualNode::OutputVirtualNode(\n    const std::string& device_name, const std::string& device_id,\n    std::shared_ptr<DeviceManager> device_manager)\n    : device_name_(device_name), device_id_(device_id) {\n  queue_size_ = -1;\n  priority_ = 0;\n  device_mgr_ = std::move(device_manager);\n  target_device_ = device_mgr_->CreateDevice(device_name, device_id);\n}\n\nOutputVirtualNode::~OutputVirtualNode() = default;\n\nStatus OutputVirtualNode::Init(const std::set<std::string>& input_port_names,\n                               const std::set<std::string>& output_port_names,\n                               const std::shared_ptr<Configuration>& 
config) {\n  // NOLINTNEXTLINE\n  auto status = NodeBase::Init(input_port_names, output_port_names, config);\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  auto port_count = GetInputNum();\n  if (port_count == 0) {\n    port_count = GetExternNum();\n  }\n  input_match_stream_mgr_ =\n      std::make_shared<InputMatchStreamManager>(name_, queue_size_, port_count);\n  input_match_stream_mgr_->SetInputBufferInOrder(true);\n  input_match_stream_mgr_->SetInputStreamGatherAll(false);\n\n  if (config->GetString(\"device\") == device_name_) {\n    need_move_to_device_ = true;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus OutputVirtualNode::Open() { return STATUS_SUCCESS; }\n\n/**\n * @brief remove the data can not send out, in case user debug\n **/\nvoid OutputVirtualNode::EraseInvalidData() {\n  for (auto& in_port : input_ports_) {\n    auto in_queue = in_port->GetQueue();\n    std::shared_ptr<Buffer> buffer;\n    while (in_queue->Front(&buffer)) {\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      if (index_info->GetStream()->GetSession()->HasSessionIO()) {\n        // front data in this port is valid, jump to run\n        break;\n      }\n\n      in_queue->Pop(&buffer);\n    }\n  }\n}\n\nStatus OutputVirtualNode::Run(RunType type) {\n  EraseInvalidData();\n  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;\n  auto ret = input_match_stream_mgr_->LoadData(\n      input_ports_, [](const std::shared_ptr<Buffer>& buffer) {\n        // no need to cache buffer that can not send to user\n        auto index_info = BufferManageView::GetIndexInfo(buffer);\n        return !(index_info->GetStream()->GetSession()->HasSessionIO());\n      });\n  if (!ret) {\n    MBLOG_ERROR << \"OutputVirtualNode load data from input ports failed, error \"\n                << ret;\n    return ret;\n  }\n\n  ret = input_match_stream_mgr_->GenMatchStreamData(match_stream_data_list);\n  if (!ret) {\n    MBLOG_ERROR << \"OutputVirtualNode generate 
match stream data failed, error \"\n                << ret;\n    return ret;\n  }\n\n  if (match_stream_data_list.empty()) {\n    return STATUS_SUCCESS;\n  }\n\n  for (auto& match_stream_data : match_stream_data_list) {\n    auto buffer_count = match_stream_data->GetDataCount();\n    if (buffer_count == 0) {\n      continue;\n    }\n\n    auto stream_data_map = match_stream_data->GetBufferList();\n    auto session = match_stream_data->GetSession();\n\n    if (session->IsAbort()) {\n      MBLOG_INFO << \"session \" << session->GetSessionCtx()->GetSessionId()\n                 << \", processed over\";\n      continue;\n    }\n\n    // for session end, when all data processed, session will be released\n    // automatically\n    // send session data to user\n    auto io =\n        std::dynamic_pointer_cast<ExternalDataMapImpl>(session->GetSessionIO());\n    if (io == nullptr) {\n      // user release io handle, no need to push output data\n      continue;\n    }\n    OutputBufferList output;\n    std::shared_ptr<FlowUnitError> last_error;\n    for (auto& port_data : *stream_data_map) {\n      const auto& port_name = port_data.first;\n      auto& data_list = port_data.second;\n      std::vector<std::shared_ptr<Buffer>> valid_output;\n      for (auto& data : data_list) {\n        auto index_info = BufferManageView::GetIndexInfo(data);\n        if (index_info->IsEndFlag()) {\n          continue;\n        }\n\n        if (index_info->IsPlaceholder()) {\n          continue;\n        }\n\n        if (data->HasError()) {\n          last_error = std::make_shared<FlowUnitError>(data->GetErrorMsg());\n        }\n\n        if (need_move_to_device_ && data->GetDevice() != target_device_) {\n          data = data->CopyTo(target_device_);\n        }\n        valid_output.push_back(data);\n      }\n\n      if (valid_output.empty()) {\n        continue;\n      }\n\n      output[port_name] = std::make_shared<BufferList>(valid_output);\n    }\n\n    if (output.empty()) {\n      
continue;\n    }\n    io->PushGraphOutputBuffer(output);\n    io->SetLastError(last_error);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<Device> OutputVirtualNode::GetDevice() {\n  if (device_mgr_ == nullptr) {\n    MBLOG_ERROR << \"device_mgr is nullptr \";\n    return nullptr;\n  }\n\n  auto device = device_mgr_->CreateDevice(device_name_, device_id_);\n  if (device == nullptr) {\n    MBLOG_ERROR << \"device is nullptr.\"\n                << \" device_name: \" << device_name_\n                << \" device_id_: \" << device_id_;\n    return nullptr;\n  }\n  return device;\n}\n\nSessionUnmatchCache::SessionUnmatchCache(\n    const std::set<std::string>& port_names) {}\n\nvoid SessionUnmatchCache::SetTargetDevice(\n    std::shared_ptr<Device> target_device) {\n  target_device_ = std::move(target_device);\n}\n\nStatus SessionUnmatchCache::CacheBuffer(const std::string& port_name,\n                                        const std::shared_ptr<Buffer>& buffer) {\n  if (buffer->HasError()) {\n    last_error_ = std::make_shared<FlowUnitError>(buffer->GetErrorMsg());\n  }\n\n  auto buffer_index = BufferManageView::GetIndexInfo(buffer);\n\n  // cache data\n  auto& port_streams = port_streams_map_[port_name];\n  auto stream = buffer_index->GetStream();\n  port_streams[stream].push_back(buffer);\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<FlowUnitError> SessionUnmatchCache::GetLastError() {\n  return last_error_;\n}\n\nStatus SessionUnmatchCache::PopCache(OutputBufferList& output_buffer_list) {\n  size_t empty_port = 0;\n  for (auto& port_streams_item : port_streams_map_) {\n    const auto& port_name = port_streams_item.first;\n    auto& port_streams = port_streams_item.second;\n    if (port_streams.empty()) {\n      output_buffer_list[port_name] = std::make_shared<BufferList>();\n      ++empty_port;\n      continue;\n    }\n\n    auto first_stream_item = port_streams.begin();\n    auto& first_stream_data_list = first_stream_item->second;\n    
std::vector<std::shared_ptr<Buffer>> valid_data_list;\n    for (auto& buffer : first_stream_data_list) {\n      auto index = BufferManageView::GetIndexInfo(buffer);\n      if (index->IsEndFlag()) {\n        continue;\n      }\n\n      if (index->IsPlaceholder()) {\n        continue;\n      }\n\n      if (target_device_ != nullptr && buffer->GetDevice() != target_device_) {\n        valid_data_list.push_back(buffer->CopyTo(target_device_));\n        continue;\n      }\n\n      valid_data_list.push_back(buffer);\n    }\n    output_buffer_list[port_name] =\n        std::make_shared<BufferList>(valid_data_list);\n    port_streams.erase(first_stream_item);\n  }\n\n  if (empty_port == port_streams_map_.size()) {\n    return STATUS_NODATA;\n  }\n\n  return STATUS_CONTINUE;\n}\n\nOutputUnmatchVirtualNode::OutputUnmatchVirtualNode(\n    const std::string& device_name, const std::string& device_id,\n    std::shared_ptr<DeviceManager> device_manager)\n    : device_name_(device_name), device_id_(device_id) {\n  queue_size_ = -1;\n  priority_ = 0;\n  device_mgr_ = std::move(device_manager);\n  target_device_ = device_mgr_->GetDevice(device_name, device_id);\n}\n\nOutputUnmatchVirtualNode::~OutputUnmatchVirtualNode() = default;\n\nStatus OutputUnmatchVirtualNode::Init(\n    const std::set<std::string>& input_port_names,\n    const std::set<std::string>& output_port_names,\n    const std::shared_ptr<Configuration>& config) {\n  if (config->GetString(\"device\") == device_name_) {\n    need_move_to_device_ = true;\n  }\n\n  // NOLINTNEXTLINE\n  return NodeBase::Init(input_port_names, output_port_names, config);\n}\n\nStatus OutputUnmatchVirtualNode::Open() { return STATUS_SUCCESS; }\n\nStatus OutputUnmatchVirtualNode::Run(RunType type) {\n  for (auto& in_port : input_ports_) {\n    std::vector<std::shared_ptr<Buffer>> buffers;\n    in_port->Recv(buffers, -1);\n    for (auto& buffer : buffers) {\n      auto buffer_index_info = BufferManageView::GetIndexInfo(buffer);\n      auto 
session = buffer_index_info->GetStream()->GetSession();\n      if (session->IsAbort()) {\n        continue;\n      }\n      auto cache_item = session_cache_map_.find(session);\n      std::shared_ptr<SessionUnmatchCache> session_cache;\n      if (cache_item == session_cache_map_.end()) {\n        session_cache = std::make_shared<SessionUnmatchCache>(GetInputNames());\n        session_cache_map_[session] = session_cache;\n        if (need_move_to_device_) {\n          session_cache->SetTargetDevice(target_device_);\n        }\n      } else {\n        session_cache = cache_item->second;\n      }\n      session_cache->CacheBuffer(in_port->GetName(), buffer);\n    }\n  }\n\n  for (auto iter = session_cache_map_.begin();\n       iter != session_cache_map_.end();) {\n    const auto& session = iter->first;\n    auto& cache = iter->second;\n    auto io =\n        std::dynamic_pointer_cast<ExternalDataMapImpl>(session->GetSessionIO());\n    if (io != nullptr) {\n      OutputBufferList output_buffer_list;\n      io->SetLastError(cache->GetLastError());\n      while (cache->PopCache(output_buffer_list) != STATUS_NODATA) {\n        io->PushGraphOutputBuffer(output_buffer_list);\n      }\n    }\n\n    iter = session_cache_map_.erase(iter);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<Device> OutputUnmatchVirtualNode::GetDevice() {\n  if (device_mgr_ == nullptr) {\n    MBLOG_ERROR << \"device_mgr is nullptr\";\n    return nullptr;\n  }\n\n  auto device = device_mgr_->CreateDevice(device_name_, device_id_);\n  if (device == nullptr) {\n    MBLOG_ERROR << \"device is nullptr.\"\n                << \" device_name: \" << device_name_\n                << \" device_id_: \" << device_id_;\n    return nullptr;\n  }\n  return device;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/libmodelbox/flow/flow.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow.h\"\n\n#include <fstream>\n#include <utility>\n\n#include \"modelbox/base/timer.h\"\n\nnamespace modelbox {\n\nStatus FlowSetupLog(const std::shared_ptr<Configuration>& config) {\n  if (!config) {\n    return {STATUS_INVALID, \"config is nullptr.\"};\n  }\n\n  auto ret = config->GetSubConfig(\"log\");\n  if (!StatusError) {\n    return StatusError;\n  }\n\n  auto str_level = ret->GetString(\"level\", \"\");\n  if (str_level.length() == 0) {\n    return {STATUS_NOTFOUND, \"Config log level not found.\"};\n  }\n\n  auto level = LogLevelStrToLevel(str_level);\n  if (level == LOG_OFF && !StatusError) {\n    return {StatusError};\n  }\n\n  ModelBoxLogger.GetLogger()->SetLogLevel(level);\n\n  return STATUS_OK;\n}\n\nFlow::Flow() = default;\n\nFlow::~Flow() { Clear(); };\n\nvoid Flow::RegisterFlowUnit(\n    const std::shared_ptr<modelbox::FlowUnitBuilder>& flowunit_builder) {\n  auto flowunit_factory =\n      std::make_shared<RegFlowUnitFactory>(flowunit_builder);\n  auto driver_desc = std::make_shared<DriverDesc>();\n  driver_desc->SetClass(DRIVER_CLASS_FLOWUNIT);\n  driver_desc->SetType(flowunit_factory->GetFlowUnitFactoryType());\n  driver_desc->SetName(flowunit_factory->GetFlowUnitFactoryName());\n  auto driver = std::make_shared<Driver>();\n  driver->SetDriverDesc(driver_desc);\n  
flowunit_factory->SetDriver(driver);\n  flowunit_factory_list_.emplace_back(std::move(flowunit_factory));\n}\n\nStatus Flow::InitComponent() {\n  auto ret = drivers_->Initialize(config_->GetSubConfig(\"driver\"));\n  if (!ret) {\n    MBLOG_ERROR << \"init driver failed, err \" << ret;\n    return ret;\n  }\n\n  Defer {\n    if (ret == STATUS_OK) {\n      return;\n    }\n\n    Clear();\n  };\n\n  ret = drivers_->Scan();\n  if (!ret) {\n    MBLOG_ERROR << \"driver scan failed, err \" << ret;\n    return ret;\n  }\n\n  TimerGlobal::Start();\n  timer_run_ = true;\n\n  ret = device_mgr_->Initialize(drivers_, config_);\n  if (!ret) {\n    MBLOG_ERROR << \"Inital device failed, \" << ret.WrapErrormsgs();\n    return {ret, \"Inital device failed.\"};\n  }\n\n  ret = flowunit_mgr_->Initialize(drivers_, device_mgr_, config_);\n  if (!ret) {\n    MBLOG_ERROR << \"Initial flowunit manager failed, \" << ret.WrapErrormsgs();\n    return {ret, \"Initial flowunit manager failed.\"};\n  }\n\n  ret = profiler_->Init();\n  if (!ret) {\n    MBLOG_ERROR << \"Initial profiler failed, \" << ret.WrapErrormsgs();\n    return {ret, \"Initial profiler failed.\"};\n  }\n\n  ret = graph_->Initialize(flowunit_mgr_, device_mgr_, profiler_, config_);\n  if (!ret) {\n    MBLOG_ERROR << \"Initial graph failed, \" << ret.WrapErrormsgs();\n    return {ret, \"Initial graph failed.\"};\n  }\n\n  return STATUS_OK;\n}\n\nvoid Flow::Clear() {\n  if (graph_) {\n    graph_->Shutdown();\n  }\n  graph_ = nullptr;\n  graphconfig_ = nullptr;\n  flowunit_mgr_ = nullptr;\n  device_mgr_ = nullptr;\n  graphconf_mgr_ = nullptr;\n  profiler_ = nullptr;\n  args_ = nullptr;\n  if (drivers_) {\n    drivers_->Clear();\n  }\n\n  if (timer_run_) {\n    TimerGlobal::Stop();\n    timer_run_ = false;\n  }\n}\n\nStatus Flow::GetInputArgs(\n    std::shared_ptr<Configuration>& config,\n    const std::unordered_map<std::string, std::string>& input_args) {\n  // get default args\n  auto sub_keys = config->GetSubKeys(\"args\");\n  
if (!sub_keys.empty()) {\n    auto args_config = config->GetSubConfig(\"args\");\n    args_ = std::make_shared<std::unordered_map<std::string, std::string>>();\n    for (const auto& key : sub_keys) {\n      auto value = args_config->GetSubConfig(key)->GetString(\"default\");\n      args_->insert({key, value});\n    }\n  }\n\n  // get final args\n  for (const auto& input_arg : input_args) {\n    if (args_ == nullptr || args_->find(input_arg.first) == args_->end()) {\n      MBLOG_ERROR << \"input args key:\" << input_arg.first\n                  << \" is not in the flow.\";\n      return STATUS_FAULT;\n    }\n\n    MBLOG_INFO << \"input args key:\" << input_arg.first << \" use custom value.\";\n    (*args_)[input_arg.first] = input_arg.second;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::InitByName(\n    const std::string& name,\n    const std::unordered_map<std::string, std::string>& args,\n    const std::string& flow_dir) {\n  Status ret;\n  std::string flow_path;\n  auto status = GetGraphFilePathByName(name, flow_dir, flow_path);\n  if (status != STATUS_OK) {\n    MBLOG_ERROR << \"failed find toml file, errmsg:\" << status.Errormsg();\n    return status;\n  }\n\n  std::shared_ptr<Configuration> config;\n  ret = GetConfigByGraphFile(flow_path, config, FORMAT_AUTO);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"read config from  toml:\" << flow_path\n                << \"failed, err :\" << ret.Errormsg();\n    return ret;\n  }\n\n  ret = GetInputArgs(config, args);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  return Init(config);\n}\n\nStatus Flow::Init(const std::shared_ptr<FlowGraphDesc>& flow_graph_desc) {\n  config_ = flow_graph_desc->GetConfig();\n  drivers_ = std::make_shared<Drivers>();\n  device_mgr_ = std::make_shared<DeviceManager>();\n  flowunit_mgr_ = std::make_shared<FlowUnitManager>();\n  profiler_ = std::make_shared<Profiler>(device_mgr_, config_);\n  graph_ = std::make_shared<Graph>();\n\n  auto factory_list = flowunit_factory_list_;\n  
flow_graph_desc->GetFuncFactoryList(factory_list);\n\n  for (auto& fu_factory : factory_list) {\n    flowunit_mgr_->Register(fu_factory);\n  }\n\n  auto ret = InitComponent();\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  gcgraph_ = flow_graph_desc->GenGCGraph(flowunit_mgr_);\n  if (gcgraph_ == nullptr) {\n    MBLOG_ERROR << \"generate graph failed\";\n    return StatusError;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::StartRun() {\n  auto ret = Build();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = RunAsync();\n  if (!ret) {\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::Init(std::shared_ptr<Configuration> config) {\n  config_ = std::move(config);\n  drivers_ = std::make_shared<Drivers>();\n  device_mgr_ = std::make_shared<DeviceManager>();\n  flowunit_mgr_ = std::make_shared<FlowUnitManager>();\n  graphconf_mgr_ = std::make_shared<GraphConfigManager>();\n  profiler_ = std::make_shared<Profiler>(device_mgr_, config_);\n  graph_ = std::make_shared<Graph>();\n  graphconfig_ = nullptr;\n\n  if (config_ == nullptr) {\n    return {STATUS_INVALID, \"Load config failed, config is invalid.\"};\n  }\n\n  if (config_->GetBool(\"flow.enable\", true) == false) {\n    return {STATUS_PERMIT, \"flow is disabled\"};\n  }\n\n  FlowSetupLog(config_);\n\n  for (auto& fu_factory : flowunit_factory_list_) {\n    flowunit_mgr_->Register(fu_factory);\n  }\n\n  auto ret = InitComponent();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = graphconf_mgr_->Initialize(drivers_, config_);\n  if (!ret) {\n    MBLOG_ERROR << \"Init graph config failed, \" << ret.WrapErrormsgs();\n    return {ret, \"Init graph config failed.\"};\n  }\n\n  graphconfig_ = graphconf_mgr_->LoadGraphConfig(config_);\n  if (graphconfig_ == nullptr) {\n    MBLOG_ERROR << \"Load graph config failed\";\n    return {StatusError, \"load graph failed.\"};\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::GuessConfFormat(const std::string& configfile,\n                             const std::string& 
data, enum Format* format) {\n  *format = FORMAT_UNKNOWN;\n  std::string extension = configfile.substr(configfile.find_last_of('.') + 1);\n  if (extension == \"json\") {\n    *format = FORMAT_JSON;\n    return STATUS_OK;\n  }\n\n  if (extension == \"toml\") {\n    *format = FORMAT_TOML;\n    return STATUS_OK;\n  }\n\n  if (data.length() <= 0) {\n    return {STATUS_NOTSUPPORT, \"unknown file format\"};\n  }\n\n  size_t i = 0;\n  for (i = 0; i < data.length(); i++) {\n    if (data[i] != ' ' && data[i] != '\\t' && data[i] != '\\n' &&\n        data[i] != '\\r') {\n      break;\n    }\n  }\n\n  if (i == data.length()) {\n    i = data.length() - 1;\n  }\n\n  if (data[i] == '{') {\n    *format = FORMAT_JSON;\n  } else {\n    *format = FORMAT_TOML;\n  }\n\n  if (*format == FORMAT_UNKNOWN) {\n    return {STATUS_NOTSUPPORT, \"unknown file format\"};\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::ConfigFileRead(const std::string& configfile, Format format,\n                            std::istringstream* ifs) {\n  Status ret;\n  std::string toml_data;\n  std::string file_content;\n  std::ifstream infile(configfile);\n  Format config_format = format;\n\n  if (infile.fail()) {\n    std::string msg = \"read file \" + configfile + \" failed, \" + StrError(errno);\n    return {STATUS_BADCONF, msg};\n  }\n\n  Defer { infile.close(); };\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n\n  if (config_format == FORMAT_AUTO) {\n    ret = GuessConfFormat(configfile, data, &config_format);\n    if (!ret) {\n      return {ret, \"unsupport file format\"};\n    }\n  }\n\n  if (config_format == FORMAT_JSON) {\n    ret = JsonToToml(data, &toml_data);\n    if (!ret) {\n      return {ret, \"json file is invalid.\"};\n    }\n  } else if (config_format == FORMAT_TOML) {\n    toml_data = data;\n  } else {\n    return {STATUS_NOTSUPPORT, \"unsupport file format\"};\n  }\n\n  ifs->str(toml_data);\n  return STATUS_OK;\n}\n\nStatus 
Flow::Init(const std::string& name, const std::string& graph,\n                  Format format) {\n  Status ret;\n  ConfigurationBuilder config_builder;\n  std::shared_ptr<Configuration> config;\n  std::istringstream ifs;\n\n  if (graph.length() <= 0) {\n    return {STATUS_NOTSUPPORT, \"unknown file format\"};\n  }\n\n  if (graph[0] == '{') {\n    std::string toml_data;\n    ret = JsonToToml(graph, &toml_data);\n    if (!ret) {\n      return {ret, \"invalid graph\"};\n    }\n    ifs.str(toml_data);\n  } else {\n    ifs.str(graph);\n  }\n\n  config = config_builder.Build(ifs, name);\n  if (config == nullptr) {\n    return {StatusError,\n            \"Load config file failed, detail: \" + StatusError.Errormsg()};\n  }\n\n  ret = Init(config);\n  if (!ret) {\n    MBLOG_WARN << \"Init failed, graph: \" << name << \" status: \" << ret;\n  }\n\n  return ret;\n}\n\nStatus Flow::GetGraphFilePathByName(const std::string& flow_name,\n                                    const std::string& graph_dir,\n                                    std::string& graph_path) {\n  std::vector<std::string> toml_list;\n  std::string filter = \"*.toml\";\n  auto status = ListSubDirectoryFiles(graph_dir, filter, &toml_list);\n  if (status != STATUS_OK) {\n    MBLOG_WARN << \"find \" << flow_name << \" toml file in directory \"\n               << graph_dir << \" failed.\";\n  }\n\n  filter = \"*.json\";\n  std::vector<std::string> json_list;\n  status = ListSubDirectoryFiles(graph_dir, filter, &json_list);\n  if (status != STATUS_OK) {\n    MBLOG_WARN << \"find \" << flow_name << \" json file in directory \"\n               << graph_dir << \" failed.\";\n  }\n\n  toml_list.insert(toml_list.end(), json_list.begin(), json_list.end());\n  if (toml_list.empty()) {\n    std::string err_msg =\n        \"there is no graph file named \" + flow_name + \" in \" + graph_dir;\n    return {STATUS_NOTFOUND, err_msg};\n  }\n\n  for (const auto& iter : toml_list) {\n    std::shared_ptr<Configuration> config;\n   
 auto status = GetConfigByGraphFile(iter, config, Flow::FORMAT_AUTO);\n    if (status != STATUS_OK) {\n      continue;\n    }\n\n    if (config->GetString(\"flow.name\") == flow_name) {\n      graph_path = iter;\n      MBLOG_DEBUG << \"found flow name: \" << config->GetString(\"flow.name\")\n                  << \", toml path = \" << iter;\n      return STATUS_OK;\n    }\n  }\n\n  return {STATUS_NOTFOUND,\n          \"failed find flow name:\" + flow_name + \"'s toml file\"};\n}\n\nStatus Flow::GetConfigByGraphFile(const std::string& configfile,\n                                  std::shared_ptr<Configuration>& config,\n                                  Format format) {\n  ConfigurationBuilder config_builder;\n  std::istringstream ifs;\n\n  auto ret = ConfigFileRead(configfile, format, &ifs);\n  if (!ret) {\n    return ret;\n  }\n\n  config = config_builder.Build(ifs, configfile);\n  if (config == nullptr) {\n    return {StatusError,\n            \"Load config file failed, detail: \" + StatusError.Errormsg()};\n  }\n  return STATUS_OK;\n}\n\nStatus Flow::Init(const std::string& configfile, Format format) {\n  Status ret;\n  std::shared_ptr<Configuration> config;\n\n  ret = GetConfigByGraphFile(configfile, config, format);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"read config from  toml:\" << configfile\n                << \"failed, err :\" << ret.Errormsg();\n    return ret;\n  }\n  // TODO: Add args configuration\n  ret = Init(config);\n  if (!ret) {\n    MBLOG_WARN << \"Init failed, configfile: \" << configfile\n               << \" status: \" << ret;\n  }\n\n  return ret;\n}\n\nStatus Flow::Init(std::istream& is, const std::string& fname) {\n  ConfigurationBuilder config_builder;\n\n  auto config = config_builder.Build(is, fname);\n  if (config == nullptr) {\n    return {StatusError,\n            \"Load config file failed, detail: \" + StatusError.Errormsg()};\n  }\n\n  auto status = Init(config);\n  if (!status) {\n    MBLOG_WARN << \"Init failed, 
configfile: \" << fname << \" status: \" << status;\n  }\n\n  return status;\n}\n\nStatus Flow::Build() {\n  if (graph_ == nullptr || (graphconfig_ == nullptr && gcgraph_ == nullptr)) {\n    return {STATUS_FAULT, \"Flow not initialized.\"};\n  }\n\n  if (graphconfig_ != nullptr) {\n    gcgraph_ = graphconfig_->Resolve();\n    if (gcgraph_ == nullptr) {\n      MBLOG_ERROR << \"graph config resolve failed, \"\n                  << StatusError.WrapErrormsgs();\n      return STATUS_FAULT;\n    }\n  }\n\n  // update args\n  if (args_ != nullptr && !args_->empty()) {\n    auto nodes = gcgraph_->GetAllNodes();\n    for (auto& node : nodes) {\n      auto node_config = node.second->GetConfiguration();\n      auto node_keys = node_config->GetKeys();\n      for (const auto& key : node_keys) {\n        auto value = node_config->GetString(key);\n        auto value_name = value.substr(1, value.size());\n        if (std::regex_match(value, std::regex(\"^\\\\$.*\")) &&\n            args_->find(value_name) != args_->end()) {\n          node_config->SetProperty(key, (*args_)[value_name]);\n        }\n      }\n    }\n  }\n\n  auto ret = graph_->Build(gcgraph_);\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"build graph failed, \" << ret.WrapErrormsgs();\n    return ret;\n  }\n\n  return STATUS_OK;\n}\n\nStatus Flow::Run() {\n  if (graph_ == nullptr) {\n    return {STATUS_FAULT, \"Flow not initialized.\"};\n  }\n\n  auto ret = graph_->Run();\n  if (ret != STATUS_OK) {\n    MBLOG_ERROR << \"graph run failed, \" << ret.WrapErrormsgs();\n    return ret;\n  }\n  return STATUS_OK;\n}\n\nStatus Flow::RunAsync() {\n  if (graph_ == nullptr) {\n    return {STATUS_FAULT, \"Flow not initialized.\"};\n  }\n\n  graph_->RunAsync();\n  return STATUS_OK;\n}\n\nStatus Flow::Wait(int64_t millisecond, Status* ret_val) {\n  if (graph_ == nullptr) {\n    return {STATUS_INPROGRESS, \"Flow not initialized.\"};\n  }\n\n  auto ret = graph_->Wait(millisecond, ret_val);\n  if (ret != STATUS_OK) {\n    if (ret 
== STATUS_BUSY || ret == STATUS_SHUTDOWN ||\n        ret == STATUS_NORESPONSE) {\n      return ret;\n    }\n    MBLOG_ERROR << \"flow wait error, \" << ret.WrapErrormsgs();\n    return ret;\n  }\n  return STATUS_OK;\n}\n\nvoid Flow::Stop() {\n  if (graph_ == nullptr) {\n    MBLOG_ERROR << \"Flow not initialized.\";\n    return;\n  }\n  graph_->Shutdown();\n\n  graph_->Wait(1000);\n}\n\nstd::shared_ptr<ExternalDataMap> Flow::CreateExternalDataMap() {\n  if (graph_ == nullptr) {\n    MBLOG_ERROR << \"graph is nullptr\";\n    return nullptr;\n  }\n  return graph_->CreateExternalDataMap();\n}\n\nstd::shared_ptr<FlowStreamIO> Flow::CreateStreamIO() {\n  if (graph_ == nullptr) {\n    MBLOG_ERROR << \"graph is nullptr\";\n    return nullptr;\n  }\n\n  auto external_data_map = graph_->CreateExternalDataMap();\n  if (external_data_map == nullptr) {\n    MBLOG_ERROR << \"create external data for graph failed\";\n    return nullptr;\n  }\n\n  return std::make_shared<FlowStreamIO>(external_data_map);\n}\n\nstd::shared_ptr<Profiler> Flow::GetProfiler() { return profiler_; }\n\nstd::string Flow::GetGraphId() const {\n  if (graph_ == nullptr) {\n    MBLOG_ERROR << \"graph is nullptr\";\n    return \"\";\n  }\n\n  return graph_->GetId();\n}\n\nstd::string Flow::GetGraphName() const {\n  if (graph_ == nullptr) {\n    MBLOG_ERROR << \"graph is nullptr\";\n    return \"\";\n  }\n\n  return graph_->GetName();\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/include/modelbox/buffer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_BUFFER_H_\n#define MODELBOX_BUFFER_H_\n\n#include <modelbox/base/any.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer_type.h>\n#include <modelbox/error.h>\n#include <modelbox/stream.h>\n\n#include <algorithm>\n#include <atomic>\n#include <memory>\n\nnamespace modelbox {\nclass SessionContext;\nclass Buffer;\nclass BufferList;\nclass BufferIndexInfo;\n\n/**\n * @brief Meta of buffer\n */\nclass BufferMeta {\n public:\n  BufferMeta();\n\n  /**\n   * @brief Copy constructor\n   */\n  BufferMeta(const BufferMeta& other);\n  virtual ~BufferMeta();\n\n  BufferMeta& SetStreamMetaContent(const std::string& key,\n                                   const std::shared_ptr<void>& content);\n\n  /**\n   * @brief Copy meta from anoter buffer meta\n   * @param buf_meta other buffer meta\n   * @param is_override override existing meta key\n   * @return copy result\n   */\n  Status CopyMeta(const std::shared_ptr<BufferMeta>& buf_meta,\n                  bool is_override = false);\n\n  /**\n   * @brief Set meta key pair\n   * @param key meta key\n   * @param value meta value\n   */\n  template <typename T>\n  void Set(const std::string& key, T&& value) {\n    custom_meta_.Set(key, value);\n  }\n\n  /**\n   * @brief Get value of 
key\n   * @param key meta key\n   * @param value meta value\n   * @return whether the key exists\n   */\n  template <typename T>\n  bool Get(const std::string& key, T&& value) {\n    return custom_meta_.Get(key, value);\n  }\n\n  /**\n   * @brief Get value of key return tuple\n   * @param key meta key\n   * @return meta tuple\n   */\n  std::tuple<Any*, bool> Get(const std::string& key);\n  /**\n   * @brief Copy meta\n   * @param other other meta\n   * @return reference of current buffer meta\n   */\n  BufferMeta& operator=(const BufferMeta& other);\n\n  /**\n   * @brief Deep copy meta\n   * @param other other meta\n   * @return reference of current buffer meta\n   */\n  BufferMeta& DeepCopy(const BufferMeta& other);\n\n private:\n  Collection custom_meta_;\n};\n\n/**\n * @brief Buffer type enum\n */\nenum class BufferEnumType {\n  /// RAW data\n  RAW = 0,\n  /// Image\n  IMG = 1,\n  /// String\n  STR = 2,\n};\n\n/**\n * @brief Data buffer, the basic unit of data processing in the flow\n */\nclass Buffer : public std::enable_shared_from_this<Buffer> {\n public:\n  /**\n   * @brief New data buffer\n   */\n  Buffer();\n\n  /**\n   * @brief Create a new buffer related with specific device\n   * @param device related device\n   * @param dev_mem_flags Flags to create device memory\n   */\n  Buffer(const std::shared_ptr<Device>& device, uint32_t dev_mem_flags = 0);\n\n  /**\n   * @brief Create a new buffer related with specific device memory\n   * @param dev_mem related device memory\n   */\n  Buffer(const std::shared_ptr<DeviceMemory>& dev_mem);\n\n  /**\n   * @brief Copy constructor, copy meta from other buffer\n   * @param other other buffer\n   */\n  Buffer(const Buffer& other);\n\n  virtual ~Buffer();\n\n  /**\n   * @brief Create a buffer, apply for memory\n   * @param size memory size\n   * @return create result\n   */\n  virtual Status Build(size_t size);\n\n  /**\n   * @brief Create a buffer from existing memory\n   * @param data data pointer\n   * @param 
data_size data size\n   * @param func data free function\n   * @return create result\n   */\n  virtual Status Build(void* data, size_t data_size,\n                       const DeleteFunction& func = nullptr);\n\n  /**\n   * @brief Create a buffer from existing host memory\n   * @param data data pointer to host memory\n   * @param data_size data size\n   * @param func data free function\n   * @return create result\n   */\n  virtual Status BuildFromHost(void* data, size_t data_size,\n                               const DeleteFunction& func = nullptr);\n\n  /**\n   * @brief Get buffer mutable data pointer, if data is immutable, null pointer\n   * will return\n   * @return mutable buffer data pointer\n   */\n  virtual void* MutableData();\n\n  /**\n   * @brief Get buffer data pointer, buffer pointer is readonly\n   * @return const buffer data pointer\n   */\n  virtual const void* ConstData();\n\n  /**\n   * @brief Is there an error\n   * @return whether has an error\n   */\n  virtual bool HasError() const;\n\n  /**\n   * @brief Save error\n   * @param error_code buffer error code\n   * @param error_msg buffer error message\n   */\n  virtual void SetError(const std::string& error_code,\n                        const std::string& error_msg);\n\n  /**\n   * @brief Get buffer error code\n   * @return error code\n   */\n  virtual std::string GetErrorCode() const;\n\n  /**\n   * @brief Get buffer error message\n   * @return error message\n   */\n  virtual std::string GetErrorMsg() const;\n\n  /**\n   * @brief Get buffer size in bytes\n   * @return buffer size in tyes\n   */\n  virtual size_t GetBytes() const;\n\n  /**\n   * @brief Copy meta from other buffer.\n   * @param buf other buffer.\n   * @param is_override may override existing meta.\n   * @return copy result\n   */\n  virtual Status CopyMeta(const std::shared_ptr<Buffer>& buf,\n                          bool is_override = false);\n\n  /**\n   * @brief Set meta key to buffer\n   * @param key meta key\n   * @param 
value meta value\n   */\n  template <typename T>\n  void Set(const std::string& key, T&& value) {\n    meta_->Set(key, value);\n  }\n\n  /**\n   * @brief Get meta key from the buffer\n   * @param key meta key\n   * @param value meta value\n   * @return whether the key exists\n   */\n  template <typename T>\n  bool Get(const std::string& key, T&& value) {\n    return meta_->Get(key, value);\n  }\n\n  /**\n   * @brief Get value of key return tuple\n   * @param key meta key\n   * @return meta tuple\n   */\n  std::tuple<Any*, bool> Get(const std::string& key);\n\n  /**\n   * @brief Get meta key from the buffer, when the key does not exist, return to\n   * the default value\n   * @param key meta key\n   * @param value meta value\n   * @param default_value if key does not exist, return this value\n   */\n  template <typename T, typename U>\n  void Get(const std::string& key, T&& value, const U& default_value) {\n    auto ret = meta_->Get(key, value);\n    if (!ret) {\n      value = default_value;\n    }\n  }\n\n  /**\n   * @brief Get the device object related to the buffer\n   * @return related device of the buffer\n   */\n  std::shared_ptr<Device> GetDevice() const;\n\n  /**\n   * @brief Copy buffer, only copy object, share same data memory\n   * @return related device of the buffer\n   */\n  virtual std::shared_ptr<Buffer> Copy() const;\n\n  /**\n   * @brief Copy buffer, include meta and memory\n   * @return related device of the buffer\n   */\n  virtual std::shared_ptr<Buffer> DeepCopy() const;\n\n  /**\n   * @brief Copy buffer, include meta and memory\n   * @param dest_device copy data to other device\n   * @return related device of the buffer\n   */\n  virtual std::shared_ptr<Buffer> CopyTo(\n      const std::shared_ptr<Device>& dest_device) const;\n\n  /**\n   * @brief Get buffer type\n   * @return buffer type\n   */\n  BufferEnumType GetBufferType() const;\n\n  /**\n   * @brief Set buffer type\n   * @param type type to set\n   */\n  void 
SetGetBufferType(BufferEnumType type);\n\n  /**\n   * @brief Get device memory of buffer\n   * @return device memory\n   */\n  std::shared_ptr<DeviceMemory> GetDeviceMemory() const;\n\n protected:\n  /**\n   * @brief Deep copy buffer\n   * @return device memory\n   */\n  Status DeepCopy(const Buffer& other);\n\n  /**\n   * @brief Set data mutable\n   * @return set result\n   */\n  Status SetBufferMutable(bool is_mutable);\n\n  /**\n   * @brief Set delayed copy destination device\n   * @param dest_device destination device\n   */\n  void SetDelayedCopyDestinationDevice(std::shared_ptr<Device> dest_device);\n\n  /**\n   * @brief Set delayed copy destination device memory flag\n   * @param mem_flags device memory flag\n   */\n  void SetDelayedCopyDestinationMemFlags(uint32_t mem_flags);\n\n  /**\n   * @brief Clear delayed copy destination info\n   */\n  void ClearDelayedCopyDestinationInfo();\n\n  /**\n   * @brief Get whether need to delayed copy\n   * @param dest_device destination device\n   * @return delayed copy flag.\n   */\n  bool GetDelayedCopyFlag(const std::shared_ptr<Device>& dest_device);\n\n  /**\n   * @brief  copy buffer data to destination device\n   * @return copy result\n   */\n  Status MoveToTargetDevice();\n\n private:\n  friend class BufferList;\n  friend class ExternalDataMapImpl;\n  template <typename QueueType, typename Compare>\n  friend class NotifyPort;\n  friend class BufferManageView;\n  friend class FlowUnitExecData;\n\n  void SetPriority(int priority);\n\n  int GetPriority();\n\n  /// @brief Buffer meta\n  std::shared_ptr<BufferMeta> meta_;\n\n  /// @brief Buffer device memory\n  std::shared_ptr<DeviceMemory> dev_mem_;\n\n  uint32_t dev_mem_flags_{0};\n\n  /// @brief Record delayed copy destination buffer device, when input buffer\n  /// device type is inconsistent with input port type.\n  std::shared_ptr<Device> delayed_copy_dest_device_;\n\n  uint32_t delayed_copy_dest_mem_flags_{0};\n\n  /// @brief Buffer type\n  BufferEnumType 
type_{BufferEnumType::RAW};\n\n  /// @brief Buffer index info in stream\n  std::shared_ptr<BufferIndexInfo> index_info_;\n\n  std::shared_ptr<DataError> data_error_;\n\n  int priority_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_BUFFER_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/buffer_index_info.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_BUFFER_INDEX_INFO_H_\n#define MODELBOX_BUFFER_INDEX_INFO_H_\n\n#include <functional>\n#include <list>\n#include <map>\n#include <memory>\n#include <unordered_map>\n#include <vector>\n\nnamespace modelbox {\n\nclass Stream;\nclass BufferIndexInfo;\nclass Node;\nclass Buffer;\nclass DataError;\n\n/**\n * @brief define all process for buffer\n **/\nenum class BufferProcessType : size_t {\n  EXPAND,\n  CONDITION_START,\n  COLLAPSE,\n  ORIGIN\n};\n\n/**\n * @brief during buffer process, some operation is key operation\n * we need direct record these operation\n * key operation: expand, conditino_start\n * for each key operation, we make new inherit info, so we could trace back\n * there key operation\n **/\nclass BufferInheritInfo {\n public:\n  void SetType(BufferProcessType type);\n\n  BufferProcessType GetType();\n\n  void SetInheritFrom(const std::shared_ptr<BufferIndexInfo> &buffer_index);\n\n  std::shared_ptr<BufferIndexInfo> GetInheritFrom();\n\n  size_t GetDeepth();\n\n private:\n  BufferProcessType type_{BufferProcessType::EXPAND};\n  std::shared_ptr<BufferIndexInfo> inherit_from_buffer_;\n  size_t inherit_deepth_{0};\n};\n\n/**\n * @brief record info for each process at buffer\n * easy to trace through buffer\n **/\nclass BufferProcessInfo {\n public:\n  void SetParentBuffers(\n      const 
std::string &port_name,\n      std::list<std::shared_ptr<BufferIndexInfo>> &&port_buffers);\n\n  const std::map<std::string, std::list<std::shared_ptr<BufferIndexInfo>>>\n      &GetParentBuffers();\n\n  void SetType(BufferProcessType type);\n\n  BufferProcessType GetType();\n\n private:\n  std::map<std::string, std::list<std::shared_ptr<BufferIndexInfo>>>\n      parent_buffers_;\n  BufferProcessType type_{BufferProcessType::ORIGIN};\n};\n\n/**\n * @brief record index info in stream for each buffer\n **/\nclass BufferIndexInfo {\n public:\n  BufferIndexInfo();\n\n  virtual ~BufferIndexInfo();\n\n  void SetInheritInfo(std::shared_ptr<BufferInheritInfo> inherit_info);\n\n  std::shared_ptr<BufferInheritInfo> GetInheritInfo();\n\n  void SetStream(std::shared_ptr<Stream> stream_belong_to);\n\n  std::shared_ptr<Stream> GetStream();\n\n  void SetIndex(size_t index);\n\n  size_t GetIndex();\n\n  bool IsFirstBufferInStream();\n\n  /**\n   * @brief mark end for stream\n   **/\n  void MarkAsEndFlag();\n\n  bool IsEndFlag();\n\n  /**\n   * @brief in case: user drop one buffer, we need keep index\n   **/\n  void MarkAsPlaceholder();\n\n  bool IsPlaceholder();\n\n  void SetProcessInfo(std::shared_ptr<BufferProcessInfo> process_info);\n\n  std::shared_ptr<BufferProcessInfo> GetProcessInfo();\n\n private:\n  std::shared_ptr<Stream> stream_belong_to_;\n  size_t index_in_current_stream_{0};\n  std::shared_ptr<BufferInheritInfo> inherit_info_;\n  std::shared_ptr<BufferProcessInfo>\n      process_info_;  // record how to generate this buffer\n\n  bool is_end_flag_{false};\n  bool is_placeholder_{false};\n};\n\n/**\n * @brief To access manage info in buffer\n **/\nclass BufferManageView {\n public:\n  static std::shared_ptr<BufferIndexInfo> GetIndexInfo(\n      const std::shared_ptr<Buffer> &buffer);\n\n  static void SetIndexInfo(const std::shared_ptr<Buffer> &buffer,\n                           std::shared_ptr<BufferIndexInfo> buffer_index_info);\n\n  /**\n   * @brief buffer generate 
from input, we call input buffer as parent\n   **/\n  static std::shared_ptr<BufferIndexInfo> GetFirstParentBuffer(\n      const std::shared_ptr<Buffer> &buffer);\n\n  static void SetPriority(const std::shared_ptr<Buffer> &buffer, int priority);\n\n  static int GetPriority(const std::shared_ptr<Buffer> &buffer);\n\n  static void SetError(const std::shared_ptr<Buffer> &buffer,\n                       const std::shared_ptr<DataError> &data_error);\n\n  static std::shared_ptr<DataError> GetError(\n      const std::shared_ptr<Buffer> &buffer);\n\n  /**\n   * @brief record the direct input where this buffer comes from\n   **/\n  template <typename T>\n  static void GenProcessInfo(\n      const std::unordered_map<std::string, T> &parent_data,\n      size_t data_count_per_port,\n      const std::function<std::shared_ptr<Buffer>(const T &container,\n                                                  size_t idx)> &get_buffer_at,\n      std::vector<std::shared_ptr<BufferProcessInfo>> &process_info_list,\n      bool all_in_one_process = false);\n};\n\ntemplate <typename T>\nvoid BufferManageView::GenProcessInfo(\n    const std::unordered_map<std::string, T> &parent_data,\n    size_t data_count_per_port,\n    const std::function<std::shared_ptr<Buffer>(const T &container, size_t idx)>\n        &get_buffer_at,\n    std::vector<std::shared_ptr<BufferProcessInfo>> &process_info_list,\n    bool all_in_one_process) {\n  if (all_in_one_process) {\n    process_info_list.push_back(std::make_shared<BufferProcessInfo>());\n  } else {\n    process_info_list.reserve(data_count_per_port);\n    for (size_t i = 0; i < data_count_per_port; ++i) {\n      process_info_list.push_back(std::make_shared<BufferProcessInfo>());\n    }\n  }\n\n  for (auto &port_data_item : parent_data) {\n    auto &port_name = port_data_item.first;\n    auto &data_list = port_data_item.second;\n    auto index_info_list =\n        std::make_shared<std::list<std::shared_ptr<BufferIndexInfo>>>();\n    for (size_t i = 0; i 
< data_count_per_port; ++i) {\n      auto buffer = get_buffer_at(data_list, i);\n      auto index_info = BufferManageView::GetIndexInfo(buffer);\n      index_info_list->push_back(index_info);\n      if (!all_in_one_process) {\n        auto inherit_info = process_info_list[i];\n        inherit_info->SetParentBuffers(port_name, std::move(*index_info_list));\n        index_info_list->clear();\n      }\n    }\n    if (all_in_one_process) {\n      auto inherit_info = process_info_list.front();\n      inherit_info->SetParentBuffers(port_name, std::move(*index_info_list));\n    }\n  }\n}\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_BUFFER_INDEX_INFO_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/buffer_list.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_BUFFER_LIST_H_\n#define MODELBOX_BUFFER_LIST_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer.h>\n#include <modelbox/buffer_type.h>\n#include <modelbox/tensor.h>\n\n#include <algorithm>\n#include <atomic>\n#include <memory>\n#include <unordered_map>\n\nnamespace modelbox {\n\nclass BufferList;\nusing BufferListMap =\n    std::unordered_map<std::string, std::shared_ptr<BufferList>>;\n\n/**\n * @brief Buffer list\n */\nclass BufferList {\n public:\n  /**\n   * @brief Buffer list\n   */\n  BufferList();\n\n  /**\n   * @brief Buffer list with device\n   * @param device pointer to device\n   * @param device_mem_flags Flags to crete device mem\n   */\n  BufferList(const std::shared_ptr<Device>& device,\n             uint32_t device_mem_flags = 0);\n\n  /**\n   * @brief Buffer list from buffer\n   * @param buffer pointer to buffer\n   */\n  BufferList(const std::shared_ptr<Buffer>& buffer);\n\n  /**\n   * @brief Buffer list from vector of buffers\n   * @param buffer_vector buffer vector\n   */\n  BufferList(const std::vector<std::shared_ptr<Buffer>>& buffer_vector);\n\n  /**\n   * @brief BufferList from other BufferList\n   * @param other BufferList\n   */\n  BufferList(const BufferList& other);\n\n  virtual 
~BufferList();\n\n  /**\n   * @brief Build buffer, create memory\n   * @param data_size_list buffer size list\n   * @param contiguous all buffer in single mem area\n   */\n  virtual Status Build(const std::vector<size_t>& data_size_list,\n                       bool contiguous = true);\n\n  /**\n   * @brief Build buffer from host memory\n   * @param data_size_list buffer size list\n   * @param data memory pointer to host.\n   * @param data_size host memory size.\n   * @param func host memory delete or free function.\n   */\n  virtual Status BuildFromHost(const std::vector<size_t>& data_size_list,\n                               void* data, size_t data_size,\n                               const DeleteFunction& func = nullptr);\n\n  /**\n   * @brief Get bufferlist size\n   * @return buffer list size\n   */\n  virtual size_t Size() const;\n\n  /**\n   * @brief Get bufferlist memory bytes number\n   * @return buffer list memory bytes number\n   */\n  virtual size_t GetBytes() const;\n\n  /**\n   * @brief Buffer iterator begin\n   * @return iterator\n   */\n  virtual std::vector<std::shared_ptr<Buffer>>::iterator begin();\n\n  /**\n   * @brief Buffer iterator begin\n   * @return iterator\n   */\n  virtual std::vector<std::shared_ptr<Buffer>>::const_iterator begin() const;\n\n  /**\n   * @brief Buffer iterator end\n   * @return iterator\n   */\n  virtual std::vector<std::shared_ptr<Buffer>>::iterator end();\n\n  /**\n   * @brief Buffer iterator end\n   * @return iterator\n   */\n  virtual std::vector<std::shared_ptr<Buffer>>::const_iterator end() const;\n\n  /**\n   * @brief Get buffer at pos\n   * @param pos position of buffer\n   * @return pointer to buffer\n   */\n  virtual std::shared_ptr<Buffer>& operator[](size_t pos);\n\n  /**\n   * @brief Get buffer at pos\n   * @param pos position of buffer\n   * @return pointer to buffer\n   */\n  virtual const std::shared_ptr<Buffer>& operator[](size_t pos) const;\n\n  /**\n   * @brief Get buffer at pos\n   * @param idx 
position of buffer\n   * @return pointer to buffer\n   */\n  virtual std::shared_ptr<Buffer> At(size_t idx);\n\n  /**\n   * @brief Get buffer at pos\n   * @param idx position of buffer\n   * @return pointer to buffer\n   */\n  virtual std::shared_ptr<Buffer> At(size_t idx) const;\n\n  /**\n   * @brief Push new buffer to buffer list\n   * @param buf pointer to buffer\n   */\n  virtual void PushBack(const std::shared_ptr<Buffer>& buf);\n\n  /**\n   * @brief Assign buffer list\n   * @param buffer_list buffer list to assign\n   */\n  virtual void Assign(const std::vector<std::shared_ptr<Buffer>>& buffer_list);\n\n  /**\n   * @brief Swap buffer list\n   * @param buffer_list buffer list to swap\n   */\n  virtual void Swap(std::vector<std::shared_ptr<Buffer>>& buffer_list);\n\n  /**\n   * @brief Get mutable buffer data pointer\n   * @param idx buffer index\n   * @return buffer data pointer\n   */\n  virtual void* MutableBufferData(size_t idx);\n\n  /**\n   * @brief Get immutable buffer data pointer\n   * @param idx buffer index\n   * @return buffer data pointer\n   */\n  virtual const void* ConstBufferData(size_t idx) const;\n\n  /**\n   * @brief Get mutable buffer data pointer from beginning\n   * @return buffer data pointer from beginning\n   */\n  virtual void* MutableData();\n\n  /**\n   * @brief Get immutable buffer data pointer from beginning\n   * @return buffer data pointer from beginning\n   */\n  virtual const void* ConstData() const;\n\n  /**\n   * @brief Set meta to all buffers\n   * @param key meta key\n   * @param value meta value\n   */\n  template <typename T>\n  void Set(const std::string& key, T&& value) {\n    for (const auto& buffer : buffer_list_) {\n      buffer->Set(key, value);\n    }\n  }\n\n  /**\n   * @brief Copy meta from another buffer list\n   * @param bufferlist another buffer list\n   * @param is_override override exists meta.\n   */\n  virtual Status CopyMeta(const std::shared_ptr<BufferList>& bufferlist,\n                          bool 
is_override = false);\n\n  /**\n   * @brief Get device memory pointer of buffer list\n   * @return pointer to device memory\n   */\n  std::shared_ptr<DeviceMemory> GetDeviceMemory();\n\n  /**\n   * @brief Get device memory pointer of buffer list\n   * @return pointer to device memory\n   */\n  std::vector<std::shared_ptr<DeviceMemory>> GetAllBufferDeviceMemory();\n\n  /**\n   * @brief Get device of buffer list\n   * @return pointer to device\n   */\n  std::shared_ptr<Device> GetDevice();\n\n  /**\n   * @brief Make all buffer memory contiguous\n   * @return make result\n   */\n  Status MakeContiguous();\n\n  /**\n   * @brief Copy a vector of buffer into a buffer list\n   * @param buffer_vector the vector of buffer\n   */\n  void Copy(const std::vector<std::shared_ptr<Buffer>>& buffer_vector);\n  /**\n   * @brief Reset buffer list\n   * @return reset result\n   */\n  Status Reset();\n\n  /**\n   * @brief Move all buffer to one device\n   * if buffer_list has device, then move this device\n   * if buffer_list has no device, move to the device of first buffer\n   * @return Move result\n   */\n  Status MoveAllBufferToTargetDevice();\n\n  /**\n   * @brief Set error\n   * @param error_code buffer error code\n   * @param error_msg buffer error message\n   * @return buffer reference\n   */\n  void SetError(const std::string& error_code, const std::string& error_msg);\n\n  /**\n   * @brief push current device data to buffer list.\n   * support host data to cpu\n   * @param device_data data in current flowunit device\n   * @param data_size size of data\n   * @param func to manage device_data, avoid extra copy\n   * @return result\n   */\n  Status EmplaceBack(void* device_data, size_t data_size,\n                     const DeleteFunction& func = nullptr);\n\n  /**\n   * @brief push current device data to buffer list.\n   * support host data to cpu\n   * @param device_data data in current flowunit device\n   * @param data_size size of data\n   * @return result\n   */\n  Status 
EmplaceBack(const std::shared_ptr<void>& device_data,\n                     size_t data_size);\n\n  /**\n   * @brief push host data to device buffer list.\n   * not recommend for host data to cpu\n   * @param host_data host data\n   * @param data_size size of data\n   * @return result\n   */\n  Status EmplaceBackFromHost(void* host_data, size_t data_size);\n\n  /**\n   * @brief get front buffer in bufferlist\n   * @return null if no front buffer\n   */\n  std::shared_ptr<Buffer> Front();\n\n  /**\n   * @brief get last buffer in bufferlist\n   * @return null if no back buffer\n   */\n  std::shared_ptr<Buffer> Back();\n\n  /**\n   * @brief test whether buffer list supports mem contiguous\n   * @return support or not\n   **/\n  bool SupportMemContiguous();\n\n private:\n  friend class FlowUnitExecData;\n  friend class FlowUnitGroup;\n  friend class TensorList;\n\n  void SetNoContiguous();\n  bool IsContiguous() const;\n  Status SetMutable(bool is_mutable);\n  Status CopyToNewBufferList(std::shared_ptr<DeviceMemory>& dev_mem);\n  Status GenerateDeviceMemory(\n      const std::vector<std::shared_ptr<DeviceMemory>>& buffer_dev_mems);\n  bool is_contiguous_{false};\n  std::shared_ptr<DeviceMemory> dev_mem_;\n  uint32_t dev_mem_flags_{0};\n  std::vector<std::shared_ptr<Buffer>> buffer_list_;\n\n  Status BuildContiguous(const std::shared_ptr<Device>& device,\n                         const std::vector<size_t>& data_size_list);\n  Status BuildSeparate(const std::shared_ptr<Device>& device,\n                       const std::vector<size_t>& data_size_list);\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_BUFFER_LIST_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/buffer_type.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_BUFFER_TYPE_H_\n#define MODELBOX_BUFFER_TYPE_H_\n\n#include <modelbox/base/device.h>\n\n#include <map>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\nconstexpr const char *ROOT_BUFFER_TYPE = \"RAW\";\n\nclass BufferType {\n public:\n  BufferType();\n  BufferType(std::string type);\n  virtual ~BufferType();\n  const std::string &GetType() const;\n  std::shared_ptr<BufferType> GetParentType();\n  std::vector<std::shared_ptr<BufferType>> GetChildrenType();\n\n private:\n  void SetType(std::string type);\n  bool AddChildType(const std::shared_ptr<BufferType> &child);\n  bool AddParentType(const std::shared_ptr<BufferType> &parent);\n  void RemoveType();\n  void ClearChildType();\n  void ClearParentType();\n  bool IsAncestor(const BufferType &other);\n  bool IsOffspring(const BufferType &other);\n  std::string type_;\n  std::shared_ptr<BufferType> parent_;\n  std::vector<std::shared_ptr<BufferType>> children_;\n  friend class BufferTypeTree;\n};\n\nclass BufferTypeTree {\n public:\n  bool AddRootType(const std::string &root_type);\n  bool AddType(const std::string &type, const std::string &parent_type);\n  bool RemoveType(const std::string &type);\n  bool IsCompatible(const std::string &type, const std::string &ancestor_type);\n  
std::shared_ptr<BufferType> GetType(const std::string &type);\n  static BufferTypeTree *GetInstance();\n\n  virtual ~BufferTypeTree();\n private:\n  BufferTypeTree();\n  std::string root_;\n  std::map<std::string, std::shared_ptr<BufferType>> nodes_;\n  static std::shared_ptr<BufferTypeTree> instance_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_BUFFER_TYPE_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/context.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef HANDLER_CONTEXT_\n#define HANDLER_CONTEXT_\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/buffer_list.h\"\n#include \"modelbox/graph.h\"\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\ntypedef std::unordered_map<std::string, std::shared_ptr<Buffer>> BufferMap;\nenum DataHandlerType { INPUT = 0, OUTPUT };\nenum BindNodeType { STREAM_NODE = 0, BUFFERLIST_NODE = 1, VIRTUAL_NODE = 2 };\nclass DataHandler;\nclass ModelBoxEngine;\n\nclass GraphState {\n public:\n  std::shared_ptr<GCGraph> gcgraph_;\n  std::shared_ptr<DynamicGraph> graph_;\n  std::shared_ptr<void> external_data_;\n  Status error_{STATUS_SUCCESS};\n};\nclass HandlerContext {\n public:\n  friend class ModelBoxEngine;\n  HandlerContext(std::weak_ptr<ModelBoxEngine> &env);\n  virtual ~HandlerContext();\n\n  virtual Status PushData(const std::string &key,\n                          const std::shared_ptr<BufferList> &bufferlist) = 0;\n\n  virtual std::shared_ptr<BufferList> GetBufferList(const std::string &key) = 0;\n\n  virtual Status RunGraph(const std::shared_ptr<DataHandler> &) = 0;\n\n  void SetMeta(const std::string &key, const std::string &value);\n\n  std::string GetMeta(const std::string &key);\n\n  std::shared_ptr<GraphState> GetGraphState();\n\n  void SetGraphState(const std::shared_ptr<GraphState> & /*state*/);\n\n  
virtual void Close();\n\n  void SetFlowUnitDesc(const std::shared_ptr<FlowUnitDesc> &desc);\n\n  std::shared_ptr<FlowUnitDesc> GetFlowUnitDesc();\n\n  std::shared_ptr<FlowUnitDesc> desc_;\n  std::unordered_map<std::string, std::shared_ptr<BufferList>> data_map_;\n  std::weak_ptr<ModelBoxEngine> env_;\n\n private:\n  std::unordered_map<std::string, std::string> meta_;\n  std::shared_ptr<GraphState> graph_state_;\n};\n\nclass InputContext : public HandlerContext {\n public:\n  InputContext(std::weak_ptr<ModelBoxEngine> env);\n  ~InputContext() override;\n  void SetExternPtr(std::shared_ptr<void> extern_data_map,\n                    std::shared_ptr<BufferList> extern_buffer_list);\n  Status PushData(const std::string &key,\n                  const std::shared_ptr<BufferList> &bufferlist) override;\n\n  std::shared_ptr<BufferList> GetBufferList(const std::string &key) override;\n\n  Status RunGraph(const std::shared_ptr<DataHandler> &handler) override;\n  void Close() override;\n\n private:\n  std::weak_ptr<ModelBoxEngine> env_;\n  std::shared_ptr<void> extern_data_map_;\n  std::shared_ptr<BufferList> extern_buffer_list_;\n};\n\nclass StreamContext : public HandlerContext {\n public:\n  StreamContext(std::weak_ptr<ModelBoxEngine> env);\n  ~StreamContext() override;\n  Status PushData(const std::string &key,\n                  const std::shared_ptr<BufferList> &bufferlist) override;\n\n  std::shared_ptr<BufferList> GetBufferList(const std::string &key) override;\n\n  Status RunGraph(const std::shared_ptr<DataHandler> & /*handler*/) override;\n\n private:\n  bool end_flag_;\n};\n\nclass BufferListContext : public HandlerContext {\n public:\n  BufferListContext(std::weak_ptr<ModelBoxEngine> env);\n  ~BufferListContext() override;\n  Status PushData(const std::string &key,\n                  const std::shared_ptr<BufferList> &bufferlist) override;\n\n  std::shared_ptr<BufferList> GetBufferList(const std::string &key) override;\n\n  Status RunGraph(const 
std::shared_ptr<DataHandler> & /*handler*/) override;\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/data_context.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DATA_CONTEXT_H_\n#define MODELBOX_DATA_CONTEXT_H_\n\n#include <modelbox/buffer.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/error.h>\n#include <modelbox/inner_event.h>\n#include <modelbox/match_stream.h>\n#include <modelbox/session.h>\n#include <modelbox/session_context.h>\n#include <modelbox/statistics.h>\n#include <modelbox/stream.h>\n\n#include <unordered_set>\n\n#define CONFIG_NODES \"nodes\"\n#define CONFIG_NODE \"node.\"\n#define CONFIG_FLOWUNIT \"flowunit.\"\n\nnamespace modelbox {\n\nusing FlowunitEventList =\n    std::shared_ptr<std::vector<std::shared_ptr<FlowUnitInnerEvent>>>;\n\nclass Node;\n\nenum NotifyEvent { RECV_DATA, ERROR };\n\nclass ExternalData {\n public:\n  ExternalData();\n  virtual ~ExternalData();\n  virtual std::shared_ptr<BufferList> CreateBufferList() = 0;\n  virtual Status Send(std::shared_ptr<BufferList> buffer_list) = 0;\n  virtual std::shared_ptr<SessionContext> GetSessionContext() = 0;\n  virtual Status SetOutputMeta(std::shared_ptr<DataMeta> meta) = 0;\n  virtual Status Shutdown() = 0;\n  virtual Status Close() = 0;\n\n  virtual std::shared_ptr<Configuration> GetSessionConfig() = 0;\n};\n\nclass InPort;\nclass ExternalDataImpl : public ExternalData {\n public:\n  ExternalDataImpl(std::shared_ptr<InPort> port, std::shared_ptr<Device> device,\n      
             const std::shared_ptr<Stream> &init_stream);\n  ~ExternalDataImpl() override;\n\n  std::shared_ptr<BufferList> CreateBufferList() override;\n  Status Send(std::shared_ptr<BufferList> buffer_list) override;\n\n  std::shared_ptr<SessionContext> GetSessionContext() override;\n\n  Status SetOutputMeta(std::shared_ptr<DataMeta> meta) override;\n\n  Status Shutdown() override;\n\n  Status Close() override;\n\n  std::shared_ptr<Configuration> GetSessionConfig() override;\n\n private:\n  void SendCacheBuffer();\n  bool is_closed_{false};\n\n  std::shared_ptr<BufferIndexInfo> root_buffer_;\n\n  std::shared_ptr<InPort> ext_port_;\n  std::shared_ptr<Device> device_;\n  std::shared_ptr<Stream> input_stream_;\n  std::weak_ptr<Session> session_;\n  std::weak_ptr<SessionContext> session_ctx_;\n\n  std::shared_ptr<DataMeta> output_meta_;\n};\n\nenum class DataContextStatsType { NODE, SESSION, GRAPH };\n\nclass DataContext {\n public:\n  DataContext();\n\n  virtual ~DataContext();\n\n  virtual std::shared_ptr<BufferList> Input(const std::string &port) const = 0;\n\n  virtual std::shared_ptr<BufferList> Output(const std::string &port) = 0;\n\n  virtual std::shared_ptr<BufferListMap> Input() const = 0;\n\n  virtual std::shared_ptr<BufferListMap> Output() = 0;\n\n  virtual std::shared_ptr<BufferList> External() = 0;\n\n  virtual std::shared_ptr<FlowUnitEvent> Event() = 0;\n\n  virtual bool HasError() = 0;\n\n  virtual void SendEvent(std::shared_ptr<FlowUnitEvent> event) = 0;\n\n  virtual void SetPrivate(const std::string &key,\n                          std::shared_ptr<void> private_content) = 0;\n\n  virtual std::shared_ptr<void> GetPrivate(const std::string &key) = 0;\n\n  virtual std::shared_ptr<DataMeta> GetInputMeta(const std::string &port) = 0;\n\n  virtual std::shared_ptr<DataMeta> GetInputGroupMeta(\n      const std::string &port) = 0;\n\n  virtual void SetOutputMeta(const std::string &port,\n                             std::shared_ptr<DataMeta> data_meta) = 
0;\n\n  virtual std::shared_ptr<SessionContext> GetSessionContext() = 0;\n\n  virtual std::shared_ptr<Configuration> GetSessionConfig() = 0;\n\n  virtual std::shared_ptr<StatisticsItem> GetStatistics(\n      DataContextStatsType type = DataContextStatsType::NODE) = 0;\n};\n\nclass FlowUnitDataContext : public DataContext, public SessionStateListener {\n  // Implement interface DataContext\n public:\n  FlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                      const std::shared_ptr<Session> &session);\n\n  ~FlowUnitDataContext() override;\n\n  std::shared_ptr<BufferList> Input(const std::string &port) const override;\n\n  std::shared_ptr<BufferList> Output(const std::string &port) override;\n\n  std::shared_ptr<BufferListMap> Input() const override;\n\n  std::shared_ptr<BufferListMap> Output() override;\n\n  std::shared_ptr<BufferList> External() override;\n\n  void SetEvent(const std::shared_ptr<FlowUnitEvent> &event);\n\n  std::shared_ptr<FlowUnitEvent> Event() override;\n\n  bool HasError() override;\n\n  void SetPrivate(const std::string &key,\n                  std::shared_ptr<void> private_content) override;\n\n  std::shared_ptr<void> GetPrivate(const std::string &key) override;\n\n  void SendEvent(std::shared_ptr<FlowUnitEvent> event) override;\n\n  std::shared_ptr<DataMeta> GetInputMeta(const std::string &port) override;\n\n  std::shared_ptr<DataMeta> GetInputGroupMeta(const std::string &port) override;\n\n  void SetOutputMeta(const std::string &port,\n                     std::shared_ptr<DataMeta> data_meta) override;\n\n  std::shared_ptr<SessionContext> GetSessionContext() override;\n\n  std::shared_ptr<Configuration> GetSessionConfig() override;\n\n  std::shared_ptr<StatisticsItem> GetStatistics(\n      DataContextStatsType type) override;\n\n  // common function for FlowUnitDataContext\n\n  const std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      &GetInputs() const;\n\n  const 
std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      &GetErrorInputs() const;\n\n  const std::unordered_map<std::string, std::shared_ptr<BufferList>>\n      &GetOutputs() const;\n\n  const std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      &GetExternals() const;\n\n  void SetOutput(\n      const std::unordered_map<std::string, std::shared_ptr<BufferList>>\n          &data_list);\n\n  void SetStatus(const Status &status);\n\n  Status GetStatus();\n\n  Status GetLastStatus();\n\n  bool IsErrorStatus();\n\n  void AddDestroyCallback(const std::function<void()> &func);\n\n  bool IsDataErrorVisible();\n\n  bool IsFinished();\n\n  Status PopOutputData(PortDataMap &stream_data_map);\n\n  std::unordered_map<std::string, std::shared_ptr<DataMeta>>\n  GetOutputPortStreamMeta();\n\n  bool IsSkippable();\n\n  void SetSkippable(bool skippable);\n\n  void SetDataPreError(bool is_error);\n\n  /**\n   * @brief call after flowunit process\n   **/\n  Status PostProcess();\n\n  std::shared_ptr<Session> GetSession();\n\n  void NotifySessionClose() override;\n\n  // would be different in specify FlowUnitDataContext\n\n  // buffers in stream_data_map is in order\n  virtual void WriteInputData(std::shared_ptr<PortDataMap> stream_data_map);\n\n  virtual std::shared_ptr<FlowUnitInnerEvent> GenerateSendEvent();\n\n  virtual bool IsDataPre();\n\n  virtual bool IsDataPost();\n\n  virtual void DealWithDataPreError(const std::string &error_code,\n                                    const std::string &error_msg);\n\n  /**\n   * @brief call after flowunit group run\n   **/\n  virtual void UpdateProcessState();\n  virtual void ClearData();\n\n  void Dispose();\n\n protected:\n  virtual void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer);\n\n  virtual bool SkipInheritInputToMatchNode();\n\n  void SetCurrentInputData(std::shared_ptr<PortDataMap> 
stream_data_map);\n\n  virtual void UpdateInputInfo();\n\n  virtual Status GenerateOutputPlaceholder();\n\n  virtual Status GenerateOutputError();\n\n  virtual Status GenerateOutput();\n\n  virtual Status AppendEndFlag();\n\n  virtual bool NeedStreamEndFlag();\n\n  void FillPlaceholderOutput(bool from_valid_input = false,\n                             bool same_with_input_num = true);\n\n  void FillErrorOutput(bool from_valid, const std::string &error_code,\n                       const std::string &error_msg,\n                       bool same_with_input_num = true);\n\n  bool HasValidOutput();\n\n  size_t GetOutputBufferNum();\n\n  virtual Status CheckOutputData();\n\n  bool IsContinueProcess();\n\n  Status process_status_{STATUS_OK};\n  Status last_process_status_{STATUS_OK};\n\n  MatchKey *data_ctx_match_key_{nullptr};\n  std::shared_ptr<Session> session_;\n  std::weak_ptr<SessionContext> session_context_;\n\n  // record last input in case event sent out of Node::Run\n  PortDataMap last_input_valid_data_;\n\n  // total input\n  std::shared_ptr<PortDataMap> cur_input_;\n  // valid data for flowunit process\n  PortDataMap cur_input_valid_data_;\n  // empty for drop, empty for condition\n  PortDataMap cur_input_placeholder_;\n  // end for one stream, empty buffer\n  PortDataMap cur_input_end_flag_;\n  // error buffer\n  PortDataMap cur_input_error_;\n  // flowunit output\n  std::unordered_map<std::string, std::shared_ptr<BufferList>>\n      cur_output_valid_data_;\n  // empty for drop, empty for condition\n  PortDataMap cur_output_placeholder_;\n  // error buffer\n  PortDataMap cur_output_error_;\n  // total output\n  PortDataMap cur_output_;\n\n  Node *node_;\n\n  // state for ctx\n  bool is_exception_visible_{false};\n  bool is_finished_{false};  // will not process this data ctx again\n\n  // state for stream\n  bool is_empty_stream_{false};  // end_flag is first buffer of stream\n  bool end_flag_received_{false};\n  size_t input_stream_max_buffer_count_{0};\n  
size_t input_stream_cur_buffer_count_{0};\n  bool end_flag_generated_{false};\n  bool is_datapre_error_{false};\n\n  // state for single run\n  bool is_skippable_{false};  // no data\n  std::mutex wait_user_events_lock_;\n  std::unordered_set<std::shared_ptr<FlowUnitEvent>>\n      wait_user_events_;  // user send event, wait to process\n\n  bool input_has_stream_start_{false};\n  bool input_has_stream_end_{false};\n  bool input_valid_has_error_buffer_{false};\n\n private:\n  void InitStatistic();\n\n  Status UpdateOutputIndexInfo();\n\n  std::shared_ptr<BufferProcessInfo> GetCurNodeProcessInfo(\n      const std::shared_ptr<BufferIndexInfo> &index_info);\n\n  std::unordered_map<std::string, std::shared_ptr<DataMeta>> input_port_meta_;\n  std::unordered_map<std::string, std::shared_ptr<DataMeta>> output_port_meta_;\n\n  std::unordered_map<std::string, std::shared_ptr<void>> private_map_;\n\n  std::shared_ptr<FlowUnitEvent> user_event_;\n  PortDataMap cur_event_input_data_;  // record for event\n\n  std::shared_ptr<StatisticsItem> node_stats_;\n  std::shared_ptr<StatisticsItem> session_stats_;\n  std::shared_ptr<StatisticsItem> graph_stats_;\n\n  std::list<std::function<void()>> destroy_callback_list_;\n};\n\nclass NormalFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  NormalFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                            const std::shared_ptr<Session> &session);\n  ~NormalFlowUnitDataContext() override;\n\n  void SendEvent(std::shared_ptr<FlowUnitEvent> event) override;\n\n  void UpdateProcessState() override;\n\n protected:\n  bool NeedStreamEndFlag() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n};\n\nclass LoopNormalFlowUnitDataContext : public NormalFlowUnitDataContext {\n public:\n  LoopNormalFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                         
       const std::shared_ptr<Session> &session);\n  ~LoopNormalFlowUnitDataContext() override;\n\n protected:\n  Status GenerateOutput() override;\n\n  Status AppendEndFlag() override;\n\n  Status CheckOutputData() override;\n\n  std::string output_port_for_this_loop_;\n\n  PortDataMap cached_output_placeholder_;  // send cache after this loop decide\n\n  PortDataMap cached_input_end_flag_;  // process after this loop decide\n};\n\nclass StreamFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  StreamFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                            const std::shared_ptr<Session> &session);\n  ~StreamFlowUnitDataContext() override;\n\n  bool IsDataPre() override;\n  bool IsDataPost() override;\n\n  void UpdateProcessState() override;\n\n protected:\n  bool NeedStreamEndFlag() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n\n  PortDataMap cached_input_end_flag_;  // process after output stream end\n};\n\nclass NormalExpandFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  NormalExpandFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                                  const std::shared_ptr<Session> &session);\n\n  ~NormalExpandFlowUnitDataContext() override;\n\n  void UpdateProcessState() override;\n\n protected:\n  bool NeedStreamEndFlag() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n};\n\nclass StreamExpandFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  StreamExpandFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                                  const std::shared_ptr<Session> &session);\n  ~StreamExpandFlowUnitDataContext() override;\n\n  void WriteInputData(std::shared_ptr<PortDataMap> stream_data_map) 
override;\n\n  void ExpandNextBuffer();\n\n  bool IsDataPre() override;\n  bool IsDataPost() override;\n\n  std::shared_ptr<FlowUnitInnerEvent> GenerateSendEvent() override;\n\n  void UpdateProcessState() override;\n\n protected:\n  std::shared_ptr<PortDataMap> ReadFirstInCache();\n\n  bool IsNextExpand(const std::shared_ptr<PortDataMap> &data);\n\n  bool NeedStreamEndFlag() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n\n private:\n  // only read one buffer each process\n  std::list<std::shared_ptr<PortDataMap>> stream_data_cache_;\n  size_t cur_data_pose_in_first_cache_{0};\n  size_t cur_expand_buffer_index_{0};\n  bool cur_expand_buffer_index_received_{false};\n  bool next_expand_buffer_event_generated_{false};\n};\n\nclass NormalCollapseFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  NormalCollapseFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                                    const std::shared_ptr<Session> &session);\n  ~NormalCollapseFlowUnitDataContext() override;\n\n  void SendEvent(std::shared_ptr<FlowUnitEvent> event) override {\n    // not support user send event\n  }\n\n  bool IsDataPre() override;\n  bool IsDataPost() override;\n\n  void UpdateProcessState() override;\n\n  Status GenerateOutputError() override;\n\n protected:\n  bool SkipInheritInputToMatchNode() override { return true; };\n\n  Status GenerateOutputPlaceholder() override;\n\n  bool NeedStreamEndFlag() override;\n\n  Status CheckOutputData() override;\n\n  Status GenerateOutput() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n\n private:\n  size_t output_buffer_for_current_stream_{0};\n};\n\nclass StreamCollapseFlowUnitDataContext : public FlowUnitDataContext {\n public:\n  
StreamCollapseFlowUnitDataContext(Node *node, MatchKey *data_ctx_match_key,\n                                    const std::shared_ptr<Session> &session);\n  ~StreamCollapseFlowUnitDataContext() override;\n\n  void SendEvent(std::shared_ptr<FlowUnitEvent> event) override;\n\n  void WriteInputData(std::shared_ptr<PortDataMap> stream_data_map) override;\n\n  void CollapseNextStream();\n\n  bool IsDataPre() override;\n  bool IsDataPost() override;\n\n  std::shared_ptr<FlowUnitInnerEvent> GenerateSendEvent() override;\n\n  void UpdateProcessState() override;\n\n  Status GenerateOutputError() override;\n\n protected:\n  void UpdateInputInfo() override;\n\n  bool SkipInheritInputToMatchNode() override { return true; };\n\n  Status GenerateOutputPlaceholder() override;\n\n  bool NeedStreamEndFlag() override;\n\n  Status CheckOutputData() override;\n\n  Status GenerateOutput() override;\n\n  void UpdateBufferIndexInfo(\n      const std::shared_ptr<BufferIndexInfo> &cur_buffer,\n      const std::shared_ptr<BufferIndexInfo> &parent_buffer) override;\n\n  void AppendToCache(const std::shared_ptr<PortDataMap> &stream_data_map);\n\n private:\n  std::unordered_map<size_t, std::shared_ptr<PortDataMap>> stream_data_cache_;\n  size_t current_collapse_order_{0};\n  bool input_is_expand_from_end_buffer_{false};\n  size_t output_buffer_for_current_stream_{0};\n};\n\nclass FlowUnitExecData;\n\nclass ExecutorDataContext : public DataContext {\n public:\n  ExecutorDataContext(std::shared_ptr<FlowUnitDataContext> origin_ctx,\n                      std::shared_ptr<FlowUnitExecData> data);\n  ~ExecutorDataContext() override;\n\n  std::shared_ptr<BufferList> Input(const std::string &port) const override;\n\n  std::shared_ptr<BufferList> Output(const std::string &port) override;\n\n  std::shared_ptr<BufferListMap> Input() const override;\n\n  std::shared_ptr<BufferListMap> Output() override;\n\n  std::shared_ptr<BufferList> External() override;\n\n  bool HasError() override;\n\n  
std::shared_ptr<FlowUnitEvent> Event() override;\n\n  void SendEvent(std::shared_ptr<FlowUnitEvent> event) override;\n\n  void SetPrivate(const std::string &key,\n                  std::shared_ptr<void> private_content) override;\n\n  std::shared_ptr<void> GetPrivate(const std::string &key) override;\n\n  std::shared_ptr<DataMeta> GetInputMeta(const std::string &port) override;\n\n  std::shared_ptr<DataMeta> GetInputGroupMeta(const std::string &port) override;\n\n  void SetOutputMeta(const std::string &port,\n                     std::shared_ptr<DataMeta> data_meta) override;\n\n  std::shared_ptr<SessionContext> GetSessionContext() override;\n\n  void SetStatus(const Status &status);\n\n  std::shared_ptr<Configuration> GetSessionConfig() override;\n\n  std::shared_ptr<StatisticsItem> GetStatistics(\n      DataContextStatsType type) override;\n\n  void Clear();\n\n private:\n  std::shared_ptr<FlowUnitDataContext> origin_ctx_;\n  std::shared_ptr<FlowUnitExecData> data_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/data_handler.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef DATA_HANDLER_H_\n#define DATA_HANDLER_H_\n#include \"buffer.h\"\n#include \"buffer_list.h\"\n#include \"graph.h\"\n#include \"modelbox/context.h\"\n#include \"node.h\"\n\nnamespace modelbox {\n\n#define DEFAULT_PORT_NAME \"__default__\"\n\nclass ModelBoxEngine;\n\n/**\n * @brief data handler used by  dynamic graph,can store data and node\n * information\n */\nclass DataHandler : public std::enable_shared_from_this<DataHandler> {\n public:\n  DataHandler(BindNodeType type = BUFFERLIST_NODE,\n              const std::shared_ptr<ModelBoxEngine> &env = nullptr);\n\n  virtual ~DataHandler();\n\n  /**\n   * @brief close data handler, when data handler is closd,no data can be pushed\n   */\n  void Close();\n  /**\n   * @brief  push data to data handler of the node\n   * @param key port  name of the node\n   * @param data data handler which stores data\n   * @return  return result\n   */\n  Status PushData(std::shared_ptr<DataHandler> &data, const std::string &key);\n\n  Status PushData(std::shared_ptr<Buffer> &data, const std::string &key);\n\n  Status PushData(std::shared_ptr<BufferList> &data, const std::string &key);\n  /**\n   * @brief push meta to data handler\n   * @param key  key of meta info\n   * @param data  data of meta info\n   * @return return result\n   */\n  Status SetMeta(const std::string 
&key, const std::string &data);\n\n  std::shared_ptr<DataHandler> operator[](const std::string &port_name);\n\n  /**\n   * @brief get data from data handler via key\n   * @param key: port name\n   * @return  data handler contains data of port(key)\n   */\n  std::shared_ptr<DataHandler> GetDataHandler(const std::string &key);\n\n  Status SetDataHandler(\n      const std::map<std::string, std::shared_ptr<DataHandler>> &data_map);\n  /**\n   * @brief get bufferlist via key\n   * @param key: port name\n   * @return bufferlist on port(key)\n   */\n  std::shared_ptr<BufferList> GetBufferList(const std::string &key);\n\n  /**\n   * @brief get data on outports one by one\n   * @return data handler storing output data\n   */\n  std::shared_ptr<DataHandler> GetData();\n\n  std::string GetMeta(std::string &key);\n  /**\n   * @brief get flow error\n   * @return error code\n   */\n  Status GetError();\n\n  // for output: record the node name\n  void SetNodeName(const std::string &name);\n\n  std::string GetNodeName();\n\n  void SetError(const Status &status);\n\n private:\n  /*\n   check input stream has been closed\n  */\n  bool IsClosed();\n  void SetEnv(const std::shared_ptr<ModelBoxEngine> &env);\n  std::shared_ptr<ModelBoxEngine> GetEnv();\n  Status InsertOutputNode(std::shared_ptr<HandlerContext> &context);\n  /*\n  bind gcgraph for datahandler\n  */\n  Status SetBindGraph(const std::shared_ptr<GraphState> &gcgraph);\n  /*\n  get bind graph\n  */\n  std::shared_ptr<GraphState> GetBindGraph();\n  /*\n  for output: record the node type\n  */\n  DataHandlerType GetDataHandlerType();\n  void SetDataHandlerType(const DataHandlerType &type);\n\n  /* for input: when the node has more than one port, check whether the input\n   datahandlers are the same nodetype or not\n   */\n  Status CheckInputType(BindNodeType &node_type);\n\n  /*\n  for output: save outport names\n  */\n  std::set<std::string> GetPortNames();\n  Status SetPortNames(std::set<std::string> &port_names);\n\n  /*\n  get and set 
bind node type: stream or normal\n  */\n  BindNodeType GetBindNodeType();\n  void SetBindNodeType(BindNodeType type);\n\n  /*\n  set extern map and bufferlist, used when feed data\n  */\n  void SetExternData(std::shared_ptr<void> extern_map,\n                     std::shared_ptr<BufferList> &bufferlist);\n\n  /*\n for input: get inport-outport map when datahandler is constructed with more\n than one datahandler\n */\n  std::unordered_map<std::string, std::string> GetPortMap();\n\n  std::shared_ptr<DataHandler> GetOutputData(\n      std::shared_ptr<DynamicGraph> &dynamic_graph);\n\n  friend class SingleNode;\n  friend class ModelBoxEngine;\n  friend class InputContext;\n  friend class StreamContext;\n  bool closed_{false};\n\n  std::weak_ptr<ModelBoxEngine> env_;\n\n  Status error_{STATUS_SUCCESS};\n  DataHandlerType data_handler_type_{INPUT};\n  BindNodeType data_type_{BUFFERLIST_NODE};\n\n  std::string node_name_;\n  std::set<std::string> port_names_;\n  std::unordered_map<std::string, std::string> port_to_port_;\n  std::unordered_map<std::string, std::string> port_to_node_;\n  std::unordered_map<std::string, BindNodeType> node_type_map_;\n\n  std::shared_ptr<HandlerContext> context_;\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/data_source_parser_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_H_\n#define MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/status.h>\n#include <modelbox/session_context.h>\n\n#include <functional>\n#include <string>\n\n#define DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES (-1)\n#define DATASOURCE_PARSER_FILE_DEFAULT_RETRY_TIMES (10)\n#define DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL 1000\n#define DATASOURCE_PARSER_RETRY_ON 1\n#define DATASOURCE_PARSER_RETRY_OFF 0\n\nconstexpr const char *DRIVER_CLASS_DATA_SOURCE_PARSER_PLUGIN =\n    \"DRIVER-SOURCE-PARSER\";\n\nnamespace modelbox {\n\nusing DestroyUriFunc = std::function<void(const std::string &uri)>;\n\nclass DataSourceParserPlugin : public Driver {\n public:\n  virtual Status Init(const std::shared_ptr<Configuration> &opts) = 0;\n\n  virtual Status Deinit() = 0;\n\n  virtual Status Parse(const std::shared_ptr<SessionContext> &session_context,\n                       const std::shared_ptr<modelbox::Configuration> &session_config, \n                       const std::string &config, std::string &uri,\n                       DestroyUriFunc &destroy_uri_func) = 0;\n\n  virtual Status GetStreamType(const std::string &config,\n                               std::string &stream_type) = 0;\n\n  int32_t GetRetryTimes() { 
return retry_max_times_; };\n\n  int32_t GetRetryInterval() { return retry_interval_; };\n\n  int32_t GetRetryEnabled() { return retry_enabled_; };\n\n protected:\n  int32_t retry_interval_{\n      DATASOURCE_PARSER_DEFAULT_RETRY_INTERVAL};  // millisecond\n  int32_t retry_max_times_{\n      DATASOURCE_PARSER_STREAM_DEFAULT_RETRY_TIMES};  // -1: infinite retry\n  int32_t retry_enabled_{\n      DATASOURCE_PARSER_RETRY_OFF};  // 0:  retry disable  1: retry enable\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_DATA_SOURCE_PARSER_PLUGIN_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/error.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ERROR_H_\n#define MODELBOX_ERROR_H_\n\n#include <string>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nclass FlowUnitError {\n public:\n  FlowUnitError(std::string desc);\n  FlowUnitError(const std::string& node, const std::string& error_pos,\n                const Status& error_status);\n  virtual ~FlowUnitError();\n  std::string GetDesc();\n  Status GetStatus();\n\n private:\n  std::string desc_;\n  Status error_status_;\n};\n\nclass DataError {\n public:\n  DataError(const std::string &error_code, const std::string &error_msg);\n\n  virtual ~DataError();\n\n  std::string GetErrorCode();\n\n  std::string GetErrorMsg();\n\n  size_t GetErrorDeepth();\n\n  void SetErrorDeepth(size_t error_deepth);\n\n private:\n  bool new_error_{false};\n\n  std::string error_msg_;\n\n  std::string error_code_;\n\n  size_t error_deepth_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_ERROR_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/external_data_map.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_EXTERNAL_DATA_MAP_H_\n#define MODELBOX_EXTERNAL_DATA_MAP_H_\n\n#include <memory>\n\n#include \"modelbox/base/device.h\"\n#include \"modelbox/error.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/port.h\"\n#include \"modelbox/session.h\"\n#include \"modelbox/statistics.h\"\n\nnamespace modelbox {\nclass SessionContext;\nclass ExternalDataSelect;\n\nclass ExternalDataMap : public SessionIO {\n public:\n  ExternalDataMap();\n  ~ExternalDataMap() override;\n  virtual std::shared_ptr<BufferList> CreateBufferList() = 0;\n  Status SetOutputMeta(const std::string& port_name,\n                       std::shared_ptr<DataMeta> meta) override = 0;\n  Status Send(const std::string& port_name,\n              std::shared_ptr<BufferList> buffer_list) override = 0;\n  Status Recv(OutputBufferList& map_buffer_list,\n              int32_t timeout = 0) override = 0;\n  Status Close() override = 0;\n  Status Shutdown() override = 0;\n  virtual std::shared_ptr<SessionContext> GetSessionContext() = 0;\n  virtual std::shared_ptr<Configuration> GetSessionConfig() = 0;\n  virtual std::shared_ptr<FlowUnitError> GetLastError() = 0;\n\n  virtual void SetPrivate(std::shared_ptr<void> ptr) = 0;\n\n  virtual std::shared_ptr<void> GetPrivate() = 0;\n\n  template <typename T>\n  inline std::shared_ptr<T> GetPrivate() {\n 
   return std::static_pointer_cast<T>(GetPrivate());\n  }\n};\n\nclass ExternalDataMapImpl\n    : public ExternalDataMap,\n      public std::enable_shared_from_this<ExternalDataMapImpl> {\n public:\n  ExternalDataMapImpl(const std::shared_ptr<Node>& input_node,\n                      const std::shared_ptr<Stream>& init_stream);\n\n  ~ExternalDataMapImpl() override;\n\n  std::shared_ptr<BufferList> CreateBufferList() override;\n\n  Status SetOutputMeta(const std::string& port_name,\n                       std::shared_ptr<DataMeta> meta) override;\n\n  Status Send(const std::string& port_name,\n              std::shared_ptr<BufferList> buffer_list) override;\n\n  Status Recv(OutputBufferList& map_buffer_list, int32_t timeout = 0) override;\n\n  Status Close() override;\n\n  Status Shutdown() override;\n\n  std::shared_ptr<SessionContext> GetSessionContext() override;\n\n  std::shared_ptr<Configuration> GetSessionConfig() override;\n\n  void SetPrivate(std::shared_ptr<void> ptr) override;\n\n  std::shared_ptr<void> GetPrivate() override;\n\n  void SetLastError(std::shared_ptr<FlowUnitError> error);\n\n  std::shared_ptr<FlowUnitError> GetLastError() override;\n\n  void SetSelector(const std::shared_ptr<ExternalDataSelect>& selector);\n\n  bool GetReadyFlag();\n\n  void PushGraphOutputBuffer(OutputBufferList& output);\n\n protected:\n  void SessionEnd(std::shared_ptr<FlowUnitError> error = nullptr) override;\n\n private:\n  Status PushToInputCache(const std::string& port_name,\n                          const std::shared_ptr<BufferList>& buffer_list);\n\n  void PopMachedInput(\n      std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>&\n          matched_port_data,\n      size_t& matched_data_size);\n\n  Status SendMatchData(\n      const std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>&\n          matched_port_data,\n      size_t matched_data_size);\n\n  // all extern output port stream inherit from init_stream\n  
std::shared_ptr<Stream> init_stream_;\n  std::shared_ptr<BufferIndexInfo> root_buffer_;\n  std::weak_ptr<Session> session_;\n  std::weak_ptr<SessionContext> session_ctx_;\n\n  std::shared_ptr<Node> graph_input_node_;\n  std::shared_ptr<Device> graph_input_node_device_;\n  std::unordered_map<std::string, std::shared_ptr<InPort>>\n      graph_input_node_ports_;\n  std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>\n      graph_input_ports_cache_;\n  std::unordered_map<std::string, std::shared_ptr<Stream>>\n      graph_input_ports_stream_;\n\n  std::shared_ptr<FlowUnitError> last_error_;\n\n  std::shared_ptr<BlockingQueue<OutputBufferList>> graph_output_cache_;\n  std::weak_ptr<ExternalDataSelect> selector_;\n\n  bool session_end_flag_{false};\n  std::mutex session_state_lock_;\n\n  bool close_flag_{false};\n  bool shutdown_flag_{false};\n  std::recursive_mutex close_state_lock_;\n  std::shared_ptr<void> private_ptr_;\n};\n\nclass ExternalDataSelect\n    : public std::enable_shared_from_this<ExternalDataSelect> {\n public:\n  ExternalDataSelect();\n  virtual ~ExternalDataSelect();\n  void RegisterExternalData(\n      const std::shared_ptr<ExternalDataMap>& externl_data);\n  void RemoveExternalData(const std::shared_ptr<ExternalDataMap>& externl_data);\n\n  Status SelectExternalData(\n      std::list<std::shared_ptr<ExternalDataMap>>& external_list,\n      std::chrono::duration<long, std::milli> waittime =\n          std::chrono::milliseconds(-1));\n\n  bool IsExternalDataReady();\n\n private:\n  friend class ExternalDataMapImpl;\n  void NotifySelect();\n\n  std::mutex external_list_lock_;\n  std::list<std::shared_ptr<ExternalDataMapImpl>> external_list_;\n\n  std::mutex data_ready_mtx_;\n  std::condition_variable data_ready_cv_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_EXTERNAL_DATA_MAP_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/external_data_simple.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef EXTERNAL_DATA_SIMPLE_\n#define EXTERNAL_DATA_SIMPLE_\n#include <modelbox/flow.h>\n#include <queue>\n\nnamespace modelbox {\nclass ExternalDataSimple {\n public:\n  ExternalDataSimple(std::shared_ptr<ExternalDataMap> &data_map);\n  virtual ~ExternalDataSimple();\n\n  std::shared_ptr<BufferList> CreateBufferList();\n\n  Status PushData(const std::string &port_name,\n                  std::shared_ptr<BufferList> &bufferlist);\n\n  Status PushData(const std::string &port_name, const void *data,\n                  const size_t &data_len,\n                  const std::map<std::string, std::string> &meta = {});\n\n  Status GetResult(const std::string &port_name,\n                   std::shared_ptr<Buffer> &buffer, const int &timeout = 0);\n\n  Status GetResult(const std::string &port_name, std::shared_ptr<void> &data,\n                   size_t &len, const int &timeout = 0);\n\n  void Close();\n\n private:\n  std::shared_ptr<ExternalDataMap> data_map_;\n  std::map<std::string, std::queue<std::shared_ptr<Buffer>>> buffer_list_map_;\n  Status status_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flow.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOW_H_\n#define MODELBOX_FLOW_H_\n\n#include <modelbox/base/graph_manager.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/flow_graph_desc.h>\n#include <modelbox/flow_stream_io.h>\n#include <modelbox/flowunit.h>\n#include <modelbox/flowunit_builder.h>\n#include <modelbox/graph.h>\n#include <modelbox/profiler.h>\n\n#include <memory>\n#include <string>\n\nnamespace modelbox {\n\nconstexpr const char* DEFAULT_FLOW_PATH =\n    \"/usr/local/share/modelbox/solutions/graphs\";\n\n/**\n * @brief modelbox flow control\n */\nclass Flow {\n public:\n  /**\n   * @brief Graph format\n   */\n  enum Format {\n    FORMAT_AUTO,\n    FORMAT_TOML,\n    FORMAT_JSON,\n    FORMAT_UNKNOWN,\n  };\n\n  Flow();\n  virtual ~Flow();\n\n  /**\n   * @brief register flow unit to flow\n   * @param flowunit_builder flow unit builder\n   * @return\n   **/\n  void RegisterFlowUnit(\n      const std::shared_ptr<modelbox::FlowUnitBuilder>& flowunit_builder);\n\n  /**\n   * @brief Init flow from file\n   * @param configfile path to config file, support toml and json\n   * @param format config file format, when auto, Flow will guess format.\n   * @return init result.\n   */\n  Status Init(const std::string& configfile, Format format = FORMAT_AUTO);\n\n  /**\n   * @brief Init flow from inline 
graph\n   * @param name graph name\n   * @param graph inline graph.\n   * @param format config file format, when auto, Flow will guess format.\n   * @return init result.\n   */\n  Status Init(const std::string& name, const std::string& graph,\n              Format format = FORMAT_AUTO);\n\n  /**\n   * @brief Init flow from input stream\n   * @param is input stream of graph.\n   * @param fname graph name\n   * @return init result.\n   */\n  Status Init(std::istream& is, const std::string& fname);\n\n  /**\n   * @brief Init flow from configuration\n   * @param config configuration object\n   * @return init result.\n   */\n  Status Init(std::shared_ptr<Configuration> config);\n\n  /**\n   * @brief  init flow from FlowGraphDesc\n   * @param flow_graph_desc  graph desc\n   * @return init result.\n   */\n  Status Init(const std::shared_ptr<FlowGraphDesc>& flow_graph_desc);\n\n  /**\n   * @brief init flow by name, args and flow directory\n   * @param name flow name\n   * @param args flow args\n   * @param flow_dir scan flow directory\n   * @return init result.\n   */\n  Status InitByName(\n      const std::string& name,\n      const std::unordered_map<std::string, std::string>& args = {},\n      const std::string& flow_dir = DEFAULT_FLOW_PATH);\n\n  /**\n   * @brief return until flow running\n   * @return start result.\n   */\n  Status StartRun();\n\n  /**\n   * @brief Build graph\n   * @return build result.\n   */\n  Status Build();\n\n  /**\n   * @brief Run graph, block until graph is finish.\n   * @return run result.\n   */\n  Status Run();\n\n  /**\n   * @brief Run graph async.\n   * @return run result.\n   */\n  Status RunAsync();\n\n  /**\n   * @brief Wait graph run finish\n   * @param millisecond wait timeout\n   * @param ret_val graph run result\n   * @return wait result.\n   */\n  Status Wait(int64_t millisecond = 0, Status* ret_val = nullptr);\n\n  /**\n   * @brief Force stop graph\n   */\n  void Stop();\n\n  /**\n   * @brief Create external data\n   * @return 
extern data\n   */\n  std::shared_ptr<ExternalDataMap> CreateExternalDataMap();\n\n  /**\n   * @brief Create stream io to send and recv stream data\n   * @return FlowStreamIO\n   */\n  std::shared_ptr<FlowStreamIO> CreateStreamIO();\n\n  /**\n   * @brief Get profiler\n   * @return profiler\n   */\n  std::shared_ptr<Profiler> GetProfiler();\n\n  /**\n   * @brief Get graph id\n   * @return graph id\n   */\n  std::string GetGraphId() const;\n\n  /**\n   * @brief Get graph name\n   * @return graph name\n   */\n  std::string GetGraphName() const;\n\n private:\n  Status InitComponent();\n\n  void Clear();\n\n  Status ConfigFileRead(const std::string& configfile, Format format,\n                        std::istringstream* ifs);\n\n  Status GetConfigByGraphFile(const std::string& configfile,\n                              std::shared_ptr<Configuration>& config,\n                              Format format);\n\n  Status GetGraphFilePathByName(const std::string& flow_name,\n                                const std::string& graph_dir,\n                                std::string& graph_path);\n\n  Status GetInputArgs(\n      std::shared_ptr<Configuration>& config,\n      const std::unordered_map<std::string, std::string>& input_args = {});\n\n  Status GuessConfFormat(const std::string& configfile, const std::string& data,\n                         enum Format* format);\n\n  std::list<std::shared_ptr<FlowUnitFactory>> flowunit_factory_list_;\n\n  std::shared_ptr<Drivers> drivers_;\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::shared_ptr<FlowUnitManager> flowunit_mgr_;\n  std::shared_ptr<GraphConfigManager> graphconf_mgr_;\n  std::shared_ptr<Configuration> config_;\n  std::shared_ptr<GraphConfig> graphconfig_;\n  std::shared_ptr<GCGraph> gcgraph_;\n  std::shared_ptr<Graph> graph_;\n  std::shared_ptr<Profiler> profiler_;\n  bool timer_run_ = false;\n  std::shared_ptr<std::unordered_map<std::string, std::string>> args_;\n};\n\n}  // namespace modelbox\n#endif  // 
MODELBOX_FLOW_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flow_graph_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef FLOW_GRAPH_DESC_H_\n#define FLOW_GRAPH_DESC_H_\n\n#include <functional>\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <vector>\n\n#include \"flow_node_desc.h\"\n#include \"modelbox/base/configuration.h\"\n#include \"modelbox/base/graph_manager.h\"\n\nnamespace modelbox {\n\nclass FlowGraphFunctionInfo {\n public:\n  FlowGraphFunctionInfo(\n      std::string name, std::vector<std::string> input_name_list,\n      std::vector<std::string> output_name_list,\n      std::function<Status(std::shared_ptr<DataContext>)> func);\n\n  std::string GetName();\n\n  std::vector<std::string> GetInputNameList();\n\n  std::vector<std::string> GetOutputNameList();\n\n  std::function<Status(std::shared_ptr<DataContext>)> GetFunc();\n\n private:\n  std::string name_;\n  std::vector<std::string> input_name_list_;\n  std::vector<std::string> output_name_list_;\n  std::function<Status(std::shared_ptr<DataContext>)> func_;\n};\n\n/**\n * @brief To describe a graph in api mode\n **/\nclass FlowGraphDesc {\n  friend class Flow;\n\n public:\n  FlowGraphDesc();\n\n  virtual ~FlowGraphDesc();\n\n  /**\n   * @brief set graph scope queue size\n   * @param queue_size for node input cache\n   */\n  void SetQueueSize(size_t queue_size);\n\n  /**\n   * @brief set graph scope batch size\n   * @param batch_size 
for node process batch\n   **/\n  void SetBatchSize(size_t batch_size);\n\n  /**\n   * @brief set custom drivers scan directory\n   * @param drivers_dir_list Dir list to scan custom drivers\n   **/\n  void SetDriversDir(const std::vector<std::string> &drivers_dir_list);\n\n  /**\n   * @brief skip modelbox default drivers\n   * @param is_skip True if skip modelbox default drivers\n   **/\n  void SetSkipDefaultDrivers(bool is_skip);\n\n  /**\n   * @brief set directory to save profile info\n   * @param profile_dir directory to write profile info\n   **/\n  void SetProfileDir(const std::string &profile_dir);\n\n  /**\n   * @brief set profile trace on or off\n   * @param profile_trace_enable true to enable profile trace\n   **/\n  void SetProfileTraceEnable(bool profile_trace_enable);\n\n  /**\n   * @brief add input port for flow\n   * @param input_name input port name\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddInput(const std::string &input_name);\n\n  /**\n   * @brief add output port for flow\n   * @param output_name output port name\n   * @param source_node_port node output port connect to this output port\n   **/\n  void AddOutput(const std::string &output_name,\n                 const std::shared_ptr<FlowPortDesc> &source_node_port);\n\n  /**\n   * @brief add output port for flow\n   * @param output_name output port name\n   * @param source_node output port [0] of node will connect to this output port\n   **/\n  void AddOutput(const std::string &output_name,\n                 const std::shared_ptr<FlowNodeDesc> &source_node);\n\n  /**\n   * @brief add node for flow\n   * @param flowunit_name flowunit name, like resize, crop\n   * @param device choose flowunit implementation\n   * @param config flowunit configuration\n   * @param source_node_ports node output ports connect to this node input ports\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddNode(\n      const std::string &flowunit_name, const std::string 
&device,\n      const std::vector<std::string> &config,\n      const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &source_node_ports);\n\n  /**\n   * @brief add node for flow\n   * @param flowunit_name flowunit name, like resize, crop\n   * @param device choose flowunit implementation\n   * @param config flowunit configuration\n   * @param source_node output port [0] of node will connect to this output port\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddNode(\n      const std::string &flowunit_name, const std::string &device,\n      const std::vector<std::string> &config,\n      const std::shared_ptr<FlowNodeDesc> &source_node);\n\n  /**\n   * @brief add node for flow\n   * @param flowunit_name flowunit name, like resize, crop\n   * @param device choose flowunit implementation\n   * @param source_node_ports node output ports connect to this node input ports\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddNode(\n      const std::string &flowunit_name, const std::string &device,\n      const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &source_node_ports);\n\n  /**\n   * @brief add node for flow\n   * @param flowunit_name flowunit name, like resize, crop\n   * @param device choose flowunit implementation\n   * @param source_node output port [0] of node will connect to this output port\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddNode(\n      const std::string &flowunit_name, const std::string &device,\n      const std::shared_ptr<FlowNodeDesc> &source_node);\n\n  /**\n   * @brief add node for flow\n   * @param flowunit_name flowunit name, like resize, crop\n   * @param device choose flowunit implementation\n   * @param config flowunit configuration\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddNode(\n      const std::string &flowunit_name, const std::string &device = \"cpu\",\n      const 
std::vector<std::string> &config = {});\n\n  /**\n   * @brief add function node for flow\n   * @param func func to insert as node\n   * @param input_name_list define input port for node\n   * @param output_name_list define output port for node\n   * @param source_node_ports node output ports connect to this node input ports\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddFunction(\n      const std::function<Status(std::shared_ptr<DataContext>)> &func,\n      const std::vector<std::string> &input_name_list,\n      const std::vector<std::string> &output_name_list,\n      const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &source_node_ports);\n\n  /**\n   * @brief add function node for flow\n   * @param func func to insert as node\n   * @param input_name_list define input port for node\n   * @param output_name_list define output port for node\n   * @param source_node output port [0] of node will connect to this output port\n   * @return a node in graph\n   **/\n  std::shared_ptr<FlowNodeDesc> AddFunction(\n      const std::function<Status(std::shared_ptr<DataContext>)> &func,\n      const std::vector<std::string> &input_name_list,\n      const std::vector<std::string> &output_name_list,\n      const std::shared_ptr<FlowNodeDesc> &source_node);\n\n private:\n  std::unordered_map<std::string, size_t> node_name_idx_map_;\n  size_t function_node_idx_{0};\n\n  std::shared_ptr<Configuration> config_;\n  std::list<std::shared_ptr<FlowGraphFunctionInfo>> function_list_;\n  std::list<std::shared_ptr<FlowNodeDesc>> node_desc_list_;\n\n  std::shared_ptr<Configuration> GetConfig();\n\n  void GetFuncFactoryList(\n      std::list<std::shared_ptr<FlowUnitFactory>> &factory_list);\n\n  std::shared_ptr<GCGraph> GenGCGraph(\n      const std::shared_ptr<modelbox::FlowUnitManager> &flowunit_mgr);\n\n  void AddOutput(const std::string &output_name, const std::string &device,\n                 const std::shared_ptr<FlowPortDesc> 
&source_node_port);\n\n  Status GenGCNodes(const std::shared_ptr<GCGraph> &gcgraph);\n\n  Status GenGCEdges(\n      const std::shared_ptr<GCGraph> &gcgraph,\n      const std::shared_ptr<modelbox::FlowUnitManager> &flowunit_mgr);\n\n  Status GetInputLinks(\n      const std::shared_ptr<FlowNodeDesc> &dest_node_desc,\n      const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n      std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &input_links);\n\n  Status FormatInputLinks(\n      const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n      std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &input_links);\n\n  std::shared_ptr<FlowUnitDesc> GetFlowUnitDesc(\n      const std::shared_ptr<FlowNodeDesc> &node_desc,\n      const std::shared_ptr<FlowUnitManager> &flowunit_mgr);\n};\n\n}  // namespace modelbox\n\n#endif  // FLOW_GRAPH_DESC_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flow_node_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef FLOW_NODE_DESC_H_\n#define FLOW_NODE_DESC_H_\n#include <utility>\n\n#include \"buffer.h\"\n#include \"buffer_list.h\"\n#include \"graph.h\"\n#include \"node.h\"\n\nnamespace modelbox {\n\nconstexpr const char *GRAPH_NODE_INPUT = \"input\";\nconstexpr const char *GRAPH_NODE_OUTPUT = \"output\";\nconstexpr const char *GRAPH_NODE_FLOWUNIT = \"flowunit\";\n\nclass FlowNodeDesc;\n\n/**\n * @brief port of node\n **/\nclass FlowPortDesc {\n  friend class FlowGraphDesc;\n\n public:\n  FlowPortDesc(std::shared_ptr<FlowNodeDesc> node, std::string port_name);\n\n  FlowPortDesc(std::shared_ptr<FlowNodeDesc> node, size_t port_idx);\n\n  /**\n   * @brief get node\n   * @return node\n   **/\n  std::shared_ptr<FlowNodeDesc> GetNode();\n\n  /**\n   * @brief get node name\n   * @return node name\n   **/\n  std::string GetNodeName();\n\n  /**\n   * @brief describe port in name or idx\n   **/\n  bool IsDescribeInName();\n\n  /**\n   * @brief get port name\n   * @return port name\n   **/\n  std::string GetPortName();\n\n  /**\n   * @brief get port index\n   * @return port index\n   **/\n  size_t GetPortIdx();\n\n private:\n  std::shared_ptr<FlowNodeDesc> node_;\n\n  bool is_in_name_;\n  std::string port_name_;\n  size_t port_idx_{0};\n};\n\nclass FlowNodeDesc : public std::enable_shared_from_this<FlowNodeDesc> {\n  friend 
class FlowGraphDesc;\n\n public:\n  FlowNodeDesc(std::string node_name);\n\n  virtual ~FlowNodeDesc();\n\n  /**\n   * @brief set custom node name to override default node name\n   * @param node_name custom node name\n   **/\n  void SetNodeName(const std::string &node_name);\n\n  /**\n   * @brief get node name\n   * @return node name\n   **/\n  std::string GetNodeName();\n\n  /**\n   * @brief get output port by output_name\n   * @param output_name name for node output port\n   **/\n  std::shared_ptr<FlowPortDesc> operator[](const std::string &output_name);\n\n  /**\n   * @brief get output port by port index\n   * @param port_idx index for node output port\n   **/\n  std::shared_ptr<FlowPortDesc> operator[](size_t port_idx);\n\n private:\n  void SetNodeType(const std::string &type);\n\n  void SetFlowUnitName(const std::string &flowunit_name);\n\n  std::string GetFlowUnitName();\n\n  void SetDevice(const std::string &device);\n\n  void SetConfig(const std::vector<std::string> &config);\n\n  std::vector<std::string> GetNodeConfig();\n\n  void SetInputLinks(\n      const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n          &source_node_ports);\n\n  const std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n      &GetInputLinks();\n\n  void Clear();\n\n  std::string node_name_;\n  std::string flowunit_name_;\n  std::string type_;\n  std::string device_;\n  std::vector<std::string> config_;\n  std::unordered_map<std::string, std::shared_ptr<FlowPortDesc>>\n      source_node_ports_;\n};\n\n}  // namespace modelbox\n#endif  // FLOW_NODE_DESC_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flow_stream_io.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#ifndef FLOW_STREAM_IO_H_\n#define FLOW_STREAM_IO_H_\n\n#include <list>\n#include <memory>\n#include <string>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/external_data_map.h\"\n\nnamespace modelbox {\n\nclass FlowStreamIO {\n public:\n  FlowStreamIO(std::shared_ptr<ExternalDataMap> data_map);\n  virtual ~FlowStreamIO();\n\n  /**\n   * @brief create a empty buffer on cpu device\n   * @return cpu buffer\n   **/\n  std::shared_ptr<Buffer> CreateBuffer();\n\n  /**\n   * @brief Send buffer of this stream to flow\n   * @param input_name input node name of flow\n   * @param buffer buffer of this stream\n   * @return Status\n   **/\n  Status Send(const std::string &input_name,\n              const std::shared_ptr<Buffer> &buffer);\n\n  /**\n   * @brief Send buffer of this stream to flow\n   * @param input_name input node name of flow\n   * @param input_list buffer list of this stream\n   * @return Status\n   **/\n  Status Send(const std::string &input_name,\n              const std::vector<std::shared_ptr<Buffer>> &input_list);\n\n  /**\n   * @brief Send buffer of this stream to flow\n   * @param input_name input node name of flow\n   * @param data data pointer\n   * @param size data size\n   * @return Status\n   **/\n  Status Send(const std::string &input_name, void *data, size_t size);\n\n  /**\n   * @brief 
recv buffer of this stream result from flow\n   * @param output_name output node name of flow\n   * @param buffer result buffer of this stream\n   * @param timeout wait result timeout\n   * @return Status\n   **/\n  Status Recv(const std::string &output_name, std::shared_ptr<Buffer> &buffer,\n              long timeout = 0);\n\n  /**\n   * @brief recv buffer of this stream result from flow\n   * @param output_name output node name of flow\n   * @param timeout wait result timeout\n   * @return buffer\n   **/\n  std::shared_ptr<Buffer> Recv(const std::string &output_name, long timeout);\n\n  /**\n   * @brief close input stream, mark stream end\n   **/\n  void CloseInput();\n\n private:\n  std::shared_ptr<Device> device_;\n  std::shared_ptr<ExternalDataMap> data_map_;\n  std::map<std::string, std::list<std::shared_ptr<Buffer>>>\n      port_data_cache_map_;\n  Status status_;\n};\n\n}  // namespace modelbox\n#endif  // FLOW_STREAM_IO_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOW_UNIT_H_\n#define MODELBOX_FLOW_UNIT_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/data_context.h>\n#include <modelbox/stream.h>\n#include <modelbox/tensor_list.h>\n#include <string.h>\n\n#include <functional>\n#include <map>\n#include <queue>\n#include <regex>\n#include <set>\n#include <string>\n#include <utility>\n#include <vector>\n\nnamespace modelbox {\n\nconstexpr const char *EVENT_PORT_NAME = \"Event_Port\";\nconstexpr const char *EXTERNAL_PORT_NAME = \"External_Port\";\nconstexpr const char *DRIVER_CLASS_FLOWUNIT = \"DRIVER-FLOWUNIT\";\nconstexpr uint32_t STREAM_DEFAULT_BATCH_SIZE = 1;\nconstexpr uint32_t NORMAL_DEFAULT_BATCH_SIZE = 8;\nconstexpr uint32_t STREAM_MAX_BATCH_SIZE = 1;\nconstexpr uint32_t NORMAL_MAX_BATCH_SIZE = 0;\n\nusing BufferPtr = std::shared_ptr<Buffer>;\nusing BufferPtrList = std::vector<BufferPtr>;\n\nclass SchedulerEvent;\n\nenum FlowOutputType {\n  ORIGIN = 0,\n  EXPAND = 1,\n  COLLAPSE = 2,\n};\n\nenum FlowType {\n  STREAM = 0,\n  NORMAL = 1,\n};\n\nenum ConditionType {\n  NONE = 0,\n  IF_ELSE = 1,\n};\n\nenum LoopType {\n  NOT_LOOP = 0,\n  LOOP = 1,\n};\n\nclass FlowUnitPort {\n 
public:\n  FlowUnitPort(std::string name);\n  FlowUnitPort(std::string name, std::string device_type);\n  FlowUnitPort(std::string name, uint32_t device_mem_flags);\n  FlowUnitPort(std::string name, std::string device_type,\n               uint32_t device_mem_flags);\n  FlowUnitPort(std::string name, std::string device_type, std::string type);\n  FlowUnitPort(std::string name, std::string device_type, std::string type,\n               std::map<std::string, std::string> ext);\n\n  virtual ~FlowUnitPort();\n\n  void SetDeviceType(const std::string &device_type);\n\n  void SetPortName(const std::string &port_name);\n\n  void SetPortType(const std::string &port_type);\n\n  void SetDevice(std::shared_ptr<Device> device);\n\n  void SetProperity(const std::string &key, const std::string &value);\n\n  std::string GetDeviceType() const;\n\n  std::string GetPortName() const;\n\n  std::string GetPortType() const;\n\n  std::shared_ptr<Device> GetDevice() const;\n\n  uint32_t GetDeviceMemFlags() const;\n\n  std::string GetProperity(const std::string &key);\n\n private:\n  std::string port_name_;\n  std::string device_type_;\n  std::string port_type_;\n  std::map<std::string, std::string> ext_;\n  std::shared_ptr<Device> device_;\n  uint32_t device_mem_flags_{0};\n};\n\nclass FlowUnitInput : public FlowUnitPort {\n public:\n  FlowUnitInput(const std::string &name);\n  FlowUnitInput(const std::string &name, const std::string &device_type);\n  FlowUnitInput(const std::string &name, uint32_t device_mem_flags);\n  FlowUnitInput(const std::string &name, const std::string &device_type,\n                uint32_t device_mem_flags);\n  FlowUnitInput(const std::string &name, const std::string &device_type,\n                const std::string &type);\n  FlowUnitInput(const std::string &name, const std::string &device_type,\n                const std::string &type,\n                const std::map<std::string, std::string> &ext);\n  ~FlowUnitInput() override;\n};\n\nclass FlowUnitOutput : 
public FlowUnitPort {\n public:\n  FlowUnitOutput(const std::string &name);\n  FlowUnitOutput(const std::string &name, uint32_t device_mem_flags);\n  /**\n   * @deprecated\n   **/\n  FlowUnitOutput(const std::string &name, const std::string &device_type);\n  /**\n   * @deprecated\n   **/\n  FlowUnitOutput(const std::string &name, const std::string &device_type,\n                 uint32_t device_mem_flags);\n  /**\n   * @deprecated\n   **/\n  FlowUnitOutput(const std::string &name, const std::string &device_type,\n                 const std::string &type);\n  /**\n   * @deprecated\n   **/\n  FlowUnitOutput(const std::string &name, const std::string &device_type,\n                 const std::string &type,\n                 const std::map<std::string, std::string> &ext);\n  ~FlowUnitOutput() override;\n};\n\nclass FlowUnitOption {\n public:\n  FlowUnitOption(std::string name, std::string type);\n  FlowUnitOption(std::string name, std::string type, bool require);\n  FlowUnitOption(std::string name, std::string type, bool require,\n                 std::string default_value, std::string desc,\n                 std::map<std::string, std::string> values);\n  FlowUnitOption(std::string name, std::string type, bool require,\n                 std::string default_value, std::string desc);\n\n  virtual ~FlowUnitOption();\n\n  void SetOptionName(const std::string &option_name);\n\n  void SetOptionType(const std::string &option_type);\n\n  void SetOptionRequire(bool option_require);\n\n  void SetOptionDesc(const std::string &option_desc);\n\n  void AddOptionValue(const std::string &key, const std::string &value);\n\n  std::string GetOptionName() const;\n\n  std::string GetOptionType() const;\n\n  bool IsRequire() const;\n\n  std::string GetOptionDefault() const;\n\n  std::string GetOptionDesc() const;\n\n  std::map<std::string, std::string> GetOptionValues();\n\n  std::string GetOptionValue(const std::string &key);\n\n private:\n  std::string option_name_;\n  std::string 
option_type_;\n  bool option_require_{false};\n  std::string option_default_;\n  std::string option_desc_;\n  std::map<std::string, std::string> option_values_;\n};\n\nclass FlowUnitDesc {\n public:\n  FlowUnitDesc();\n  virtual ~FlowUnitDesc();\n\n  std::string GetFlowUnitName();\n\n  std::string GetFlowUnitType();\n\n  std::string GetFlowUnitAliasName();\n\n  std::string GetFlowUnitArgument();\n\n  bool IsCollapseAll();\n\n  bool IsStreamSameCount();\n\n  bool IsInputContiguous() const;\n\n  bool IsResourceNice() const;\n\n  bool IsExceptionVisible();\n\n  ConditionType GetConditionType();\n\n  FlowOutputType GetOutputType();\n\n  bool IsUserSetFlowType();\n\n  FlowType GetFlowType();\n\n  LoopType GetLoopType();\n\n  std::string GetGroupType();\n\n  uint32_t GetMaxBatchSize();\n\n  uint32_t GetDefaultBatchSize();\n\n  std::vector<FlowUnitInput> &GetFlowUnitInput();\n\n  const std::vector<FlowUnitOutput> &GetFlowUnitOutput();\n\n  std::vector<FlowUnitOption> &GetFlowUnitOption();\n\n  std::shared_ptr<DriverDesc> GetDriverDesc();\n\n  std::string GetDescription();\n\n  std::string GetVirtualType();\n\n  void SetFlowUnitName(const std::string &flowunit_name);\n\n  void SetFlowUnitType(const std::string &flowunit_type);\n\n  Status AddFlowUnitInput(const FlowUnitInput &flowunit_input);\n\n  Status AddFlowUnitOutput(const FlowUnitOutput &flowunit_output);\n\n  Status AddFlowUnitOption(const FlowUnitOption &flowunit_option);\n\n  void SetFlowUnitGroupType(const std::string &group_type);\n\n  void SetDriverDesc(std::shared_ptr<DriverDesc> driver_desc);\n\n  void SetFlowUnitAliasName(const std::string &alias_name);\n\n  void SetFlowUnitArgument(const std::string &argument);\n\n  void SetConditionType(ConditionType condition_type);\n\n  void SetLoopType(LoopType loop_type);\n\n  void SetOutputType(FlowOutputType output_type);\n\n  void SetFlowType(FlowType flow_type);\n\n  void SetStreamSameCount(bool is_stream_same_count);\n\n  void SetInputContiguous(bool 
is_input_contiguous);\n\n  void SetResourceNice(bool is_resource_nice);\n  void SetCollapseAll(bool is_collapse_all);\n\n  void SetExceptionVisible(bool is_exception_visible);\n\n  void SetVirtualType(const std::string &virtual_type);\n\n  void SetDescription(const std::string &description);\n\n  void SetMaxBatchSize(const uint32_t &max_batch_size);\n\n  void SetDefaultBatchSize(const uint32_t &default_batch_size);\n\n protected:\n  FlowOutputType output_type_{ORIGIN};\n\n  bool is_user_set_flow_type_{false};\n  FlowType flow_type_{NORMAL};\n\n  ConditionType condition_type_{NONE};\n\n  LoopType loop_type_{NOT_LOOP};\n\n  bool is_stream_same_count_{false};\n  bool is_collapse_all_{true};\n  bool is_exception_visible_{false};\n  std::string flowunit_name_;\n  std::string flowunit_type_;\n  std::string group_type_;\n  std::string alias_name_;\n  std::string argument_;\n  std::string virtual_type_;\n  std::string flowunit_description_;\n  std::vector<FlowUnitInput> flowunit_input_list_;\n  std::vector<FlowUnitOutput> flowunit_output_list_;\n  std::vector<FlowUnitOption> flowunit_option_list_;\n  std::shared_ptr<DriverDesc> driver_desc_;\n  bool is_input_contiguous_{true};\n  bool is_resource_nice_{true};\n  uint32_t max_batch_size_{0};\n  uint32_t default_batch_size_{0};\n\n private:\n  Status CheckInputDuplication(const FlowUnitInput &flowunit_input);\n  Status CheckOutputDuplication(const FlowUnitOutput &flowunit_output);\n  Status CheckOptionDuplication(const FlowUnitOption &flowunit_option);\n  Status CheckGroupType(const std::string &group_type);\n};\n\nclass FlowUnitInnerEvent;\nclass ExternalData;\n\nusing CreateExternalDataFunc =\n    std::function<std::shared_ptr<ExternalData>(std::shared_ptr<Device>)>;\n\nclass FlowUnitStreamContext {\n public:\n  enum StreamMode { EXPAND_DATA, DEFAULT, COLLAPSE_DATA };\n\n  enum RecvMode { SYNC, ASYNC };\n\n  FlowUnitStreamContext();\n  virtual ~FlowUnitStreamContext();\n\n  bool HasError();\n\n  bool HasError(const 
std::string &port);\n\n  std::shared_ptr<FlowUnitError> GetError();\n\n  std::shared_ptr<FlowUnitError> GetError(const std::string &port);\n\n  bool HasEvent();\n\n  std::shared_ptr<FlowUnitInnerEvent> RecvEvent();\n\n  void SendEvent(std::shared_ptr<FlowUnitInnerEvent> event);\n\n  bool HasExternalData();\n\n  Status SendExternalData(BufferList &buffer);\n\n  Status RecvExternalData(BufferList &buffer);\n\n  Status RecvData(const std::string &port, BufferList &buffer);\n\n  Status SendData(const std::string &port, BufferList &buffer);\n\n  void NewOutputStream(StreamMode type, const std::string &port,\n                       std::shared_ptr<DataMeta> data_meta);\n\n  const std::shared_ptr<DataMeta> GetInputMeta(const std::string &port);\n\n  const std::shared_ptr<DataMeta> GetInputGroupMeta(const std::string &port);\n\n  std::shared_ptr<DataMeta> GetOutputMeta(const std::string &port);\n\n  std::shared_ptr<SessionContext> GetSessionContext();\n\n  void SetPrivate(const std::string &key, std::shared_ptr<void> private_value);\n\n  std::shared_ptr<void> GetPrivate(const std::string &key);\n\n  template <typename T>\n  inline std::shared_ptr<T> GetPrivate(const std::string &key) {\n    return std::static_pointer_cast<T>(GetPrivate(key));\n  }\n\n  Status CloseAll();\n\n  Status Close(const std::string &port);\n\n  void SetRecvMode(RecvMode recv_mode);\n\n  BufferList &NewBufferList();\n};\n\nclass FlowUnitStream {\n public:\n  FlowUnitStream();\n  virtual ~FlowUnitStream();\n\n  virtual Status Open(const std::shared_ptr<Configuration> &config) = 0;\n\n  /* class when unit is close */\n  virtual Status Close() = 0;\n\n  virtual Status Process(std::shared_ptr<FlowUnitStreamContext> ctx) = 0;\n\n  virtual Status StreamOpen(std::shared_ptr<FlowUnitStreamContext> ctx) = 0;\n\n  virtual Status StreamClose(std::shared_ptr<FlowUnitStreamContext> ctx) = 0;\n\n  virtual Status ParentStreamOpen(\n      std::shared_ptr<FlowUnitStreamContext> ctx) = 0;\n\n  virtual Status 
ParentStreamClose(\n      std::shared_ptr<FlowUnitStreamContext> ctx) = 0;\n};\n\n/**\n * @brief Flowunit plugin interface\n */\nclass IFlowUnit {\n public:\n  IFlowUnit();\n  virtual ~IFlowUnit();\n\n  /**\n   * @brief Flowunit open function, called when unit is open for processing data\n   * @param config flowunit configuration\n   * @return open result\n   */\n  virtual Status Open(const std::shared_ptr<Configuration> &config) = 0;\n\n  /**\n   * @brief Flowunit close function, called when unit is closed.\n   * @return open result\n   */\n  virtual Status Close();\n\n  /**\n   * @brief Flowunit data process.\n   * @param data_ctx data context.\n   * @return open result\n   */\n  // NOLINTNEXTLINE\n  virtual Status Process(std::shared_ptr<DataContext> data_ctx) = 0;\n\n  /**\n   * @brief Flowunit data pre.\n   * @param data_ctx data context.\n   * @return data pre result\n   */\n  // NOLINTNEXTLINE\n  virtual Status DataPre(std::shared_ptr<DataContext> data_ctx);\n\n  /**\n   * @brief Flowunit data post.\n   * @param data_ctx data context.\n   * @return data post result\n   */\n  // NOLINTNEXTLINE\n  virtual Status DataPost(std::shared_ptr<DataContext> data_ctx);\n\n  /**\n   * @deprecated\n   * @brief Flowunit data group pre.\n   * @param data_ctx data context.\n   * @return data group result\n   */\n  // NOLINTNEXTLINE\n  virtual Status DataGroupPre(std::shared_ptr<DataContext> data_ctx);\n\n  /**\n   * @deprecated\n   * @brief Flowunit data group post.\n   * @param data_ctx data context.\n   * @return data group post result\n   */\n  // NOLINTNEXTLINE\n  virtual Status DataGroupPost(std::shared_ptr<DataContext> data_ctx);\n};\n\nclass FlowUnit : public IFlowUnit {\n public:\n  FlowUnit();\n  ~FlowUnit() override;\n\n  /* called when unit is open for process */\n  Status Open(const std::shared_ptr<Configuration> &config) override;\n\n  /* class when unit is close */\n  Status Close() override;\n\n  virtual void SetFlowUnitDesc(std::shared_ptr<FlowUnitDesc> 
desc);\n\n  virtual std::shared_ptr<FlowUnitDesc> GetFlowUnitDesc();\n\n  void SetBindDevice(const std::shared_ptr<Device> &device);\n\n  std::shared_ptr<Device> GetBindDevice();\n\n  void SetExternalData(const CreateExternalDataFunc &create_external_data);\n\n  std::shared_ptr<ExternalData> CreateExternalData() const;\n\n protected:\n  CreateExternalDataFunc GetCreateExternalDataFunc();\n  int32_t dev_id_{0};\n\n private:\n  std::shared_ptr<FlowUnitDesc> flowunit_desc_ =\n      std::make_shared<FlowUnitDesc>();\n  std::shared_ptr<Device> device_;\n\n  CreateExternalDataFunc create_ext_data_func_;\n};\n\nclass FlowUnitFactory : public DriverFactory {\n public:\n  FlowUnitFactory();\n  ~FlowUnitFactory() override;\n\n  virtual std::map<std::string, std::shared_ptr<FlowUnitDesc>> FlowUnitProbe();\n\n  void SetDriver(const std::shared_ptr<Driver> &driver) override;\n\n  std::shared_ptr<Driver> GetDriver() override;\n\n  virtual std::string GetFlowUnitFactoryType();\n\n  virtual std::string GetFlowUnitFactoryName();\n\n  virtual std::vector<std::string> GetFlowUnitNames();\n\n  virtual std::string GetVirtualType();\n\n  virtual void SetVirtualType(const std::string &virtual_type);\n\n  virtual std::string GetFlowUnitInputDeviceType();\n\n  virtual std::shared_ptr<FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type);\n\n  virtual std::shared_ptr<FlowUnit> VirtualCreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &virtual_type);\n\n  virtual void SetFlowUnitFactory(\n      const std::vector<std::shared_ptr<DriverFactory>>\n          &bind_flowunit_factory_list);\n\n private:\n  std::shared_ptr<Driver> driver_;\n};\n\nusing FlowUnitDeviceConfig =\n    std::unordered_map<std::string, std::vector<std::string>>;\n\nclass FlowUnitManager {\n public:\n  FlowUnitManager();\n  virtual ~FlowUnitManager();\n\n  static std::shared_ptr<FlowUnitManager> GetInstance();\n\n  Status 
Register(const std::shared_ptr<FlowUnitFactory> &factory);\n\n  Status Initialize(const std::shared_ptr<Drivers> &driver,\n                    std::shared_ptr<DeviceManager> device_mgr,\n                    const std::shared_ptr<Configuration> &config);\n\n  virtual std::vector<std::string> GetFlowUnitTypes();\n\n  virtual std::vector<std::string> GetFlowUnitList(\n      const std::string &unit_type);\n\n  virtual std::vector<std::string> GetFlowUnitTypes(\n      const std::string &unit_name);\n\n  std::vector<std::shared_ptr<FlowUnit>> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type = \"\",\n      const std::string &unit_device_id = \"\");\n\n  Status FlowUnitProbe();\n  Status InitFlowUnitFactory(const std::shared_ptr<Drivers> &driver);\n  Status SetUpFlowUnitDesc();\n  void Clear();\n  /**\n   * GetFlowUnitFactoryList(), GetFlowUnitDescList()\n   * only for test\n   */\n\n  std::map<std::pair<std::string, std::string>,\n           std::shared_ptr<FlowUnitFactory>>\n  GetFlowUnitFactoryList();\n  std::map<std::string, std::map<std::string, std::shared_ptr<FlowUnitDesc>>>\n  GetFlowUnitDescList();\n\n  std::vector<std::shared_ptr<FlowUnitDesc>> GetAllFlowUnitDesc();\n\n  std::shared_ptr<FlowUnitDesc> GetFlowUnitDesc(\n      const std::string &flowunit_type, const std::string &flowunit_name);\n\n  std::shared_ptr<DeviceManager> GetDeviceManager();\n\n  int max_executor_thread_num_;\n\n private:\n  Status CheckParams(const std::string &unit_name, const std::string &unit_type,\n                     const std::string &unit_device_id);\n\n  Status ParseUnitDeviceConf(const std::string &unit_name,\n                             const std::string &unit_type,\n                             const std::string &unit_device_id,\n                             FlowUnitDeviceConfig &dev_cfg);\n\n  Status ParseUserDeviceConf(const std::string &unit_type,\n                             const std::string &unit_device_id,\n                             
FlowUnitDeviceConfig &dev_cfg);\n\n  Status AutoFillDeviceConf(const std::string &unit_name,\n                            FlowUnitDeviceConfig &dev_cfg);\n\n  void SetDeviceManager(std::shared_ptr<DeviceManager> device_mgr);\n  std::shared_ptr<FlowUnit> CreateSingleFlowUnit(\n      const std::string &unit_name, const std::string &unit_type,\n      const std::string &unit_device_id);\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::map<std::pair<std::string, std::string>,\n           std::shared_ptr<FlowUnitFactory>>\n      flowunit_factory_;\n\n  std::map<std::string, std::map<std::string, std::shared_ptr<FlowUnitDesc>>>\n      flowunit_desc_list_;\n};\n}  // namespace modelbox\n#endif  // MODELBOX_FLOW_UNIT_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit_api_helper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_FLOW_UNIT_API_HELPER_H_\n#define MODELBOX_FLOW_UNIT_API_HELPER_H_\n\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/flowunit.h\"\n\n#pragma GCC visibility push(hidden)\nclass FlowUnitPluginFactory;\nclass FlowUnitPluginBase {\n public:\n  modelbox::FlowUnitDesc Desc;\n\n  virtual std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit() = 0;\n};\n\ntemplate <typename T>\nclass FlowUnitPlugin : public FlowUnitPluginBase {\n public:\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit() override {\n    return std::make_shared<T>();\n  }\n};\n\nclass FlowUnitList {\n public:\n  std::vector<FlowUnitPluginBase *> GetFlowUnitPlugins() {\n    return flowunit_plugin_;\n  }\n\n  FlowUnitPluginBase *GetFlowUnitPlugin(const std::string &name,\n                                        const std::string &type) {\n    for (auto &plugin : flowunit_plugin_) {\n      if (plugin->Desc.GetFlowUnitName() == name &&\n          plugin->Desc.GetDriverDesc()->GetType() == type) {\n        return plugin;\n      }\n    }\n\n    return nullptr;\n  }\n\n  void AddFlowUnitPlugin(FlowUnitPluginBase *plugin) {\n    flowunit_plugin_.push_back(plugin);\n  }\n\n private:\n  std::vector<FlowUnitPluginBase *> flowunit_plugin_;\n};\n\nclass FlowUnitPluginFactory : public 
modelbox::FlowUnitFactory {\n public:\n  FlowUnitPluginFactory(FlowUnitList *plugin_list)\n      : plugin_list_(plugin_list) {}\n  ~FlowUnitPluginFactory() override = default;\n  std::shared_ptr<modelbox::FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override {\n    auto *plugin = plugin_list_->GetFlowUnitPlugin(unit_name, unit_type);\n    if (plugin == nullptr) {\n      return nullptr;\n    }\n\n    return plugin->CreateFlowUnit();\n  }\n\n  std::vector<std::string> GetFlowUnitNames() override {\n    std::vector<std::string> result;\n\n    auto plugins = plugin_list_->GetFlowUnitPlugins();\n    for (auto &plugin : plugins) {\n      auto flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>(plugin->Desc);\n      result.push_back(flowunit_desc->GetFlowUnitName());\n    }\n\n    return result;\n  }\n\n  std::string GetFlowUnitFactoryType() override {\n    auto plugins = plugin_list_->GetFlowUnitPlugins();\n    if (plugins.size() <= 0) {\n      return \"\";\n    }\n\n    return plugins[0]->Desc.GetDriverDesc()->GetType();\n  }\n\n  std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> FlowUnitProbe()\n      override {\n    std::map<std::string, std::shared_ptr<modelbox::FlowUnitDesc>> return_map;\n    auto plugins = plugin_list_->GetFlowUnitPlugins();\n    for (auto &plugin : plugins) {\n      auto flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>(plugin->Desc);\n      return_map.insert(\n          std::make_pair(flowunit_desc->GetFlowUnitName(), flowunit_desc));\n    }\n    return return_map;\n  }\n\n private:\n  FlowUnitList *plugin_list_;\n};\n\n#define MODELBOX_FLOWUNIT_LIST_VAR kFlowUnitList\nextern FlowUnitList *ModelBoxGetFlowUnitPluginList() MODELBOX_DLL_LOCAL;\n#define MODELBOX_FLOWUNIT_PLUGIN_LIST ModelBoxGetFlowUnitPluginList()\n#define MODELBOX_FLOWUNIT_PLUGIN_LIST_DEFINE()                     \\\n  FlowUnitList MODELBOX_FLOWUNIT_LIST_VAR;                         \\\n  MODELBOX_DLL_LOCAL 
FlowUnitList *ModelBoxGetFlowUnitPluginList() { \\\n    return &MODELBOX_FLOWUNIT_LIST_VAR;                            \\\n  }\n\nextern std::shared_ptr<modelbox::DriverFactory> FlowUnitCreateFactory()\n    MODELBOX_DLL_LOCAL;\n#define MODELBOX_FLOWUNIT_PLUGIN_FACTORY_DEFINE()                               \\\n  MODELBOX_DLL_LOCAL std::shared_ptr<modelbox::DriverFactory>                     \\\n  FlowUnitCreateFactory() {                                                   \\\n    std::shared_ptr<modelbox::DriverFactory> factory =                          \\\n        std::make_shared<FlowUnitPluginFactory>(MODELBOX_FLOWUNIT_PLUGIN_LIST); \\\n    return factory;                                                           \\\n  }\n\n#define MODELBOX_FLOWUNIT_PLUGIN_DEFINE() \\\n  MODELBOX_FLOWUNIT_PLUGIN_LIST_DEFINE()  \\\n  MODELBOX_FLOWUNIT_PLUGIN_FACTORY_DEFINE()\n\n#define MODELBOX_FLOWUINT_PLUGIN_VAR_NAME(clazz) kFlowUnitPlugin_##clazz\n#define MODELBOX_FLOWUINT_PLUGIN_DECLEAR(clazz) \\\n  FlowUnitPlugin<clazz> MODELBOX_FLOWUINT_PLUGIN_VAR_NAME(clazz);\n\n#define MODELBOX_FLOWUNIT_SETTER(clazz, desc)                              \\\n  void FlowUnitPluginInit_##clazz(modelbox::FlowUnitDesc &(desc));         \\\n  auto unused_##clazz = []() {                                             \\\n    auto func = []() {                                                     \\\n      FlowUnitPluginInit_##clazz(                                          \\\n          MODELBOX_FLOWUINT_PLUGIN_VAR_NAME(clazz).Desc);                  \\\n      auto driver_desc = std::make_shared<modelbox::DriverDesc>(           \\\n          MODELBOX_DRIVER_PLUGIN->Desc);                                   \\\n      MODELBOX_FLOWUINT_PLUGIN_VAR_NAME(clazz).Desc.SetDriverDesc(         \\\n          driver_desc);                                                    \\\n      MODELBOX_DRIVER_PLUGIN->SetCreateFacotryFunc(FlowUnitCreateFactory); \\\n      MODELBOX_FLOWUNIT_PLUGIN_LIST->AddFlowUnitPlugin(     
               \\\n          &MODELBOX_FLOWUINT_PLUGIN_VAR_NAME(clazz));                      \\\n    };                                                                     \\\n    MODELBOX_DRIVER_PLUGIN_INIT_FUNC(func);                                \\\n    return true;                                                           \\\n  }();                                                                     \\\n  void FlowUnitPluginInit_##clazz(modelbox::FlowUnitDesc &(desc))\n\n/**\n * @brief Define an new flowunit driver\n * @param desc driver description\n */\n#define MODELBOX_DRIVER_FLOWUNIT(desc) \\\n  MODELBOX_FLOWUNIT_PLUGIN_DEFINE()    \\\n  MODELBOX_DRIVER(desc)\n\n/**\n * @brief Define a new flowunit\n * @param clazz class of flowunit\n * @param desc flowunit description\n */\n#define MODELBOX_FLOWUNIT(clazz, desc)     \\\n  MODELBOX_FLOWUINT_PLUGIN_DECLEAR(clazz); \\\n  MODELBOX_FLOWUNIT_SETTER(clazz, desc)\n#pragma GCC visibility pop\n\n#endif  // MODELBOX_FLOW_UNIT_API_HELPER_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit_balancer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOW_UNIT_BALANCER_H_\n#define MODELBOX_FLOW_UNIT_BALANCER_H_\n\n#include <functional>\n#include <list>\n#include <memory>\n#include <unordered_map>\n#include <vector>\n\n#include \"flowunit.h\"\n#include \"flowunit_data_executor.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nenum class FlowUnitBalanceStrategy : int32_t {\n  FU_ROUND_ROBIN,\n  FU_CAPABILITY,\n  FU_NULL\n};\n\nclass FUBalanceStrategyHash {\n public:\n  std::size_t operator()(const FlowUnitBalanceStrategy& value) const {\n    return (size_t)value;\n  }\n};\n\nstd::ostream& operator<<(std::ostream& os, const FlowUnitBalanceStrategy& s);\n\nclass FlowUnitBalancer : public std::enable_shared_from_this<FlowUnitBalancer> {\n public:\n  FlowUnitBalancer();\n\n  virtual ~FlowUnitBalancer();\n\n  Status Init(const std::vector<std::shared_ptr<FlowUnit>>& flowunits);\n\n  std::shared_ptr<FlowUnit> GetFlowUnit(\n      const std::shared_ptr<FlowUnitDataContext>& data_ctx);\n\n  void UnbindFlowUnit(const FlowUnitDataContext* data_ctx_ptr);\n\n  virtual FlowUnitBalanceStrategy GetType() = 0;\n\n protected:\n  virtual Status OnInit();\n\n  virtual std::shared_ptr<FlowUnit> BindFlowUnit(\n      const std::shared_ptr<FlowUnitDataContext>& data_ctx) = 0;\n\n  std::vector<std::shared_ptr<FlowUnit>> flowunits_;\n  std::mutex 
ctx_to_flowunit_map_lock_;\n  std::unordered_map<const DataContext*, std::shared_ptr<FlowUnit>>\n      ctx_to_flowunit_map_;\n\n private:\n  std::shared_ptr<FlowUnit> FirstBind(\n      const std::shared_ptr<FlowUnitDataContext>& data_ctx);\n};\n\nusing FUBalancerCreateFunc = std::function<std::shared_ptr<FlowUnitBalancer>()>;\n\nclass FlowUnitBalancerFactory {\n public:\n  virtual ~FlowUnitBalancerFactory();\n\n  static FlowUnitBalancerFactory& GetInstance();\n\n  std::shared_ptr<FlowUnitBalancer> CreateBalancer(\n      FlowUnitBalanceStrategy strategy =\n          FlowUnitBalanceStrategy::FU_ROUND_ROBIN);\n\n  void RegistBalancer(const FUBalancerCreateFunc& create_func);\n\n private:\n  FlowUnitBalancerFactory();\n\n  std::unordered_map<FlowUnitBalanceStrategy, FUBalancerCreateFunc,\n                     FUBalanceStrategyHash>\n      balancer_creator_map_;\n};\n\nclass FlowUnitBalancerRegister {\n public:\n  FlowUnitBalancerRegister(const FUBalancerCreateFunc& create_func);\n};\n\n#define REGIST_FLOWUNIT_BALANCER(balancer_class)                         \\\n  FlowUnitBalancerRegister g_flowunit_balancer_regiter_##balancer_class( \\\n      []() { return std::make_shared<balancer_class>(); });\n\nclass FlowUnitBalancerUtil {\n public:\n  void Init(const std::vector<std::shared_ptr<FlowUnit>>& flowunits);\n\n  std::shared_ptr<FlowUnit> GetFlowUnitByDevice(\n      const std::shared_ptr<Device>& device);\n\n  std::set<std::shared_ptr<Device>> GetInputDevices(\n      const std::shared_ptr<FlowUnitDataContext>& data_ctx);\n\n private:\n  std::unordered_map<const Device*, std::shared_ptr<FlowUnit>>\n      device_to_fu_map_;\n};\n\nclass FURoundRobinBalancer : public FlowUnitBalancer {\n public:\n  FlowUnitBalanceStrategy GetType() override;\n\n protected:\n  Status OnInit() override;\n\n  std::shared_ptr<FlowUnit> BindFlowUnit(\n      const std::shared_ptr<FlowUnitDataContext>& data_ctx) override;\n\n private:\n  std::shared_ptr<FlowUnit> GetNextFU();\n\n  
FlowUnitBalancerUtil util;\n  size_t fu_index_{0};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOW_UNIT_BALANCER_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit_builder.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOW_UNIT_BUILDER_H_\n#define MODELBOX_FLOW_UNIT_BUILDER_H_\n\n#include <memory>\n\n#include \"flowunit.h\"\n\nnamespace modelbox {\nclass FlowUnitBuilder {\n public:\n  virtual void Probe(std::shared_ptr<FlowUnitDesc> &desc) = 0;\n\n  virtual std::shared_ptr<FlowUnit> Build() = 0;\n};\n\nclass RegFlowUnitFactory : public FlowUnitFactory {\n public:\n  RegFlowUnitFactory(std::shared_ptr<FlowUnitBuilder> builder);\n\n  std::map<std::string, std::shared_ptr<FlowUnitDesc>> FlowUnitProbe() override;\n\n  std::string GetFlowUnitFactoryType() override;\n\n  std::string GetFlowUnitFactoryName() override;\n\n  std::shared_ptr<FlowUnit> CreateFlowUnit(\n      const std::string &unit_name, const std::string &unit_type) override;\n\n private:\n  std::shared_ptr<FlowUnitBuilder> builder_;\n  std::string unit_type_;\n  std::string unit_name_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOW_UNIT_BUILDER_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit_data_executor.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOW_UNIT_DATA_EXECUTOR_H_\n#define MODELBOX_FLOW_UNIT_DATA_EXECUTOR_H_\n\n#include <list>\n#include <map>\n#include <memory>\n#include <utility>\n\n#include \"flowunit.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/data_context.h\"\n\nnamespace modelbox {\n\n/**\n * @brief Bind flowunit with data context\n **/\nclass FlowUnitExecContext {\n public:\n  FlowUnitExecContext(std::shared_ptr<FlowUnitDataContext> data_ctx);\n\n  void SetFlowUnit(std::shared_ptr<FlowUnit> fu);\n\n  const std::shared_ptr<FlowUnit> &GetFlowUnit();\n\n  const std::shared_ptr<FlowUnitDataContext> &GetDataCtx();\n\n private:\n  std::shared_ptr<FlowUnit> bind_fu_;\n  std::shared_ptr<FlowUnitDataContext> data_ctx_;\n};\n\n/**\n * @brief data container for one data context\n * contains input, external and output\n **/\nclass FlowUnitExecData {\n public:\n  enum DataType { IN_DATA, OUT_DATA };\n\n  FlowUnitExecData(const std::shared_ptr<FlowUnit> &fu);\n\n  virtual ~FlowUnitExecData();\n\n  void ReserveCache(size_t buffer_count, DataType type = IN_DATA);\n\n  void AppendToCache(const std::shared_ptr<FlowUnitExecData> &src,\n                     size_t start_idx, size_t count, DataType type = IN_DATA);\n\n  void FlushCache(DataType type = IN_DATA);\n\n  void SetInData(const 
std::string &name,\n                 const std::vector<std::shared_ptr<Buffer>> &buffer_list);\n\n  std::shared_ptr<BufferListMap> GetInData();\n\n  std::shared_ptr<BufferListMap> GetInDataForUser();\n\n  std::shared_ptr<BufferList> GetInDataForUser(const std::string &name);\n\n  std::shared_ptr<BufferListMap> GetOutData();\n\n  std::shared_ptr<BufferList> GetOutData(const std::string &name);\n\n  Status SetExternalData(\n      const std::string &name,\n      const std::vector<std::shared_ptr<Buffer>> &buffer_list);\n\n  std::shared_ptr<BufferListMap> GetExternalData();\n\n  std::shared_ptr<BufferListMap> GetExternalDataForUser();\n\n  std::shared_ptr<BufferList> GetExternalDataForUser(const std::string &name);\n\n  size_t GetInBufferNum();\n\n  size_t GetExtBufferNum();\n\n  size_t GetOutBufferNum(bool accumulate_all_port = false);\n\n  void SetStatus(const Status &status);\n\n  Status GetStatus() const;\n\n  bool HasInData(const std::string &name) const;\n\n  bool HasOutData(const std::string &name) const;\n\n  bool HasExternData(const std::string &name) const;\n\n  void SetupUserInput();\n\n  Status SetupUserOutput(bool one_to_one, bool data_in_one_port);\n\n  Status CheckStatus(bool one_to_one, bool data_in_one_port);\n\n private:\n  void MakeCopyForUserOutput();\n\n  void FillErrorOutput(size_t out_count, bool data_in_one_port);\n\n  Status SaveProcessOneToOne(const std::shared_ptr<BufferListMap> &parent_data,\n                             size_t data_count, bool data_in_one_port);\n\n  Status SaveProcessNToM(const std::shared_ptr<BufferListMap> &parent_data);\n\n  std::shared_ptr<FlowUnit> fu_;\n  std::shared_ptr<BufferListMap> in_data_;\n  std::shared_ptr<BufferListMap> in_data_for_user_;\n  std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      in_data_cache_;\n  std::shared_ptr<BufferListMap> out_data_;\n  std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      out_data_cache_;\n  std::shared_ptr<BufferListMap> 
ext_data_;\n  std::shared_ptr<BufferListMap> ext_data_for_user_;\n  Status status_{STATUS_OK};\n};\n\nusing FUExecContextList = std::list<std::shared_ptr<FlowUnitExecContext>>;\nusing BatchedFUExecDataCtx = std::vector<std::shared_ptr<ExecutorDataContext>>;\nusing BatchedFUExecDataCtxList = std::vector<BatchedFUExecDataCtx>;\nusing BatchedFUExecData = std::vector<std::shared_ptr<FlowUnitExecData>>;\nusing BatchedFUExecDataList = std::vector<BatchedFUExecData>;\n\n/**\n * @brief This mapper contains all data for one flowunit\n * we have multi flowunit for one node, and each flowunit corresponds to\n * the node implementation in one device\n **/\nclass FlowUnitExecDataMapper {\n public:\n  void AddExecCtx(const std::shared_ptr<FlowUnitExecContext> &exec_ctx);\n\n  void LoadDataFromExecCtx();\n\n  Status MapData(bool need_reshape, size_t batch_size, bool is_stream);\n\n  Status MoveToTargetDevice(bool need_contiguous);\n\n  void SetupUserInput();\n\n  BatchedFUExecDataCtxList GetBatchedExecDataCtxList();\n\n  Status CheckOutputDataNumber(bool data_in_one_port);\n\n  Status CheckStatus(bool one_to_one, bool data_in_one_port);\n\n  Status SetupUserOutput(bool one_to_one, bool data_in_one_port);\n\n  Status SaveDataToExecCtx();\n\n  void Clear();\n\n private:\n  enum MapType { DIRECT_MAP, RESHAPE_NORMAL, RESHAPE_STREAM };\n\n  bool NeedReshape(size_t batch_size);\n\n  Status DirectMap();\n\n  Status ReshapeNormal(size_t batch_size);\n\n  void BuildMappedDataNormal(size_t batch_size);\n\n  void FillMappedDataNormal(size_t batch_size);\n\n  Status ReshapeStream(size_t batch_size);\n\n  void BuildMappedDataStream();\n\n  void FillMappedDataStream(size_t batch_size);\n\n  Status MoveDataToTargetDevice(std::shared_ptr<BufferListMap> &data,\n                                bool need_contiguous);\n\n  Status WriteBackStream();\n\n  Status WriteBackNormal();\n\n  Status FillExecCtxOutput();\n\n  Status CheckAllOutputNumEqual(const std::shared_ptr<FlowUnitExecData> &data,\n      
                          bool data_in_one_port);\n\n  Status CheckOutputNumEqualInput(const std::shared_ptr<FlowUnitExecData> &data,\n                                  bool data_in_one_port);\n\n  // data ctx list of same flowunit that come from node receive\n  std::vector<std::shared_ptr<FlowUnitExecContext>> origin_exec_ctx_list_;\n  // data from diff data ctx\n  std::vector<std::shared_ptr<FlowUnitExecData>> origin_data_list_;\n  std::vector<size_t> origin_shapes_;\n  // after reshape and copy\n  MapType map_type_;\n  BatchedFUExecDataList mapped_data_list_;\n  std::vector<std::vector<size_t>> mapped_shapes_;\n  // data ctx list that fu process can see\n  BatchedFUExecDataCtxList mapped_exec_data_ctx_list_;\n};\n\nusing ExecViewVisitFunc =\n    std::function<void(FlowUnit *flowunit, const BatchedFUExecDataCtxList\n                                               &batched_exec_data_ctx_list)>;\n\n/**\n * @brief contains multi flowunit data for one node\n * we use mapper to manage each flowunit data\n **/\nclass FlowUnitExecDataView {\n public:\n  FlowUnitExecDataView(FUExecContextList exec_ctx_list);\n\n  virtual ~FlowUnitExecDataView();\n\n  Status LoadInputFromExecCtx(bool need_reshape, bool is_stream,\n                              size_t batch_size, bool need_contiguous);\n\n  const std::vector<FlowUnit *> &GetFlowUnits();\n\n  const BatchedFUExecDataCtxList &GetFlowUnitProcessData(FlowUnit *flowunit);\n\n  Status CheckOutputDataNumber(bool data_in_one_port);\n\n  Status CheckStatus(bool one_to_one, bool data_in_one_port);\n\n  Status SetupUserOutput(bool one_to_one, bool data_in_one_port);\n\n  Status SaveOutputToExecCtx();\n\n  void Clear();\n\n private:\n  Status DevideExecCtxByFlowunit();\n\n  FUExecContextList exec_ctx_list_;\n  // data mapper for flowunits. 
each mapper contains all origin input data for\n  // one flowunits\n  std::unordered_map<FlowUnit *, std::shared_ptr<FlowUnitExecDataMapper>>\n      mapper_of_flowunit_;\n  // data wrapper for flowunits. each wrapper contains all process data for one\n  // flowunit\n  std::vector<FlowUnit *> flowunit_list_;\n  std::unordered_map<FlowUnit *, BatchedFUExecDataCtxList> data_of_flowunit_;\n  std::mutex data_of_flowunit_lock_;\n\n  class LoadConfig {\n   public:\n    LoadConfig(bool need_reshape, bool is_stream, size_t batch_size,\n               bool need_contiguous)\n        : need_reshape_{need_reshape},\n          is_stream_{is_stream},\n          batch_size_{batch_size},\n          need_contiguous_{need_contiguous} {}\n\n    bool need_reshape_;\n    bool is_stream_;\n    size_t batch_size_;\n    bool need_contiguous_;\n  };\n\n  Status DataLoadTask(\n      const LoadConfig &cfg, FlowUnit *flowunit,\n      const std::shared_ptr<FlowUnitExecDataMapper> &exec_data_mapper);\n\n  Status PackLoadTasks(const LoadConfig &cfg,\n                       std::vector<std::shared_ptr<Executor>> &executors,\n                       std::vector<std::function<Status()>> &tasks);\n\n  Status PackSaveTasks(std::vector<std::shared_ptr<Executor>> &executors,\n                       std::vector<std::function<Status()>> &tasks);\n};\n\nclass FlowUnitDataExecutor {\n public:\n  FlowUnitDataExecutor(std::weak_ptr<Node> node_ref, size_t batch_size);\n\n  virtual ~FlowUnitDataExecutor();\n\n  virtual Status Process(const FUExecContextList &exec_ctx_list);\n\n  Status DataCtxExecuteFunc(FlowUnit *flowunit,\n                            const BatchedFUExecDataCtxList &process_data,\n                            size_t data_ctx_idx);\n\n  void SetNeedCheckOutput(bool need_check);\n\n private:\n  Status LoadExecuteInput(const std::shared_ptr<Node> &node,\n                          FlowUnitExecDataView &exec_view);\n\n  Status Execute(FlowUnitExecDataView &exec_view);\n\n  Status 
SaveExecuteOutput(const std::shared_ptr<Node> &node,\n                           FlowUnitExecDataView &exec_view);\n\n  std::weak_ptr<Node> node_ref_;\n  size_t batch_size_;\n  bool need_check_output_{false};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOW_UNIT_DATA_EXECUTOR_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/flowunit_group.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_GROUP_H_\n#define MODELBOX_FLOWUNIT_GROUP_H_\n\n#include <algorithm>\n#include <list>\n#include <set>\n#include <utility>\n\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_data_executor.h\"\n#include \"modelbox/profiler.h\"\n\nnamespace modelbox {\n\nusing OutputBuffer =\n    std::unordered_map<std::string, std::vector<std::shared_ptr<BufferList>>>;\n\nusing SchedulerEventList =\n    std::shared_ptr<std::vector<std::shared_ptr<SchedulerEvent>>>;\nclass FlowUnitBalancer;\nclass Node;\nclass FlowUnitGroup {\n public:\n  FlowUnitGroup(std::string unit_name, std::string unit_type,\n                std::string unit_device_id,\n                std::shared_ptr<Configuration> config,\n                std::shared_ptr<Profiler> profiler);\n\n  virtual ~FlowUnitGroup();\n\n  Status Init(const std::set<std::string> &input_ports_name,\n              const std::set<std::string> &output_ports_name,\n              const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n              bool checkport = true);\n\n  Status CheckInputAndOutput(const std::set<std::string> &input_ports_name,\n                             const std::set<std::string> &output_ports_name);\n\n  Status Run(std::list<std::shared_ptr<FlowUnitDataContext>> &data_ctx_list);\n\n  Status Destory();\n\n  
std::shared_ptr<FlowUnit> GetExecutorUnit();\n\n  void SetNode(const std::shared_ptr<Node> &node);\n\n  Status Open(const CreateExternalDataFunc &create_func);\n\n  Status Close();\n\n  uint32_t GetBatchSize() const;\n\n private:\n  std::weak_ptr<Node> node_;\n  uint32_t batch_size_;\n\n  std::vector<std::shared_ptr<FlowUnit>> flowunit_group_;\n  std::string unit_name_;\n  std::string unit_type_;\n  std::string unit_device_id_;\n  std::shared_ptr<Configuration> config_;\n  std::shared_ptr<Profiler> profiler_;\n  std::shared_ptr<FlowUnitTrace> flowunit_trace_;\n  std::once_flag trace_init_flag_;\n\n  std::shared_ptr<FlowUnitBalancer> balancer_;\n  std::shared_ptr<FlowUnitDataExecutor> executor_;\n\n  void InitTrace();\n\n  std::shared_ptr<TraceSlice> StartTrace(FUExecContextList &exec_ctx_list);\n\n  void StopTrace(std::shared_ptr<TraceSlice> &slice);\n\n  void PreProcess(FUExecContextList &exec_ctx_list);\n\n  Status Process(FUExecContextList &exec_ctx_list);\n\n  Status PostProcess(FUExecContextList &exec_ctx_list);\n\n  void PostProcessEvent(FUExecContextList &exec_ctx_list);\n\n  FUExecContextList CreateExecCtx(\n      std::list<std::shared_ptr<FlowUnitDataContext>> &data_ctx_list);\n};\n}  // namespace modelbox\n#endif  // MODELBOX_FLOWUNIT_GROUP_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/graph.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_GRAPH_H_\n#define MODELBOX_GRAPH_H_\n\n#include <modelbox/flowunit.h>\n#include <modelbox/scheduler.h>\n#include <modelbox/session.h>\n\n#include <memory>\n#include <vector>\n\n#include \"modelbox/base/graph_manager.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/statistics.h\"\n#include \"modelbox/virtual_node.h\"\nnamespace modelbox {\n\nclass Graph {\n public:\n  Graph();\n  virtual ~Graph();\n\n  /**\n   * @brief Initialize graph\n   * @param flowunit_mgr flowunit manager\n   * @param device_mgr device manager\n   * @param profiler profiler\n   * @param config configuration\n   * @return initialize result\n   */\n  Status Initialize(const std::shared_ptr<FlowUnitManager> &flowunit_mgr,\n                    const std::shared_ptr<DeviceManager> &device_mgr,\n                    std::shared_ptr<Profiler> profiler,\n                    const std::shared_ptr<Configuration> &config);\n  /**\n   * @brief Build a graph\n   * @param g graph data pointer\n   * @return initialize result\n   */\n  Status Build(const std::shared_ptr<GCGraph> &g);\n\n  Status Topology(const std::function<bool(std::shared_ptr<NodeBase> node,\n                                           int order)> &callback) const;\n\n  Status AddNode(const std::shared_ptr<NodeBase> &node);\n\n  std::shared_ptr<NodeBase> 
GetNode(const std::string &nodeName) const;\n\n  std::shared_ptr<InPort> GetInPort(const std::string &nodeName,\n                                    const std::string &portName) const;\n\n  std::unordered_map<std::shared_ptr<NodeBase>,\n                     std::vector<std::shared_ptr<IPort>>>\n  GetNotifyPort() const;\n\n  std::shared_ptr<OutPort> GetOutPort(const std::string &nodeName,\n                                      const std::string &portName) const;\n\n  Status AddLink(const std::string &srcNodeName, const std::string &srcPortName,\n                 const std::string &dstNodeName,\n                 const std::string &dstPortName);\n\n  Status AddLink(const std::shared_ptr<OutPort> &src,\n                 const std::shared_ptr<InPort> &dst);\n\n  std::set<std::shared_ptr<InPort>> GetDstPortsByPort(\n      const std::shared_ptr<OutPort> &port) const;\n\n  std::set<std::shared_ptr<OutPort>> GetSrcPortsByPort(\n      const std::shared_ptr<InPort> &port) const;\n\n  std::set<std::shared_ptr<NodeBase>> GetStartNodes() const;\n\n  std::set<std::shared_ptr<NodeBase>> GetEndNodes() const;\n\n  std::set<std::shared_ptr<NodeBase>> GetAllNodes() const;\n\n  std::set<std::shared_ptr<NodeBase>> GetDstNodesByNode(\n      const std::string &nodeName) const;\n\n  std::set<std::shared_ptr<NodeBase>> GetSrcNodesByNode(\n      const std::string &nodeName) const;\n\n  std::shared_ptr<ExternalDataMap> CreateExternalDataMap();\n\n  Status Run();\n\n  void RunAsync();\n\n  virtual Status Shutdown();\n\n  Status Wait(int64_t milliseconds = 0, Status *ret_val = nullptr);\n\n  std::string GetId() const;\n\n  std::string GetName() const;\n\n  std::set<std::shared_ptr<NodeBase>> GetEndPointNodes() const;\n\n private:\n  void ShowGraphInfo(const std::shared_ptr<GCGraph> &g);\n\n  Status CheckGraph();\n\n  Status BuildFlowunitNode(const std::shared_ptr<GCGraph> &g,\n                           const std::shared_ptr<GCNode> &gcnode, bool strict);\n\n  Status BuildInputNode(const 
std::shared_ptr<GCNode> &gcnode);\n\n  Status BuildOutputNode(const std::shared_ptr<GCNode> &gcnode);\n\n  Status BuildNode(const std::shared_ptr<GCGraph> &g,\n                   const std::shared_ptr<GCNode> &gcnode, bool strict);\n\n  Status BuildNodes(const std::shared_ptr<GCGraph> &g);\n\n  Status BuildVirtualNodes(const std::shared_ptr<GCGraph> &g);\n\n  Status BuildEdges(const std::shared_ptr<GCGraph> &g);\n\n  Status BuildGraph(const std::shared_ptr<GCGraph> &g);\n\n  Status OpenNodes();\n\n  void CloseNodes() const;\n\n  virtual Status IsValidGraph() const;\n  void FindLoopWithNode(std::shared_ptr<NodeBase> &root_node,\n                        std::vector<std::string> &vis);\n\n  void FindLoopSeq(std::shared_ptr<NodeBase> &root_node,\n                   std::vector<std::string> &vis);\n\n  Status FindLoopStructure();\n\n  void FillLoopLink();\n\n  void FillLoopEndPort();\n\n  Status CheckLoopNode();\n\n  Status IsAllPortConnect() const;\n\n  Status IsAllNodeConnect() const;\n\n  Status UpdatePriority();\n\n  Status GenerateTopology();\n\n  Status CheckLoopStructureNode();\n\n  Status InitPort();\n\n  virtual Status InitScheduler();\n\n  Status UpdateGraphConfigToNode(const std::shared_ptr<GCGraph> &g,\n                                 const std::shared_ptr<GCNode> &node);\n\n  virtual Status InitNode(std::shared_ptr<Node> &node,\n                          const std::set<std::string> &input_port_names,\n                          const std::set<std::string> &output_port_names,\n                          std::shared_ptr<Configuration> &config);\n\n  SessionManager session_manager_;\n\n  std::map<std::string, std::shared_ptr<NodeBase>> nodes_;\n\n  std::map<std::shared_ptr<OutPort>, std::set<std::shared_ptr<InPort>>>\n      src_to_dst_;\n\n  std::map<std::shared_ptr<InPort>, std::set<std::shared_ptr<OutPort>>>\n      dst_to_src_;\n\n  std::vector<std::shared_ptr<NodeBase>> topo_order_;\n\n  std::shared_ptr<Scheduler> scheduler_;\n\n  
std::shared_ptr<FlowUnitManager> flowunit_mgr_;\n\n  std::shared_ptr<DeviceManager> device_mgr_;\n\n  std::shared_ptr<Profiler> profiler_;\n\n  std::shared_ptr<StatisticsItem> flow_stats_;\n  std::shared_ptr<StatisticsItem> graph_stats_;\n\n  std::shared_ptr<Configuration> config_;\n\n  std::string input_node_name_;\n\n  std::set<std::string> input_node_ports_;\n\n  std::unordered_map<std::string, std::shared_ptr<Configuration>>\n      input_node_config_map_;\n\n  std::shared_ptr<Node> input_node_;\n\n  std::string output_node_name_;\n\n  std::set<std::string> output_node_ports_;\n\n  std::unordered_map<std::string, std::shared_ptr<Configuration>>\n      output_node_config_map_;\n\n  std::shared_ptr<NodeBase> output_node_;\n\n  std::string name_;\n\n  std::string id_;\n\n  std::vector<std::vector<std::string>> loop_structures_;\n\n  std::map<std::string, std::string> loop_links_;\n\n  bool is_stop_{false};\n};\n\nclass DynamicGraph : public Graph {\n public:\n  DynamicGraph();\n  ~DynamicGraph() override;\n\n  Status Shutdown() override;\n  Status IsValidGraph() const override;\n  Status InitScheduler() override;\n\n  Status InitNode(std::shared_ptr<Node> &node,\n                  const std::set<std::string> &input_port_names,\n                  const std::set<std::string> &output_port_names,\n                  std::shared_ptr<Configuration> &config) override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_GRAPH_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/graph_checker.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_GRAPH_CHECKER_H\n#define MODELBOX_GRAPH_CHECKER_H\n\n#include <map>\n#include <memory>\n#include <set>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/virtual_node.h\"\n\nnamespace modelbox {\n\nclass Graph;\nclass NodeBase;\nclass InputVirtualNode;\nclass OutputVirtualNode;\nclass OutputUnmatchVirtualNode;\n\nenum IndexPortType { INPUT, OUTPUT, UNKNOWN };\n\nclass IndexPort {\n public:\n  IndexPort();\n  IndexPort(std::string node, std::string port,\n            const IndexPortType &type = IndexPortType::UNKNOWN);\n  virtual ~IndexPort();\n\n  std::string ToString() const;\n\n  void SetNodeName(std::string node_name);\n\n  void SetPortName(std::string port_name);\n\n  void SetPortType(IndexPortType port_type);\n\n  const std::string &GetNodeName() const;\n\n  const std::string &GetPortName() const;\n\n  const IndexPortType &GetPortType() const;\n\n private:\n  std::string node_name_;\n  std::string port_name_;\n  IndexPortType port_type_{IndexPortType::UNKNOWN};\n};\n\nusing NodeStreamConnection = std::map<std::string, std::vector<IndexPort>>;\n\nclass LeastCommonAncestor {\n public:\n  LeastCommonAncestor(\n      std::unordered_map<std::string, std::shared_ptr<NodeBase>> all_nodes);\n  
virtual ~LeastCommonAncestor();\n  void Update(const std::vector<IndexPort> &values,\n              const std::unordered_map<std::string, std::string> &match_map);\n  IndexPort Find(const IndexPort &node_a, const IndexPort &node_b);\n\n private:\n  void InitMap();\n  // match_a_name & match_b_name : match_node_name\n  std::string GetMatchPortName(const std::string &match_a_name,\n                               const std::string &match_b_name,\n                               const std::string &match_node_name);\n  std::string GetMatchPortName(const std::string &port_name,\n                               const std::string &match_a_name,\n                               const std::string &match_b_name,\n                               const std::string &match_node_name);\n  IndexPort ProcessSameNode(const IndexPort &node_a, const IndexPort &node_b);\n  void FindMatchNode(const IndexPort &node_a, const IndexPort &node_b,\n                     std::string &match_a_name, std::string &match_b_name,\n                     std::string &match_node_name, std::string &port_name);\n  void GetIndexPortType(const std::string &node_name,\n                        const std::string &port_name, IndexPortType &port_type);\n\n  std::unordered_map<std::string, std::shared_ptr<NodeBase>> all_nodes_;\n  std::map<int, std::vector<int>> paths_;\n  std::unordered_map<int, std::string> index_name_map_;\n  std::unordered_map<std::string, int> name_index_map_;\n};\n\nclass OverHierarchyCheck {\n public:\n  OverHierarchyCheck(\n      const std::unordered_map<std::string, std::shared_ptr<NodeBase>>\n          &all_nodes,\n      std::set<std::shared_ptr<NodeBase>> start_nodes,\n      std::map<std::string, std::string> loop_links,\n      std::vector<std::vector<std::string>> loop_structures,\n      std::map<std::shared_ptr<OutPort>, std::set<std::shared_ptr<InPort>>>\n          edges);\n  virtual ~OverHierarchyCheck();\n\n  Status Check(\n      const std::unordered_map<std::string, std::string> 
&graph_match_map,\n      const std::unordered_map<std::string,\n                               std::unordered_map<std::string, std::string>>\n          &graph_single_port_match_map,\n      const std::unordered_map<std::string, std::string> &end_if_map);\n\n private:\n  void InitFirstNode(const std::shared_ptr<Node> &node);\n  Status CheckInputPortsColorReady(\n      std::shared_ptr<IndexPort> &index_port,\n      const std::vector<std::shared_ptr<InPort>> &input_ports);\n  Status CheckInputPorts(\n      const std::shared_ptr<Node> &node,\n      const std::unordered_map<std::string,\n                               std::unordered_map<std::string, std::string>>\n          &graph_single_port_match_map);\n  void GetColorMap(\n      const std::shared_ptr<Node> &node,\n      const std::vector<std::shared_ptr<OutPort>> &output_ports,\n      const std::unordered_map<std::string, std::string> &graph_match_map,\n      const std::unordered_map<std::string,\n                               std::unordered_map<std::string, std::string>>\n          &graph_single_port_match_map,\n      const std::unordered_map<std::string, std::string> &end_if_map);\n  std::shared_ptr<NodeBase> FindLoopLinkNode(const std::shared_ptr<Node> &node);\n  void SetOutPortColor(const std::shared_ptr<Node> &node,\n                       const std::vector<std::shared_ptr<OutPort>> &out_ports,\n                       const std::vector<int> &new_color);\n  bool CheckEndIfPort(\n      const std::shared_ptr<InPort> &input_port,\n      const std::shared_ptr<IndexPort> &index_port,\n      const std::unordered_map<std::string,\n                               std::unordered_map<std::string, std::string>>\n          &graph_single_port_match_map);\n  bool CheckEndIfNode(\n      const std::shared_ptr<Node> &node,\n      const std::unordered_map<std::string, std::string> &end_if_map);\n\n  std::unordered_map<std::string, std::shared_ptr<NodeBase>> all_nodes_;\n  std::set<std::shared_ptr<NodeBase>> start_nodes_;\n  
std::map<std::string, std::string> loop_links_;\n  std::vector<std::vector<std::string>> loop_structures_;\n  std::map<std::shared_ptr<OutPort>, std::set<std::shared_ptr<InPort>>> edges_;\n  std::unordered_map<std::string, std::vector<int>> color_map_;\n  std::unordered_map<std::string, bool> visited_;\n  int max_color_{0};\n};\n\nclass GraphChecker {\n public:\n  GraphChecker(const std::vector<std::shared_ptr<NodeBase>> &nodes,\n               const std::set<std::shared_ptr<NodeBase>> &start_nodes,\n               std::map<std::string, std::string> loop_links,\n               std::vector<std::vector<std::string>> loop_structures,\n               const std::map<std::shared_ptr<OutPort>,\n                              std::set<std::shared_ptr<InPort>>> &edges);\n  virtual ~GraphChecker();\n\n  void SetMatchNodes();\n  void ShowMatchNodes();\n  Status Check();\n\n private:\n  Status CalNodeStreamMap(const std::shared_ptr<NodeBase> &node,\n                          NodeStreamConnection &node_stream_map);\n  Status CheckNodeMatch(const std::shared_ptr<Node> &node,\n                        const NodeStreamConnection &node_stream_map);\n  Status CheckCollapseMatch(const std::shared_ptr<Node> &node);\n  Status CheckBranchPathMatch(const std::string &start, const std::string &end);\n  Status CheckOverHierarchyMatch();\n  Status CheckUnmatchExpands(size_t size);\n  Status CheckLeastCommonAncestorsAnyTwoNodes(\n      const std::vector<IndexPort> &match_nodes,\n      std::vector<IndexPort> &res_nodes);\n  Status LeastCommonAncestors(const std::vector<IndexPort> &match_nodes,\n                              IndexPort &res_match_node);\n  std::unordered_map<std::string, std::string> GetGraphMatchMap();\n  bool CheckPortMatch(const IndexPort &match_pair);\n  void FindNearestNeighborMatchExpand(const std::string &node,\n                                      std::string &match_node);\n  void UpdateAncestorPath(const std::vector<IndexPort> &values);\n\n  
std::vector<std::shared_ptr<NodeBase>> nodes_;\n  std::map<std::string, std::string> loop_links_;\n  std::vector<std::vector<std::string>> loop_structures_;\n  std::shared_ptr<LeastCommonAncestor> lca_;\n  std::shared_ptr<OverHierarchyCheck> ovc_;\n  std::unordered_map<std::string, std::shared_ptr<NodeBase>> all_nodes_;\n  std::unordered_map<std::string, std::string> graph_match_map_;\n  std::map<std::string, NodeStreamConnection> node_stream_connection_map_;\n  std::unordered_map<std::string, std::unordered_map<std::string, std::string>>\n      graph_single_port_match_map_;\n  std::unordered_map<std::string, std::string> end_if_map_;\n  size_t expands_{0};\n};\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/iam_auth.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_IAM_AUTH_H_\n#define MODELBOX_FLOWUNIT_IAM_AUTH_H_\n\n#include <modelbox/base/status.h>\n#include <modelbox/token_header.h>\n\n#include <functional>\n#include <memory>\n\nnamespace modelbox {\n/**\n * @brief\n * User: means algorithm user\n * Service: means algorithm developer or platform developer\n */\nclass IAMAuth {\n public:\n  IAMAuth();\n  virtual ~IAMAuth();\n\n  /**\n   * @brief get iamauth instance\n   * @return iamauth instance\n   */\n  static std::shared_ptr<IAMAuth> GetInstance();\n\n  /**\n   * @brief initialize timer\n   * @return successful or fault\n   */\n  modelbox::Status Init();\n\n  /**\n   * @brief set iam host address\n   * @param host - in, iam host address\n   */\n  void SetIAMHostAddress(const std::string &host);\n\n  /**\n   * @brief Set Consignee info: ak, sk, domain_id and project_id\n   * @param service_ak - in, access key for vas\n   * @param service_sk - in, secret key for vas\n   * @param domain_id - in, domain id\n   * @param project_id - in, project id\n   * @return successful or fault\n   */\n  modelbox::Status SetConsigneeInfo(const std::string &service_ak,\n                                    const std::string &service_sk,\n                                    const std::string &domain_id,\n                                    const std::string 
&project_id);\n  /**\n   * @brief If service cert has been set, then you can get\n   * user agency Project credential to access user cloud resource\n   * @param agency_user_credential - out, agency credential\n   * @param agency_info - in, agency info\n   * @param user_id - in, user id\n   * @return successful or fault\n   */\n  modelbox::Status GetUserAgencyProjectCredential(\n      UserAgencyCredential &agency_user_credential,\n      const AgencyInfo &agency_info, const std::string &user_id = \"\");\n\n  /**\n   * @brief If service cert has been set, then you can get\n   * user agency Project token to access user cloud resource\n   * @param agency_user_token - out, agency user token\n   * @param agency_info - in, agency info\n   * @param project_info - in, project info\n   * @return successful or fault\n   */\n  modelbox::Status GetUserAgencyProjectToken(UserAgencyToken &agency_user_token,\n                                             const AgencyInfo &agency_info,\n                                             const ProjectInfo &project_info);\n\n  /**\n   * @brief If user agency Project credential expires, notify me\n   * @param agency_info - in, agency info\n   */\n  void ExpireUserAgencyProjectCredential(const AgencyInfo &agency_info);\n\n  /**\n   * @brief If user agency Project token expires, notify me\n   * @param agency_info - in, agency info\n   */\n  void ExpireUserAgencyProjectToken(const AgencyInfo &agency_info);\n\n  /**\n   * @brief Save agency project credential\n   * @param credential - in, credential token\n   */\n  void SetUserAgencyCredential(const UserAgencyCredential &credential);\n\n  /**\n   * @brief Remove agency project credential\n   * @param userId - in, user id\n   */\n  void RemoveUserAgencyCredential(const std::string &userId);\n\n  /**\n   * @brief Save vas token\n   * @param token - in, vas token from iva\n   */\n  void SetAgentToken(const AgentToken &token);\n\n  /**\n   * @brief set update token callback function\n   * @param 
callback - in, callback function\n   */\n  void SetUpdateAgentTokenCallBack(std::function<void()> &callback);\n};\n}  // namespace modelbox\n#endif  // MODELBOX_FLOWUNIT_IAM_AUTH_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/inner_event.h",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_INNER_EVENT_H_\n#define MODELBOX_INNER_EVENT_H_\n#include <memory>\n#include <string>\n#include <unordered_map>\n\nnamespace modelbox {\n\nclass MatchKey;\n\nclass FlowUnitEvent {\n public:\n  FlowUnitEvent();\n  virtual ~FlowUnitEvent();\n  void SetPrivate(const std::string &key,\n                  const std::shared_ptr<void> &private_content);\n  std::shared_ptr<void> GetPrivate(const std::string &key);\n\n  template <typename T>\n  inline std::shared_ptr<T> GetPrivate(const std::string &key) {\n    return std::static_pointer_cast<T>(GetPrivate(key));\n  }\n\n private:\n  std::unordered_map<std::string, std::shared_ptr<void>> private_map_;\n};\n\nclass FlowUnitInnerEvent {\n public:\n  enum EventCode {\n    EXPAND_UNFINISH_DATA = 0,\n    EXPAND_NEXT_STREAM,\n    COLLAPSE_NEXT_STREAM\n  };\n  FlowUnitInnerEvent(EventCode code);\n  virtual ~FlowUnitInnerEvent();\n  void SetDataCtxMatchKey(MatchKey *match_key);\n  MatchKey *GetDataCtxMatchKey();\n  std::shared_ptr<FlowUnitEvent> GetUserEvent();\n  void SetUserEvent(std::shared_ptr<FlowUnitEvent> event);\n  int GetPriority();\n  EventCode GetEventCode();\n\n private:\n  int priority_ = 0;\n  EventCode code_ = EXPAND_UNFINISH_DATA;\n  std::shared_ptr<FlowUnitEvent> user_event_;\n  MatchKey *match_key_;\n};\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/match_stream.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MATCH_STREAM_H_\n#define MODELBOX_MATCH_STREAM_H_\n\n#include <memory>\n#include <set>\n#include <unordered_map>\n#include <vector>\n\n#include \"modelbox/buffer.h\"\n#include \"modelbox/inner_event.h\"\n\nnamespace modelbox {\n\nclass InPort;\n\nusing PortDataMap =\n    std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>;\n\nclass MatchKey {\n public:\n  static MatchKey* AsKey(BufferIndexInfo* match_at_buffer);\n\n  static MatchKey* AsKey(Stream* match_at_stream);\n};\n\nclass MatchStreamData {\n public:\n  MatchStreamData();\n  virtual ~MatchStreamData();\n\n  void SetStreamMatchKey(MatchKey* match_at);\n\n  MatchKey* GetStreamMatchKey();\n\n  void SetSession(std::shared_ptr<Session> session);\n\n  std::shared_ptr<Session> GetSession();\n\n  void SetEvent(std::shared_ptr<FlowUnitInnerEvent>& event);\n\n  std::shared_ptr<FlowUnitInnerEvent> GetEvent();\n\n  void SetBufferList(std::shared_ptr<PortDataMap> port_buffers);\n\n  std::shared_ptr<PortDataMap> GetBufferList() const;\n\n  size_t GetDataCount() const;\n\n private:\n  MatchKey* match_at_{nullptr};\n  std::shared_ptr<Session> session_;\n  std::shared_ptr<FlowUnitInnerEvent> event_;\n  std::shared_ptr<PortDataMap> port_to_stream_data_;\n};\n\nclass MatchBufferCache {\n public:\n  MatchBufferCache(\n      size_t 
port_count,\n      std::unordered_map<std::string, size_t>* stream_count_each_port);\n  virtual ~MatchBufferCache();\n\n  Status CacheBuffer(const std::string& port_name,\n                     std::shared_ptr<Buffer>& buffer);\n\n  bool IsMatched() const;\n\n  bool IsEndFlag() const;\n\n  const std::unordered_map<std::string, std::shared_ptr<Buffer>>& GetBuffers()\n      const;\n\n private:\n  size_t port_count_;\n  std::unordered_map<std::string, size_t> end_flag_count_;\n  bool is_end_flag_{false};\n  bool is_placeholder_{false};\n  std::unordered_map<std::string, std::shared_ptr<Buffer>> buffer_cache_;\n  std::unordered_map<std::string, size_t> cur_buffer_count_each_port_;\n  std::unordered_map<std::string, size_t>* stream_count_each_port_;\n};\n\n/**\n * Manage stream info for each input port\n * will use this info to analysis app problem and performance\n **/\nclass InPortStreamInfo {\n public:\n  void ReceiveBuffer(std::shared_ptr<Buffer>& buffer);\n\n  size_t GetReceivedBufferCount();\n\n  size_t GetReceivedStreamCount();\n\n  bool ReachEnd();\n};\n\nclass MatchStreamCache {\n public:\n  MatchStreamCache(\n      std::string node_name, size_t port_count,\n      std::unordered_map<std::string, size_t>* stream_count_each_port);\n\n  virtual ~MatchStreamCache();\n\n  Status CacheBuffer(const std::string& port_name,\n                     std::shared_ptr<Buffer>& buffer);\n\n  std::shared_ptr<PortDataMap> PopReadyMatchBuffers(bool in_order,\n                                                    bool gather_all);\n\n  void SetSession(std::shared_ptr<Session> session);\n\n  std::shared_ptr<Session> GetSession();\n\n  bool IsStreamEnd();\n\n  size_t TotalInputCount();\n\n private:\n  void UpdateInputStreamInfo(const std::string& port_name,\n                             std::shared_ptr<Buffer>& buffer);\n\n  std::string node_name_;\n  std::map<size_t, std::shared_ptr<MatchBufferCache>>\n      match_buffers_;  // ordered by buffer index\n  std::map<size_t, 
std::shared_ptr<MatchBufferCache>>\n      ready_match_buffers_;  // ordered by buffer index, all port buffer\n                             // received\n\n  size_t port_count_;\n  std::unordered_map<std::string, size_t>* stream_count_each_port_;\n\n  std::unordered_map<std::string, std::shared_ptr<InPortStreamInfo>>\n      in_port_stream_info_map_;\n\n  size_t index_in_order_{0};\n  bool end_flag_received_{false};\n  size_t total_input_count_in_stream_{0};\n  size_t cur_input_count_in_stream_{0};\n\n  std::shared_ptr<Session> session_;\n};\n\nclass InputMatchStreamManager {\n public:\n  InputMatchStreamManager(std::string node_name, size_t queue_size,\n                          size_t port_count);\n\n  virtual ~InputMatchStreamManager();\n\n  size_t GetInputStreamCount();\n\n  void SetInputBufferInOrder(bool is_input_in_order);\n\n  void SetInputStreamGatherAll(bool need_gather_all);\n\n  void UpdateStreamCountEachPort(\n      std::unordered_map<std::string, size_t>&& stream_count_each_port);\n\n  Status LoadData(std::vector<std::shared_ptr<InPort>>& data_ports,\n                  const std::function<bool(std::shared_ptr<Buffer>)>&\n                      drop_filter = nullptr);\n\n  Status GenMatchStreamData(\n      std::list<std::shared_ptr<MatchStreamData>>& match_stream_list);\n\n  void Clean();\n\n private:\n  Status CacheBuffer(const std::string& port_name,\n                     std::shared_ptr<Buffer>& buffer, size_t backward_level);\n\n  void IncreaseOnePortBufferCount(const std::string& port_name,\n                                  size_t count = 1);\n\n  void DecreaseAllPortBufferCount(size_t count = 1);\n\n  size_t GetReadCount(const std::string& port_name);\n\n  MatchKey* GetInputStreamMatchKey(\n      const std::shared_ptr<BufferIndexInfo>& index_info,\n      size_t backward_level);\n\n  bool InitInheritBackwardLevel(\n      std::vector<std::shared_ptr<InPort>>& data_ports);\n\n  std::string node_name_;\n  size_t queue_size_{0};\n  size_t 
port_count_{0};\n  std::unordered_map<std::string, size_t> stream_count_each_port_;\n\n  bool need_gather_all_{false};\n  bool is_input_in_order_{false};\n  std::unordered_map<MatchKey*, std::shared_ptr<MatchStreamCache>>\n      match_stream_cache_map_;\n  std::unordered_map<std::string, size_t> port_inherit_backward_level_;\n\n  const size_t max_cache_count_{16384};\n  std::unordered_map<std::string, size_t> port_cache_count_map_;\n};\n\nclass OutputMatchStream {\n public:\n  void SetSession(std::shared_ptr<Session> session);\n\n  std::shared_ptr<Session> GetSession();\n\n  size_t Size();\n\n  bool Empty();\n\n  std::shared_ptr<Stream> GetStream(const std::string& port_name);\n\n  std::shared_ptr<Stream> CreateStream(const std::string& port_name);\n\n private:\n  std::unordered_map<std::string, std::shared_ptr<Stream>> port_stream_map_;\n\n  std::shared_ptr<Session> session_;\n};\n\nclass OutputMatchStreamManager {\n public:\n  OutputMatchStreamManager(std::string node_name,\n                           std::set<std::string>&& output_port_names);\n\n  virtual ~OutputMatchStreamManager();\n\n  size_t GetOutputStreamCount();\n\n  void SetNeedNewIndex(bool need_new_index);\n\n  Status UpdateStreamInfo(\n      const std::unordered_map<\n          std::string, std::vector<std::shared_ptr<Buffer>>>& stream_data_map,\n      const std::unordered_map<std::string, std::shared_ptr<DataMeta>>&\n          port_stream_meta,\n      const std::shared_ptr<Session>& session);\n\n  void Clean();\n\n private:\n  MatchKey* GetOutputStreamMatchKey(\n      const std::unordered_map<\n          std::string, std::vector<std::shared_ptr<Buffer>>>& stream_data_map);\n\n  void GenerateOutputStream(\n      OutputMatchStream& output_match_stream,\n      const std::unordered_map<\n          std::string, std::vector<std::shared_ptr<Buffer>>>& stream_data_map,\n      const std::unordered_map<std::string, std::shared_ptr<DataMeta>>&\n          port_stream_meta,\n      const std::shared_ptr<Session>& 
session);\n\n  void SetIndexInStream(const std::shared_ptr<BufferIndexInfo>& buffer_index,\n                        const std::shared_ptr<Stream>& stream);\n\n  std::string node_name_;\n  std::set<std::string> output_port_names_;\n  bool need_new_index_{false};\n\n  std::unordered_map<MatchKey*, OutputMatchStream> output_stream_map_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_MATCH_STREAM_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/modelbox.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_H_\n#define MODELBOX_H_\n\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\n#endif // MODELBOX_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/modelbox_engine.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_ENGINE_H_\n#define MODELBOX_ENGINE_H_\n\n#include \"data_handler.h\"\n#include \"modelbox/base/error_info.h\"\n#include \"node.h\"\n#include \"scheduler.h\"\n\nnamespace modelbox {\nusing ConfigNodeMap =\n    std::map<std::map<std::string, std::string>, std::shared_ptr<NodeBase>>;\n\n/**\n * @brief dynamic graph manager, graph start, run and stop\n */\nclass ModelBoxEngine : public std::enable_shared_from_this<ModelBoxEngine> {\n public:\n  ModelBoxEngine();\n  virtual ~ModelBoxEngine();\n\n  /**\n   * @brief init global config\n   * @param config global configuration\n   * @return return result code\n   */\n  Status Init(std::shared_ptr<Configuration> &config);\n\n  /**\n   * @brief create input stream for external input\n   * @return datahandler bind to extern input\n   */\n  std::shared_ptr<DataHandler> CreateInput(\n      const std::set<std::string> &port_map);\n\n  /**\n   * @brief choose right node to create graph and run graph\n   * @param name flowunit name\n   * @param config_map flowunit config\n   * @param data input data\n   * @return process result\n   */\n  std::shared_ptr<DataHandler> Execute(\n      const std::string &name, std::map<std::string, std::string> config_map,\n      const std::shared_ptr<DataHandler> &data = nullptr);\n  /**\n   * @brief choose right node to 
create graph and run graph\n   * @param name flowunit name\n   * @param config_map flowunit config\n   * @param data input data map\n   * @return process result\n   */\n  std::shared_ptr<DataHandler> Execute(\n      const std::string &name, std::map<std::string, std::string> config_map,\n      const std::map<std::string, std::shared_ptr<DataHandler>> &data);\n\n  /**\n   * @brief close the graph\n   */\n  void Close();\n  void ShutDown();\n\n  /**\n   * @brief get error info from graph\n   * @return error information\n   */\n  std::shared_ptr<ErrorInfo> GetErrorInfo();\n\n  void SetConfig(std::string &, std::string &);\n  std::shared_ptr<Configuration> GetConfig();\n\n  std::shared_ptr<GCNode> CreateDynamicGCGraph(\n      const std::string &name, const std::map<std::string, std::string> &config,\n      const std::shared_ptr<DataHandler> &data_handler);\n\n  Status CheckBuffer(const std::shared_ptr<FlowUnitDesc> &desc,\n                     const std::shared_ptr<DataHandler> &data);\n\n  std::shared_ptr<FlowUnitDesc> GetFlowunitDesc(\n      const std::string &name,\n      const std::map<std::string, std::string> &config);\n\n  std::shared_ptr<GCNode> CreateDynamicStreamNode(\n      const std::string &name, const std::map<std::string, std::string> &config,\n      const std::shared_ptr<DataHandler> &data_handler);\n\n  Status InsertGrahEdge(std::shared_ptr<GCGraph> &root_graph,\n                        std::shared_ptr<GCNode> &input_node,\n                        std::string &input_port,\n                        std::shared_ptr<GCNode> &output_node,\n                        std::string &output_port);\n  std::shared_ptr<NodeBase> CheckNodeExist(\n      const std::string &name,\n      const std::map<std::string, std::string> &config);\n\n  std::shared_ptr<NodeBase> CreateDynamicNormalNode(\n      const std::string &name,\n      const std::map<std::string, std::string> &config_map);\n\n  /*\n   feed data to graph\n   */\n  Status FeedData(std::shared_ptr<DynamicGraph> 
&dynamic_graph,\n                  std::shared_ptr<GCGraph> &gcgraph);\n  /*\n  create a graph for gcgraph\n  */\n  std::shared_ptr<DynamicGraph> CreateDynamicGraph(\n      std::shared_ptr<GCGraph> &graph);\n  std::shared_ptr<DeviceManager> GetDeviceManager();\n  std::shared_ptr<FlowUnitManager> GetFlowUnitManager();\n  std::shared_ptr<Scheduler> GetScheduler();\n  std::shared_ptr<Profiler> GetProfiler();\n  Status RunGraph(std::shared_ptr<DataHandler> &data_handler);\n  std::shared_ptr<DataHandler> BindDataHanlder(\n      std::shared_ptr<DataHandler> &data_handler,\n      std::shared_ptr<GCNode> &gcnode);\n  Status CheckInputPort(const std::shared_ptr<FlowUnitDesc> &flowunit_desc,\n                        const std::shared_ptr<DataHandler> &data_handler);\n  bool CheckisStream(const std::shared_ptr<FlowUnitDesc> &desc,\n                     const std::shared_ptr<DataHandler> &data_handler);\n  Status CheckInputFlowUnit(const std::string &name,\n                            std::map<std::string, std::string> &config_map,\n                            const std::shared_ptr<DataHandler> &buffers,\n                            const std::shared_ptr<FlowUnitDesc> &desc);\n  std::shared_ptr<DataHandler> ExecuteStreamNode(\n      const std::shared_ptr<FlowUnitDesc> &desc,\n      const std::shared_ptr<DataHandler> &buffers,\n      std::map<std::string, std::string> &config_map);\n  std::shared_ptr<DataHandler> ExecuteBufferListNode(\n      const std::string &name, std::map<std::string, std::string> &config_map,\n      const std::shared_ptr<DataHandler> &buffers);\n  Status SendExternalData(std::shared_ptr<ExternalDataMap> &extern_datamap,\n                          std::shared_ptr<BufferList> &buffer_list,\n                          const std::shared_ptr<GCNode> &gcnode);\n  std::shared_ptr<GCNode> ProcessOutputHandler(\n      const std::shared_ptr<DataHandler> &data_handler,\n      std::shared_ptr<GCNode> &gcnode, std::shared_ptr<GCGraph> &root_graph);\n  
std::shared_ptr<GCNode> ProcessVirtualHandler(\n      std::shared_ptr<GCNode> &gcnode, std::shared_ptr<GCGraph> &root_graph);\n\n private:\n  friend class DataHandler;\n  std::shared_ptr<Configuration> config_;\n  std::shared_ptr<Drivers> drivers_;\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::shared_ptr<FlowUnitManager> flowunit_mgr_;\n  std::shared_ptr<Scheduler> scheduler_;\n  std::shared_ptr<Profiler> profiler_;\n  std::set<std::shared_ptr<Graph>> graphs_;\n  std::unordered_map<std::string, std::string> global_config_map_;\n  std::set<std::shared_ptr<NodeBase>> stream_nodes_;\n  std::map<std::string, ConfigNodeMap> nodes_config_;\n  std::shared_ptr<ErrorInfo> error_info_;\n};\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/node.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_NODE_H_\n#define MODELBOX_NODE_H_\n\n#include <list>\n#include <memory>\n#include <set>\n#include <unordered_map>\n#include <utility>\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flowunit.h\"\n#include \"modelbox/flowunit_group.h\"\n#include \"modelbox/inner_event.h\"\n#include \"modelbox/match_stream.h\"\n#include \"modelbox/profiler.h\"\n#include \"modelbox/statistics.h\"\n\nnamespace modelbox {\n\n#define DEFAULT_QUEUE_SIZE 32\n#define DEFAULT_QUEUE_SIZE_EXTERNAL 1024\n#define DEFAULT_QUEUE_EVENT 8192  // should be large enough\n\nusing PortsDataList =\n    std::unordered_map<std::string, std::list<std::shared_ptr<Buffer>>>;\n\nenum RunType {\n  DATA = 0,\n  EVENT = 1,\n};\n\nclass EventPort;\nclass InPort;\nclass OutPort;\n\nclass NodeBase : public std::enable_shared_from_this<NodeBase> {\n public:\n  NodeBase();\n  virtual ~NodeBase();\n\n  virtual Status Init(const std::set<std::string>& input_port_names,\n                      const std::set<std::string>& output_port_names,\n                      const std::shared_ptr<Configuration>& config);\n\n  virtual Status Run(RunType type) = 0;\n\n  virtual Status Open() = 0;\n\n  virtual std::shared_ptr<Device> GetDevice() = 0;\n\n  virtual void Close();\n\n  void SetName(const std::string& 
name);\n\n  std::string GetName() const;\n\n  void SetPriority(int32_t priortity);\n\n  int32_t GetPriority() const;\n\n  void SetQueueSize(int32_t queue_size);\n\n  int32_t GetQueueSize() const;\n\n  size_t GetInputNum();\n\n  size_t GetExternNum();\n\n  size_t GetOutputNum();\n\n  std::set<std::string> GetInputNames();\n\n  std::set<std::string> GetExternNames();\n\n  std::set<std::string> GetOutputNames();\n\n  std::vector<std::shared_ptr<InPort>> GetInputPorts() const;\n\n  std::vector<std::shared_ptr<OutPort>> GetOutputPorts() const;\n\n  std::vector<std::shared_ptr<InPort>> GetExternalPorts() const;\n\n  std::shared_ptr<InPort> GetInputPort(const std::string& port_name);\n\n  std::shared_ptr<InPort> GetExternalPort(const std::string& port_name);\n\n  std::shared_ptr<OutPort> GetOutputPort(const std::string& port_name);\n\n  std::shared_ptr<EventPort> GetEventPort();\n\n  void SetAllInportActivated(bool flag);\n\n  Status SendBatchEvent(\n      std::vector<std::shared_ptr<FlowUnitInnerEvent>>& event_list,\n      bool update_active_time = true);\n\n  Status SendEvent(std::shared_ptr<FlowUnitInnerEvent>& event,\n                   bool update_active_time = true);\n\n  void Shutdown();\n\n protected:\n  Status InitPorts(const std::set<std::string>& input_port_names,\n                   const std::set<std::string>& output_port_names,\n                   const std::shared_ptr<Configuration>& config);\n\n  std::string name_;\n\n  std::shared_ptr<Configuration> config_;\n\n  std::vector<std::shared_ptr<InPort>> input_ports_;\n\n  std::shared_ptr<EventPort> event_port_;\n\n  std::vector<std::shared_ptr<InPort>> extern_ports_;\n\n  std::vector<std::shared_ptr<OutPort>> output_ports_;\n\n  int32_t priority_{0};\n\n  size_t queue_size_{0};\n\n  size_t event_queue_size_{0};\n};\n\nclass SessionManager;\n\nclass Node : public NodeBase {\n public:\n  Node();\n\n  ~Node() override;\n\n  /**\n   * @brief Init the node\n   *\n   * @param input_port_names {set} the input port 
name\n   * @param output_port_names {set} the output port name\n   * @param config node configuration\n   * @return Status {status} if success return STATUS_SUCCESS\n   */\n  Status Init(const std::set<std::string>& input_port_names,\n              const std::set<std::string>& output_port_names,\n              const std::shared_ptr<Configuration>& config) override;\n\n  void SetFlowUnitInfo(const std::string& flowunit_name,\n                       const std::string& flowunit_type,\n                       const std::string& flowunit_device_id,\n                       std::shared_ptr<FlowUnitManager> flowunit_manager);\n\n  std::shared_ptr<FlowUnitGroup> GetFlowUnitGroup();\n\n  void SetProfiler(std::shared_ptr<Profiler> profiler);\n\n  void SetStats(std::shared_ptr<StatisticsItem> graph_stats);\n\n  /**\n   * @brief Open node\n   * @return open result\n   */\n  Status Open() override;\n\n  std::shared_ptr<Device> GetDevice() override { return nullptr; };\n\n  /**\n   * @brief close node\n   */\n  void Close() override;\n\n  /**\n   * @brief The node main function\n   *\n   * @param type run type\n   * @return Status\n   */\n  Status Run(RunType type) override;\n\n  void SetOutputType(FlowOutputType type);\n\n  void SetFlowType(FlowType type);\n\n  void SetConditionType(ConditionType type);\n\n  void SetLoopType(LoopType type);\n\n  void SetInputContiguous(bool is_input_contiguous);\n\n  void SetExceptionVisible(bool is_exception_visible);\n\n  FlowOutputType GetOutputType();\n\n  FlowType GetFlowType();\n\n  ConditionType GetConditionType();\n\n  LoopType GetLoopType();\n\n  bool IsInputContiguous();\n\n  bool IsExceptionVisible();\n\n  std::unordered_map<std::string, std::shared_ptr<Node>> GetMatchNodes();\n\n  void SetMatchNode(const std::string& name, std::shared_ptr<Node> match_node);\n\n  std::shared_ptr<Node> GetMatchNode();\n\n  std::shared_ptr<Node> GetMatchNode(const std::string& port_name);\n\n  std::shared_ptr<FlowUnitDesc> GetFlowUnitDesc();\n\n  void 
SetSessionManager(SessionManager* session_mgr);\n\n  void SetLoopOutPortName(const std::string& port_name);\n\n  std::string GetLoopOutPortName();\n\n protected:\n  virtual Status Recv(\n      RunType type,\n      std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  virtual Status GenInputMatchStreamData(\n      RunType type,\n      std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list);\n\n  Status GenMatchStreamFromDataPorts(\n      std::vector<std::shared_ptr<InPort>>& data_ports,\n      std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list);\n\n  Status GenMatchStreamFromEventPorts(\n      std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list);\n\n  virtual Status GenDataContextList(\n      std::list<std::shared_ptr<MatchStreamData>>& match_stream_data_list,\n      std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  Status AppendDataContextByEvent(\n      const std::shared_ptr<MatchStreamData>& match_stream_data,\n      std::set<std::shared_ptr<FlowUnitDataContext>>& data_ctx_set);\n\n  Status AppendDataContextByData(\n      const std::shared_ptr<MatchStreamData>& match_stream_data,\n      std::set<std::shared_ptr<FlowUnitDataContext>>& data_ctx_set);\n\n  std::shared_ptr<FlowUnitDataContext> GetDataContext(MatchKey* key);\n\n  std::shared_ptr<FlowUnitDataContext> CreateDataContext(\n      MatchKey* key, const std::shared_ptr<Session>& session);\n\n  std::shared_ptr<FlowUnitDataContext> AppendDataToDataContext(\n      MatchKey* key, const std::shared_ptr<MatchStreamData>& match_stream_data,\n      bool append_single_buffer = false, size_t buffer_index = 0);\n\n  Status Process(\n      std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  Status Send(std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  void SetLastError(\n      std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  void 
Clean(std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list);\n\n  void CleanDataContext();\n\n  virtual Status InitNodeProperties();\n\n  void UpdatePropConstrain(const std::shared_ptr<FlowUnitDesc>& flowunit_desc);\n\n  std::shared_ptr<ExternalData> CreateExternalData(\n      const std::shared_ptr<Device>& device);\n\n  bool NeedNewIndex();\n\n  std::unordered_map<std::string, size_t> GetStreamCountEachPort();\n\n  std::shared_ptr<FlowUnitManager> flowunit_manager_;\n  std::shared_ptr<FlowUnitGroup> flowunit_group_;\n  bool is_flowunit_opened_{false};\n  std::string flowunit_name_;\n  std::string flowunit_type_;\n  std::string flowunit_device_id_;\n\n  FlowOutputType output_type_{FlowOutputType::ORIGIN};\n  FlowType flow_type_{FlowType::STREAM};\n  ConditionType condition_type_{ConditionType::NONE};\n  LoopType loop_type_{LoopType::NOT_LOOP};\n  bool is_input_contiguous_{false};\n  bool is_exception_visible_{false};\n\n  std::shared_ptr<Profiler> profiler_;\n  std::shared_ptr<StatisticsItem> graph_stats_;\n  SessionManager* session_mgr_{nullptr};\n\n  std::unordered_map<std::string, std::shared_ptr<Node>> port_match_at_node_;\n  std::once_flag input_stream_count_update_flag_;\n  std::shared_ptr<InputMatchStreamManager> input_match_stream_mgr_;\n  std::unordered_map<MatchKey*, std::shared_ptr<FlowUnitDataContext>>\n      data_ctx_map_;\n  std::shared_ptr<OutputMatchStreamManager> output_match_stream_mgr_;\n\n  std::unordered_map<std::string, std::shared_ptr<Node>> match_node_;\n  std::string loop_out_port_name_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_NODE_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/obs_client.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_OBS_CLIENT_H_\n#define MODELBOX_FLOWUNIT_OBS_CLIENT_H_\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/status.h>\n\n#include <mutex>\n#include <vector>\n\n#include \"eSDKOBS.h\"\n\nnamespace modelbox {\n\ntypedef struct tag_ObsOptions {\n  std::string end_point;\n  std::string bucket;\n  std::string path;\n  std::string domain_name;\n  std::string xrole_name;\n  std::string user_id;\n  std::string ak;\n  std::string sk;\n  std::string token;\n} ObsOptions;\n\n/**\n * @brief This is a singleton class, in charge of all about the OBS SDK.\n */\nclass ObsClient {\n public:\n  /**\n   * @brief   Get an ObsClient object.\n   * @return  Pointer to an ObsClient object.\n   *          Notes: 1. return nullptr if it's failed to new the object;\n   *                 2. 
return nullptr if it's failed to initialize the OBS SDK\n   */\n  static std::shared_ptr<ObsClient> GetInstance();\n\n  /**\n   * @brief   List the objects in a certain OBS path.\n   * @param   opt - in, OBS options.\n   * @param   object_list - out, objects list vector.\n   * @return  modelbox::STATUS_OK - Successfully get the list.\n   *          other status - Failed.\n   */\n  modelbox::Status GetObjectsList(const ObsOptions &opt,\n                                  std::vector<std::string> &object_list);\n\n  /**\n   * @brief   Get an object from OBS.\n   * @param   opt - in, OBS options.\n   * @param   file_local_path - in, the object would be downloaded to this path.\n   *          Notes: 1. directories would be made recursively if those do not\n   * exist.\n   *                 2. Read-Write right is needed to access the path.\n   * @return  modelbox::STATUS_OK - Successfully get the list.\n   *          modelbox::STATUS_AGAIN - Need to try again.\n   *          other status - Failed.\n   */\n  modelbox::Status GetObject(const ObsOptions &opt,\n                             const std::string &file_local_path);\n\n  /**\n   * @brief   Get buffer from an OBS object.\n   * @param   opt - in, obs option.\n   * @param   buf - out, buffer.\n   * @param   size - in, get buffer size.\n   * @param   offset - in, start byte to get.\n   * @return  modelbox::STATUS_OK - Successfully get the buffer.\n   *          modelbox::STATUS_AGAIN - Need to try again.\n   *          other status - Failed.\n   */\n  modelbox::Status GetBuffer(ObsOptions &opt, unsigned char *buf, uint64_t size,\n                             uint64_t offset);\n\n  /**\n   * @brief   Get object size from OBS.\n   * @param   opt - in, obs option.\n   * @return  Object size.\n   */\n  uint64_t GetObjectSize(ObsOptions &opt);\n\n  /**\n   * @brief   Put an object to OBS.\n   * @param   opt - in, OBS options.\n   * @param   data - in, the object would be downloaded to this path.\n   * @param   data_size\n   
*                            Notes: 1. directories would be made recursively\n   * if those do not exist.\n   *                                   2. Read-Write right is needed to access\n   * the path.\n   * @return  modelbox::STATUS_OK - Successfully get the list.\n   *          modelbox::STATUS_AGAIN - Need to try again.\n   *          other status - Failed.\n   */\n  modelbox::Status PutObject(const ObsOptions &opt, const char *data,\n                             size_t data_size);\n\n  virtual ~ObsClient();\n\n  static std::mutex obs_client_lock_;\n\n private:\n  ObsClient();\n\n  /**\n   * @brief   Initialize the OBS SDK.\n   * @return  Successful or not\n   */\n  modelbox::Status InitObsSdk();\n\n  /**\n   * @brief   Deinitialize the OBS SDK.\n   * @return  void\n   */\n  void DeInitObsSdk();\n\n  /**\n   * @brief   get Ak/Sk/SecurityToken from hw_auth.\n   * @param   domain_name - in, user domain name\n   * @param   xrole_name - in, user xrole name to vas\n   * @param   access_key - out, AK\n   * @param   secret_key - out, SK\n   * @param   security_token - out, Security Token\n   * @return  Successful or not\n   */\n  modelbox::Status GetAuthInfo(const std::string &domain_name,\n                               const std::string &xrole_name,\n                               const std::string &user_id,\n                               std::string &access_key, std::string &secret_key,\n                               std::string &security_token);\n\n  /**\n   * @brief   Notify hw_auth to update the Ak/SK/SecurityToken, and get the\n   * updated ones.\n   * @param   domain_name - in, user domain name\n   * @param   xrole_name - in, user xrole name to vas\n   * @param   access_key - out, AK\n   * @param   secret_key - out, SK\n   * @param   security_token - out, Security Token\n   * @return  Successful or not\n   */\n  modelbox::Status GetUpdatedAuthInfo(const std::string &domain_name,\n                                      const std::string &xrole_name,\n           
                           const std::string &user_id,\n                                      std::string &access_key,\n                                      std::string &secret_key,\n                                      std::string &security_token);\n\n  /**\n   * @brief Notify hw_auth to update the Ak/SK/SecurityToken\n   * @param output_info - identify the configuration\n   * @return Successful or not\n   */\n  modelbox::Status NotifyToUpdateAuthInfo(const std::string &domain_name,\n                                          const std::string &xrole_name);\n\n  /**\n   * @brief   Validate the OBS options except for ObsOptions::path.\n   * @param   opt - in, OBS options\n   * @return  true - Valid, false - Invalid\n   */\n  bool IsValidOptionExceptPath(const ObsOptions &opt);\n\n  /**\n   * @brief   Validate the OBS options including ObsOptions::path.\n   * @param   opt - in, OBS options\n   * @return  true - Valid, false - Invalid\n   */\n  bool IsValidOptionIncludingPath(const ObsOptions &opt);\n\n  /**\n   * @brief\n   * @param   src - in, ObsOptions\n   * @param   dst - out, obs_options from OBS SDK\n   * @return\n   */\n  void SetObsOption(const ObsOptions &src, const std::string &ak,\n                    const std::string &sk, const std::string &security_token,\n                    obs_options &dst);\n\n  /**\n   * @brief   Open the file to accept downloaded data. 
The file would be created\n   * if not exists.\n   * @param   full_file_path - in, Path to save the downloaded OBS file,\n   * including the file name.\n   * @return\n   */\n  std::shared_ptr<FILE> OpenLocalFile(const std::string &full_file_path);\n\n  /**\n   * @brief Based on the obs_status, notify the hw_auth to update the auth info.\n   * @param status - status return from OBS SDK\n   * @return need or not.\n   */\n  bool NeedUpdateAuthInfo(obs_status status);\n\n  /**\n   * @brief Based on the obs_status, notify the framework to try uploading data\n   * again.\n   * @param status - status return from OBS SDK\n   * @return need or not.\n   */\n  bool NeedTryAgain(obs_status status);\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_OBS_CLIENT_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/output_broker_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_H_\n#define MODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n\n#include <memory>\n#include <string>\n\nconstexpr const char *DRIVER_CLASS_OUTPUT_BROKER_PLUGIN =\n    \"DRIVER-OUTPUT-BROKER\";\n\nnamespace modelbox {\n\nclass OutputBrokerHandle {\n public:\n  std::string output_broker_type_;\n  std::string broker_id_;\n};\n\nclass OutputBrokerPlugin : public Driver {\n public:\n  virtual Status Init(const std::shared_ptr<Configuration> &opts) = 0;\n\n  virtual Status Deinit() = 0;\n\n  virtual std::shared_ptr<modelbox::OutputBrokerHandle> Open(\n      const std::shared_ptr<modelbox::Configuration> &session_config,\n      const std::string &config) = 0;\n\n  virtual Status Write(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle,\n      const std::shared_ptr<Buffer> &buffer) = 0;\n\n  virtual Status Sync(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) = 0;\n\n  virtual Status Close(\n      const std::shared_ptr<modelbox::OutputBrokerHandle> &handle) = 0;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_OUTPUT_BROKER_PLUGIN_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/port.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PORT_H_\n#define MODELBOX_PORT_H_\n\n#include <utility>\n\n#include \"modelbox/base/blocking_queue.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/inner_event.h\"\n#include \"modelbox/match_stream.h\"\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\n\nclass NodeBase;\nclass SingleMatchCache;\n\nstruct CustomCompare {\n  auto operator()(std::shared_ptr<Buffer> const& a,\n                  std::shared_ptr<Buffer> const& b) const -> bool {\n    return BufferManageView::GetPriority(a) < BufferManageView::GetPriority(b);\n  }\n};\n\nstruct EventCompare {\n  auto operator()(std::shared_ptr<FlowUnitInnerEvent> const& a,\n                  std::shared_ptr<FlowUnitInnerEvent> const& b) const -> bool {\n    return a->GetPriority() < b->GetPriority();\n  }\n};\n\ntypedef PriorityBlockingQueue<std::shared_ptr<Buffer>, CustomCompare>\n    BufferQueue;\n\nusing EventQueue =\n    PriorityBlockingQueue<std::shared_ptr<FlowUnitInnerEvent>, EventCompare>;\n\ntemplate <typename QueueType, typename Compare>\nusing PortQueue = PriorityBlockingQueue<std::shared_ptr<QueueType>, Compare>;\n\nclass Port {\n public:\n  /**\n   * @brief Construct a new Port object\n   *\n   * @param name the port name\n   * @param node the parent node contains the port\n   */\n  Port(std::string name, const 
std::shared_ptr<NodeBase>& node);\n\n  /**\n   * @brief destructor\n   */\n  virtual ~Port();\n\n  /**\n   * @brief Get the Name object\n   *\n   * @return std::string\n   */\n  const std::string& GetName();\n\n  /**\n   * @brief Get the parent Node object\n   *\n   * @return std::shared_ptr<NodeBase>\n   */\n  std::shared_ptr<NodeBase> GetNode();\n\n  virtual void Shutdown();\n\n protected:\n  std::string name_;\n  std::weak_ptr<NodeBase> node_;\n};\n\nusing PushCallBack = std::function<void(bool)>;\nusing PopCallBack = std::function<void(void)>;\n\nclass IPort : public Port {\n public:\n  IPort(const std::string& name, const std::shared_ptr<NodeBase>& node);\n  ~IPort() override;\n  virtual int32_t GetPriority() const = 0;\n  virtual int32_t GetDataCount() const = 0;\n  virtual void SetPriority(int32_t priority) = 0;\n  virtual void SetPushEventCallBack(const PushCallBack& func) = 0;\n  virtual void SetPopEventCallBack(const PopCallBack& func) = 0;\n  virtual void NotifyPushEvent(bool update_active_time) = 0;\n  virtual void NotifyPushEvent() = 0;\n  virtual void NotifyPopEvent() = 0;\n  virtual bool Empty() const = 0;\n\n  virtual bool IsActivated() = 0;\n  virtual void SetActiveState(bool flag) = 0;\n\n  virtual Status Init() = 0;\n};\n\ntemplate <typename QueueType, typename Compare>\nclass NotifyPort : public IPort {\n public:\n  NotifyPort(const std::string& name, const std::shared_ptr<NodeBase>& node,\n             uint32_t priority = 0, size_t event_capacity = SIZE_MAX)\n      : IPort(name, node),\n        priority_(priority),\n\n        push_call_back_(nullptr),\n        pop_call_back_(nullptr),\n        is_activated_(true),\n        queue_(\n            std::make_shared<PortQueue<QueueType, Compare>>(event_capacity)) {}\n\n  ~NotifyPort() override { queue_->Clear(); }\n\n  /**\n   * @brief Get the Priority\n   *\n   * @return int\n   */\n  int32_t GetPriority() const override {\n    std::shared_ptr<QueueType> data = nullptr;\n    if 
(queue_->Front(&data)) {\n      return data->GetPriority();\n    }\n\n    return priority_;\n  }\n\n  /**\n   * @brief Get the data count in port\n   *\n   * @return int\n   */\n  int32_t GetDataCount() const override { return queue_ ? queue_->Size() : 0; }\n\n  /**\n   * @brief Set the Priority\n   *\n   * @param priority\n   */\n  void SetPriority(int32_t priority) override { priority_ = priority; }\n\n  /**\n   * @brief Set the Push Event Call Back Function\n   *\n   * @param func PushEvent Callback Function\n   */\n  void SetPushEventCallBack(const PushCallBack& func) override {\n    push_call_back_ = func;\n  }\n\n  /**\n   * @brief Set the Pop Event Call Back Function\n   *\n   * @param func PopEvent Callback Function\n   */\n  void SetPopEventCallBack(const PopCallBack& func) override {\n    pop_call_back_ = func;\n  }\n\n  /**\n   * @brief Notify PushEvent\n   *\n   */\n  void NotifyPushEvent(bool update_active_time) override {\n    std::lock_guard<std::mutex> lock(mutex_);\n    if (push_call_back_) {\n      push_call_back_(update_active_time);\n    }\n  }\n\n  void NotifyPushEvent() override {\n    std::lock_guard<std::mutex> lock(mutex_);\n    if (push_call_back_) {\n      push_call_back_(true);\n    }\n  }\n\n  /**\n   * @brief Notify PopEvent\n   *\n   */\n  void NotifyPopEvent() override {\n    std::lock_guard<std::mutex> lock(mutex_);\n    if (pop_call_back_) {\n      pop_call_back_();\n    }\n  }\n\n  /**\n   * @brief\n   *\n   * @return true\n   * @return false\n   */\n  bool Empty() const override { return queue_->Empty(); };\n\n  bool IsActivated() override { return is_activated_; }\n\n  void SetActiveState(bool flag) override { is_activated_ = flag; }\n\n  void Shutdown() override {\n    std::lock_guard<std::mutex> lock(mutex_);\n    push_call_back_ = nullptr;\n    pop_call_back_ = nullptr;\n    queue_->Shutdown();\n  }\n\n  Status Send(const std::shared_ptr<QueueType>& data) {\n    if (!data) {\n      MBLOG_WARN << \"data must not be 
nullptr.\";\n      return STATUS_INVALID;\n    }\n\n    return queue_->Push(data, 0);\n  }\n\n  Status Recv(std::shared_ptr<std::vector<std::shared_ptr<QueueType>>>& datas) {\n    if (!datas) {\n      datas = std::make_shared<std::vector<std::shared_ptr<QueueType>>>();\n    }\n\n    queue_->PopBatch(datas.get(), -1);\n    return STATUS_OK;\n  }\n\n  std::shared_ptr<QueueType> Recv() {\n    std::shared_ptr<QueueType> data = nullptr;\n    queue_->Pop(&data, -1);\n    return data;\n  }\n\n  std::shared_ptr<PortQueue<QueueType, Compare>> GetQueue() { return queue_; }\n\n protected:\n  int32_t priority_{0};\n  std::mutex mutex_;\n\n  PushCallBack push_call_back_;\n  PopCallBack pop_call_back_;\n  std::atomic<bool> is_activated_{false};\n\n  std::shared_ptr<PortQueue<QueueType, Compare>> queue_;\n};\n\nclass EventPort : public NotifyPort<FlowUnitInnerEvent, EventCompare> {\n public:\n  EventPort(const std::string& name, const std::shared_ptr<NodeBase>& node,\n            uint32_t priority = 0, size_t event_capacity = SIZE_MAX);\n  ~EventPort() override;\n\n  Status Init() override;\n\n  Status SendBatch(\n      std::vector<std::shared_ptr<FlowUnitInnerEvent>>& event_list);\n\n  Status Send(std::shared_ptr<FlowUnitInnerEvent>& event);\n};\n\nclass OutPort;\nclass InPort : public NotifyPort<Buffer, CustomCompare> {\n  friend class OutPort;\n\n public:\n  InPort(const std::string& name, const std::shared_ptr<NodeBase>& node,\n         uint32_t priority = 0, size_t event_capacity = SIZE_MAX);\n\n  ~InPort() override;\n\n  Status Init() override;\n\n  void Recv(std::vector<std::shared_ptr<Buffer>>& buffer_vector,\n            uint32_t left_buffer_num);\n\n  size_t GetConnectedPortNumber();\n\n  std::vector<std::weak_ptr<OutPort>> GetAllOutPort();\n\n private:\n  bool SetOutputPort(const std::shared_ptr<OutPort>& output_port);\n\n  std::vector<std::weak_ptr<OutPort>> output_ports;\n};\n\nclass OutPort : public Port, public std::enable_shared_from_this<OutPort> {\n public:\n  
OutPort(const std::string& name, const std::shared_ptr<NodeBase>& node);\n\n  ~OutPort() override;\n\n  Status Init();\n\n  Status Send(std::vector<std::shared_ptr<Buffer>>& buffers);\n\n  std::set<std::shared_ptr<InPort>> GetConnectInPort();\n\n  bool ConnectPort(const std::shared_ptr<InPort>& /*inport*/);\n\n  void Shutdown() override;\n\n private:\n  std::set<std::shared_ptr<InPort>> connected_input_ports_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/profiler.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PROFLER_H_\n#define MODELBOX_PROFLER_H_\n\n#include <modelbox/base/any.h>\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/thread_pool.h>\n#include <modelbox/base/timer.h>\n\n#include <atomic>\n#include <chrono>\n#include <functional>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <regex>\n#include <set>\n#include <string>\n#include <thread>\n#include <type_traits>\n#include <typeinfo>\n#include <vector>\n\nnamespace modelbox {\n\nconstexpr const char* PROFILE_PATH_ENV = \"PROFILE_PATH\";\n\nenum class TraceSliceType {\n  OPEN,\n  CLOSE,\n  PROCESS,\n  STREAM_OPEN,\n  STREAM_CLOSE,\n  CUSTOM\n};\n\nenum class EventType { BEGIN, END };\n\nusing TimePoint = std::chrono::time_point<std::chrono::system_clock,\n                                          std::chrono::microseconds>;\n\nconstexpr uint32_t DEFAULT_TIMER_SAMPLE_INTERVAL = 100;\n\nconstexpr uint32_t DEFAULT_WRITE_PROFILE_INTERVAL = 600;\n\nconstexpr uint32_t DEFAULT_WRITE_TRACE_INTERVAL = 600;\n\nclass ProfilerLifeCycle {\n public:\n  ProfilerLifeCycle(std::string name);\n\n  virtual ~ProfilerLifeCycle();\n\n  Status Init();\n\n  Status Start();\n\n  Status Stop();\n\n  Status Pause();\n\n  Status Resume();\n\n  inline bool IsRunning() { 
return is_running_; }\n\n  inline bool IsInitialized() { return is_initialized_; }\n\n protected:\n  virtual Status OnInit() { return STATUS_SUCCESS; };\n\n  virtual Status OnStart() { return STATUS_SUCCESS; };\n\n  virtual Status OnStop() { return STATUS_SUCCESS; };\n\n  virtual Status OnPause() { return STATUS_SUCCESS; };\n\n  virtual Status OnResume() { return STATUS_SUCCESS; };\n\n private:\n  std::string name_;\n  std::atomic_bool is_running_{false};\n  std::atomic_bool is_initialized_{false};\n};\n\nclass TraceEvent {\n  friend class TraceSlice;\n\n public:\n  virtual ~TraceEvent();\n\n  TraceEvent& SetEventType(const EventType& event_type);\n\n  const EventType& GetEventType() const;\n\n  TraceEvent& SetEventTime(const TimePoint& event_time);\n\n  const TimePoint& GetEventTime() const;\n\n  TraceEvent& SetThreadId(std::thread::id thread_id);\n\n  std::thread::id GetThreadId() const;\n\n protected:\n  TraceEvent();\n\n private:\n  EventType event_type_{EventType::BEGIN};\n  TimePoint event_time_;\n  std::thread::id thread_id_;\n};\n\nclass FlowUnitTrace;\nclass FlowUnitPerfCtx;\n\nclass TraceSlice {\n  friend class FlowUnitTrace;\n\n public:\n  virtual ~TraceSlice();\n\n  void Begin();\n\n  void End();\n\n  inline std::shared_ptr<TraceEvent> GetBeginEvent() {\n    return begin_event_ptr_;\n  }\n\n  inline std::shared_ptr<TraceEvent> GetEndEvent() { return end_event_ptr_; };\n\n  inline TraceSliceType GetTraceSliceType() { return slice_type_; }\n\n  int32_t GetDuration();\n\n  std::string GetSession();\n\n  inline void SetBatchSize(uint32_t batch_size) { batch_size_ = batch_size; }\n\n  inline uint32_t GetBatchSize() { return batch_size_; }\n\n protected:\n  TraceSlice(TraceSliceType& slice_type, std::string session,\n             const std::shared_ptr<FlowUnitTrace>& flow_unit_trace_ptr,\n             std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx);\n\n  TraceSlice(TraceSliceType& slice_type, std::string session,\n             const 
std::shared_ptr<FlowUnitTrace>& flow_unit_trace_ptr,\n             std::shared_ptr<TraceEvent> begin,\n             std::shared_ptr<TraceEvent> end);\n\n private:\n  TraceSliceType slice_type_;\n  std::string session_;\n  std::weak_ptr<FlowUnitTrace> flow_unit_trace_ptr_;\n  std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx_;\n  std::shared_ptr<TraceEvent> begin_event_ptr_;\n  std::shared_ptr<TraceEvent> end_event_ptr_;\n  bool is_end_called_;\n  uint32_t batch_size_;\n};\n\nclass Trace;\n\nclass FlowUnitTrace : public std::enable_shared_from_this<FlowUnitTrace> {\n  friend class Trace;\n\n public:\n  virtual ~FlowUnitTrace();\n\n  inline const std::string& GetFlowUnitName() const { return flow_unit_name_; }\n\n  // return a new TraceSlice when call this function, if Slice has not\n  // couple begin and end, it will be ignored\n  std::shared_ptr<TraceSlice> Slice(TraceSliceType slice_type,\n                                    std::string session);\n\n  void GetTraceSlices(std::vector<std::shared_ptr<TraceSlice>>& trace_slices);\n\n  Status AddTraceSlice(const std::shared_ptr<TraceSlice>& trace_slice);\n\n  void SetFlowUnitPerfCtx(std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx);\n\n protected:\n  explicit FlowUnitTrace(std::string flow_unit_name);\n\n private:\n  std::string flow_unit_name_;\n  std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx_;\n  std::vector<std::shared_ptr<TraceSlice>> trace_slices_;\n  std::mutex trace_slices_mutex_;\n};\n\nclass FlowUnitPerfCtx {\n public:\n  explicit FlowUnitPerfCtx(const std::string& flow_unit_name);\n\n  virtual ~FlowUnitPerfCtx();\n\n  void UpdateProcessLatency(int32_t process_latency);\n\n  int32_t GetProcessLatency();\n\n  void UpdateDeviceMemory(std::string& device_type, std::string& device_id,\n                          int32_t memory);\n\n  int32_t GetDeviceMemory(std::string& device_type, std::string& device_id);\n\n  void UpdateDeviceMemoryUsage(std::string& device_type, std::string& device_id,\n             
                  int32_t memory_usage);\n\n  int32_t GetDeviceMemoryUsage(std::string& device_type,\n                               std::string& device_id);\n\n  inline std::map<std::string, std::map<TimePoint, int32_t>>\n  GetDeviceMemoryMap() {\n    std::lock_guard<std::mutex> lock(devices_memories_mutex_);\n    return devices_memories_;\n  }\n\n  inline std::map<std::string, std::map<TimePoint, int32_t>>\n  GetDeviceMemoryUsageMap() {\n    std::lock_guard<std::mutex> lock(devices_memories_usage_mutex_);\n    return devices_memories_usage_;\n  }\n\n private:\n  std::string flow_unit_name_;\n  double process_latency_;\n  int32_t process_latency_count_;\n\n  // device type + id -> std::map<TimePoint int32_t>\n  std::map<std::string, std::map<TimePoint, int32_t>> devices_memories_;\n  std::map<std::string, std::map<TimePoint, int32_t>> devices_memories_usage_;\n\n  std::mutex latency_mutex_;\n  std::mutex devices_memories_mutex_;\n  std::mutex devices_memories_usage_mutex_;\n};\n\nclass FlowUnitPerfCollector;\nclass PerfCollector;\n\nclass Performance : public ProfilerLifeCycle {\n public:\n  Performance(std::shared_ptr<DeviceManager> device_mgr,\n              std::string& output_dir_path);\n  ~Performance() override;\n\n  Status OnInit() override;\n\n  Status OnStart() override;\n\n  Status OnStop() override;\n\n  Status OnPause() override;\n\n  Status OnResume() override;\n\n  void SetTimerSampleInterval(int32_t interval);\n\n  void SetWriteFileInterval(int32_t interval);\n\n  Status WritePerformance();\n\n  std::shared_ptr<FlowUnitPerfCtx> GetFlowUnitPerfCtx(\n      const std::string& flow_unit_name);\n\n private:\n  // process statics, get by regular sampling\n  int32_t GetProcessDeviceMemory(std::string& device_type,\n                                 std::string& device_id);\n\n  int32_t GetProcessDeviceMemoryUsage(std::string& device_type,\n                                      std::string& device_id);\n\n  int32_t GetProcessCpuUsage();\n\n  // flow unit 
statics, get by regular sampling\n  int32_t GetFlowUnitDeviceMemory(std::string& device_type,\n                                  std::string& device_id,\n                                  std::string& flow_unit_name);\n\n  void PerformanceWorker();\n\n  // device type + device id -> std::pair<std::string, std::string>\n  std::shared_ptr<std::map<std::string, std::pair<std::string, std::string>>>\n      devices_;\n\n  std::shared_ptr<std::vector<std::string>> flow_unit_names_;\n\n  std::atomic_bool timer_run_{false};\n\n  std::shared_ptr<std::thread> timer_;\n\n  uint32_t sample_interval_;\n\n  uint32_t write_file_interval_;\n\n  std::shared_ptr<DeviceManager> device_mgr_;\n\n  std::string output_dir_path_;\n\n  std::shared_ptr<FlowUnitPerfCollector> flow_unit_perf_collector_;\n\n  std::vector<std::shared_ptr<PerfCollector>> perf_collectors_;\n};\n\nclass Trace : public std::enable_shared_from_this<Trace>,\n              public ProfilerLifeCycle {\n public:\n  Trace(std::string output_dir_path, std::shared_ptr<Performance> perf,\n        bool session_enable);\n  ~Trace() override;\n\n  Status OnStart() override;\n\n  Status OnStop() override;\n\n  Status OnPause() override;\n\n  Status OnResume() override;\n\n  std::shared_ptr<FlowUnitTrace> FlowUnit(const std::string& flow_unit_name);\n\n  Status WriteTrace();\n\n  void SetWriteFileInterval(int32_t threshold);\n\n  uint32_t GetWriteFileInterval();\n\n  void SetSessionEnable();\n\n private:\n  std::string TraceSliceTypeToString(TraceSliceType type);\n\n  void TraceWork();\n\n  // FlowUnit name -> FlowUnitTrace, get by lock\n  std::map<std::string, std::shared_ptr<FlowUnitTrace>> traces_;\n\n  std::mutex trace_mutex_;\n\n  std::string output_dir_path_;\n\n  std::shared_ptr<Performance> perf_;\n\n  uint32_t write_file_interval_;\n\n  std::atomic_bool timer_run_{false};\n\n  std::shared_ptr<std::thread> timer_;\n\n  std::atomic_bool session_enable_;\n};\n\n/**\n * call as following in one session:\n\n * auto trace =\n 
* profiler->FlowUnit(\"resize\")->Slice(TraceSliceType::PROCESS);\n * trace->Begin();\n * process();\n * trace->End();\n */\nclass Profiler : public ProfilerLifeCycle {\n public:\n  explicit Profiler(std::shared_ptr<DeviceManager> device_mgr,\n                    std::shared_ptr<Configuration> config);\n\n  ~Profiler() override;\n\n  Profiler(const Profiler& profiler) = delete;\n  Profiler& operator=(const Profiler& profiler) = delete;\n  Profiler(const Profiler&& profiler) = delete;\n  Profiler& operator=(const Profiler&& profiler) = delete;\n\n  Status OnInit() override;\n\n  Status InitProfilerDir();\n\n  Status OnStart() override;\n\n  Status OnStop() override;\n\n  Status OnPause() override;\n\n  Status OnResume() override;\n\n  std::shared_ptr<Performance> GetPerf();\n\n  std::shared_ptr<Trace> GetTrace();\n\n private:\n  std::shared_ptr<DeviceManager> device_mgr_;\n\n  std::shared_ptr<Configuration> config_;\n\n  std::string output_dir_path_;\n\n  std::shared_ptr<Performance> perf_;\n\n  std::shared_ptr<Trace> trace_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_PROFLER_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/scheduler.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_SCHEDULER_H_\n#define MODELBOX_SCHEDULER_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/thread_pool.h>\n#include <modelbox/port.h>\n\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\nclass Graph;\n\n/**\n * @brief Scheduler run mode\n */\nenum RunMode {\n  /// run sync\n  SYNC = 0,\n  /// run async\n  ASYNC = 1,\n};\n\nclass Scheduler {\n public:\n  virtual ~Scheduler() = default;\n\n  /**\n   * @brief Init scheduler\n   * @param config scheduler configuration\n   * @param stats scheduler stats\n   * @param thread_pool thread pool for scheduler, if null, scheduler will\n   * create its own thread pool\n   * @return init result\n   */\n  virtual Status Init(std::shared_ptr<Configuration> config,\n                      std::shared_ptr<StatisticsItem> stats = nullptr,\n                      std::shared_ptr<ThreadPool> thread_pool = nullptr) = 0;\n\n\n  /**\n   * @brief Build graph\n   * @param graph graph\n   * @return build result\n   */\n  virtual Status Build(const Graph& graph) = 0;\n\n  /**\n   * @brief Run scheduler sync\n   * @return run result\n   */\n  virtual Status Run() = 0;\n\n  /**\n   * @brief Run scheduler async\n   */\n  virtual void RunAsync() = 0;\n\n  /**\n   * @brief Wait for 
scheduler result\n   * @param milliseconds timeout millisecond\n   * @param ret_val graph result.\n   * @return wait result\n   */\n  virtual Status Wait(int64_t milliseconds, Status* ret_val = nullptr) = 0;\n\n  /**\n   * @brief Shutdown scheduler\n   */\n  virtual void Shutdown() = 0;\n};\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/session.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SESSION_H_\n#define MODELBOX_SESSION_H_\n\n#include <list>\n#include <memory>\n#include <mutex>\n#include <unordered_map>\n\n#include \"modelbox/error.h\"\n#include \"modelbox/profiler.h\"\n#include \"modelbox/session_context.h\"\n\nnamespace modelbox {\n\nusing SessionId = std::string;\n\nclass SessionIO {\n public:\n  SessionIO();\n  virtual Status SetOutputMeta(const std::string& port_name,\n                               std::shared_ptr<DataMeta> meta) = 0;\n  virtual Status Send(const std::string& port_name,\n                      std::shared_ptr<BufferList> buffer_list) = 0;\n  virtual Status Recv(OutputBufferList& map_buffer_list, int timeout = 0) = 0;\n  virtual Status Close() = 0;\n  virtual Status Shutdown() = 0;\n\n  virtual ~SessionIO();\n\n protected:\n  friend class Session;\n  virtual void SessionEnd(std::shared_ptr<FlowUnitError> error = nullptr) = 0;\n};\n\nclass SessionStateListener {\n public:\n  SessionStateListener();\n  virtual ~SessionStateListener();\n\n  virtual void NotifySessionClose();\n};\n\nclass Session {\n public:\n  Session(const std::shared_ptr<StatisticsItem>& graph_stats);\n\n  virtual ~Session();\n\n  void AddStateListener(const std::shared_ptr<SessionStateListener>& listener);\n\n  void SetSessionIO(const std::shared_ptr<SessionIO>& io_handle);\n\n  
std::shared_ptr<SessionIO> GetSessionIO();\n\n  bool HasSessionIO();\n\n  std::shared_ptr<SessionContext> GetSessionCtx();\n  /**\n   * @brief will cause session end after current data in engine processed over\n   **/\n  void Close();\n\n  bool IsClosed();\n\n  /**\n   * @brief abort session immediately\n   **/\n  void Abort();\n\n  bool IsAbort();\n\n  void SetError(std::shared_ptr<FlowUnitError> error);\n\n  std::shared_ptr<FlowUnitError> GetError();\n\n private:\n  std::atomic_bool has_io_{false};\n  std::weak_ptr<SessionIO> io_handle_;  // held by user\n  std::shared_ptr<SessionContext> ctx_;\n\n  std::mutex state_lock_;\n  std::atomic_bool closed_{false};\n  std::atomic_bool abort_{false};\n\n  std::shared_ptr<FlowUnitError> error_;\n\n  std::mutex state_listener_list_lock_;\n  std::list<std::weak_ptr<SessionStateListener>> state_listener_list_;\n};\n\nclass SessionManager {\n public:\n  SessionManager();\n\n  virtual ~SessionManager();\n\n  std::shared_ptr<Session> CreateSession(\n      const std::shared_ptr<StatisticsItem>& graph_stats);\n\n  void DeleteSession(const SessionId& id);\n\n  std::unordered_map<SessionId, std::weak_ptr<Session>> GetSessions();\n\n private:\n  std::mutex sessions_lock_;\n  std::unordered_map<SessionId, std::weak_ptr<Session>> sessions_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SESSION_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/session_context.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SESSION_CONTEXT_H_\n#define MODELBOX_SESSION_CONTEXT_H_\n\n#include <memory>\n#include <unordered_map>\n\n#include \"modelbox/buffer_list.h\"\n#include \"modelbox/statistics.h\"\n\nnamespace modelbox {\nclass ExternalDataMapImpl;\nusing OutputBufferList =\n    std::unordered_map<std::string, std::shared_ptr<BufferList>>;\n\nenum class SessionContexStatsType { SESSION, GRAPH };\n\nclass SessionContext {\n public:\n  /**\n   * @brief Session context\n   * @param graph_stats Statistics for graph\n   */\n  SessionContext(const std::shared_ptr<StatisticsItem> &graph_stats = nullptr);\n\n  virtual ~SessionContext();\n\n  /**\n   * @brief Set private data to session context\n   * @param key private data key\n   * @param private_content private data\n   * @param type_id private data typeid\n   */\n  void SetPrivate(const std::string &key, std::shared_ptr<void> private_content,\n                  std::size_t type_id = 0);\n\n  /**\n   * @brief Get private data from session context\n   * @param key private data key\n   * @return private data\n   */\n  std::shared_ptr<void> GetPrivate(const std::string &key);\n\n  /**\n   * @brief Get private data from session context\n   * @param key private data key\n   * @return private data\n   */\n  template <typename T>\n  inline std::shared_ptr<T> GetPrivate(const 
std::string &key) {\n    return std::static_pointer_cast<T>(GetPrivate(key));\n  }\n\n  /**\n   * @brief Get private data typeid from session context\n   * @param key private data key\n   * @return private data typeid\n   */\n  std::size_t GetPrivateType(const std::string &key);\n\n  /**\n   * @brief Set session id\n   * @param session_id session id\n   */\n  void SetSessionId(const std::string &session_id);\n\n  /**\n   * @brief Get session id\n   * @return session_id session id\n   */\n  std::string GetSessionId();\n\n  /**\n   * @brief Get session configuration object\n   * @return configuration\n   */\n  std::shared_ptr<Configuration> GetConfig();\n\n  /**\n   * @brief Set error to session\n   * @param error run error\n   */\n  void SetError(std::shared_ptr<FlowUnitError> error);\n\n  std::shared_ptr<FlowUnitError> GetError();\n  /**\n   * @brief Get statistics ctx in nodes.session_id level\n   * @return Statistics ctx\n   */\n  std::shared_ptr<StatisticsItem> GetStatistics(\n      SessionContexStatsType type = SessionContexStatsType::SESSION);\n\n private:\n  std::mutex private_map_lock_;\n  std::unordered_map<std::string, std::shared_ptr<void>> private_map_;\n  std::unordered_map<std::string, std::size_t> private_map_type_;\n  std::string session_id_;\n  std::shared_ptr<Configuration> config_;\n  std::shared_ptr<FlowUnitError> error_;\n  std::shared_ptr<StatisticsItem> graph_stats_;\n  std::shared_ptr<StatisticsItem> graph_session_stats_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_SESSION_CONTEXT_H_\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/single_node.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_UNMATCH_NODE_H_\n#define MODELBOX_UNMATCH_NODE_H_\n#include \"data_handler.h\"\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\n\nclass SingleNode : public Node {\n public:\n  using Node::Init;\n  using Node::Run;\n  SingleNode(const std::string& unit_name, const std::string& unit_type,\n             const std::string& unit_device_id,\n             std::shared_ptr<FlowUnitManager> flowunit_mgr,\n             std::shared_ptr<Configuration> config,\n             std::shared_ptr<Profiler> profiler = nullptr,\n             std::shared_ptr<StatisticsItem> graph_stats = nullptr);\n\n  /**\n   * @brief init node and port\n   * @return init result.\n   */\n  Status Init();\n\n  /**\n   * @brief run node process\n   * @param data  run node with input data\n   * @return process result.\n   */\n  void Run(const std::shared_ptr<DataHandler>& data);\n\n  /**\n   * @brief  push data to output datahandler\n   * @param data_handler output datahandler\n   * @return process result.\n   */\n  Status PushDataToDataHandler(std::shared_ptr<DataHandler>& data_handler);\n\n private:\n  std::shared_ptr<FlowUnitDataContext> CreateDataContext();\n  Status RecvData(const std::shared_ptr<DataHandler>& data);\n  Status Process();\n\n  std::shared_ptr<FlowUnitDataContext> data_context_;\n  std::shared_ptr<Configuration> config_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/statistics.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_STATISTICS_H_\n#define MODELBOX_STATISTICS_H_\n\n#include <atomic>\n#include <map>\n#include <mutex>\n#include <set>\n#include <string>\n#include <tuple>\n#include <unordered_map>\n#include <utility>\n\n#include \"modelbox/base/any.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/thread_pool.h\"\n#include \"modelbox/base/timer.h\"\n\nnamespace modelbox {\n\nconstexpr const char* STATISTICS_ITEM_FLOW = \"flow\";\n\nclass StatisticsValue {\n public:\n  StatisticsValue(std::shared_ptr<Any> val);\n\n  virtual ~StatisticsValue();\n\n  const std::type_info& GetType();\n\n  template <typename T>\n  bool IsSameTypeTo(T& val) {\n    return typeid(T) == val_->type();\n  }\n\n  bool IsType(const std::type_info& type);\n\n  template <typename T>\n  bool GetValue(T& val) {\n    if (!IsSameTypeTo(val)) {\n      return false;\n    }\n\n    val = any_cast<T>(*val_);\n    return true;\n  }\n\n  bool IsInt32();\n\n  bool GetInt32(int32_t& val);\n\n  bool IsUint32();\n\n  bool GetUint32(uint32_t& val);\n\n  bool IsInt64();\n\n  bool GetInt64(int64_t& val);\n\n  bool IsUint64();\n\n  bool GetUint64(uint64_t& val);\n\n  bool IsFloat();\n\n  bool GetFloat(float& val);\n\n  bool IsDouble();\n\n  bool GetDouble(double& val);\n\n  bool IsBool();\n\n  bool GetBool(bool& val);\n\n  bool IsString();\n\n  
bool GetString(std::string& val);\n\n  std::string ToString();\n\n private:\n  template <typename T>\n  std::string ToString();\n\n  std::shared_ptr<Any> val_;\n};\n\ntemplate <typename T>\nstd::string StatisticsValue::ToString() {\n  T val;\n  if (GetValue(val) == false) {\n    return \"\";\n  }\n  std::stringstream ss;\n  ss << val;\n  return ss.str();\n}\n\nenum class StatisticsNotifyType : uint32_t {\n  CREATE = 1,\n  DELETE = 2,\n  CHANGE = 4,\n  TIMER = 8\n};\n\nclass StatisticsNotifyMsg {\n public:\n  StatisticsNotifyMsg(std::string path, std::shared_ptr<StatisticsValue> value,\n                      StatisticsNotifyType type);\n\n  virtual ~StatisticsNotifyMsg();\n  std::string path_;\n  std::shared_ptr<StatisticsValue> value_;\n  StatisticsNotifyType type_;\n};\n\nusing StatisticsNotifyFunc =\n    std::function<void(const std::shared_ptr<const StatisticsNotifyMsg>& msg)>;\n\nclass StatisticsItem;\nclass StatisticsNotifyConsumers;\n\nconstexpr const size_t minimum_notify_time = 10 * 1000;  // 10s\n\nclass StatisticsNotifyCfg {\n  friend class StatisticsItem;\n  friend class StatisticsNotifyConsumers;\n\n public:\n  StatisticsNotifyCfg(std::string path_pattern, StatisticsNotifyFunc func,\n                      const StatisticsNotifyType& type);\n\n  StatisticsNotifyCfg(std::string path_pattern, StatisticsNotifyFunc func,\n                      std::set<StatisticsNotifyType> types = {});\n\n  StatisticsNotifyCfg(const StatisticsNotifyCfg& other);\n\n  bool operator==(const StatisticsNotifyCfg& other);\n\n  virtual ~StatisticsNotifyCfg();\n\n  /**\n   * @brief Set timer notify\n   * @param delay Delay to run first, in second, >= 10s\n   * @param interval Notify interval, in second, >= 10s\n   */\n  void SetNotifyTimer(size_t delay, size_t interval = 0);\n\n private:\n  std::string GetRootPath() const;\n\n  std::string GetSubPath() const;  // path without root\n\n  void BindTimerTask(const std::shared_ptr<TimerTask>& timer_task);\n\n  void 
RemoveTimerTask();\n\n  std::string path_pattern_;\n  StatisticsNotifyFunc func_;\n  std::set<StatisticsNotifyType> type_set_;\n  size_t delay_{minimum_notify_time};\n  size_t interval_{minimum_notify_time};\n  std::shared_ptr<TimerTask> timer_task_;\n  uintptr_t id_;\n};\n\nclass StatisticsNotifyTypeHash {\n public:\n  size_t operator()(const StatisticsNotifyType& type) const {\n    return (size_t)type;\n  }\n};\n\nclass StatisticsNotifyConsumers {\n public:\n  StatisticsNotifyConsumers();\n\n  virtual ~StatisticsNotifyConsumers();\n\n  Status AddConsumer(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  Status DelConsumer(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  std::list<std::shared_ptr<StatisticsNotifyCfg>> GetConsumers(\n      const StatisticsNotifyType& type);\n\n  void Clear();\n\n private:\n  std::unordered_map<StatisticsNotifyType, std::shared_ptr<std::mutex>,\n                     StatisticsNotifyTypeHash>\n      cfg_map_lock_;\n  std::unordered_map<StatisticsNotifyType,\n                     std::list<std::shared_ptr<StatisticsNotifyCfg>>,\n                     StatisticsNotifyTypeHash>\n      cfg_map_;\n};\n\nusing StatisticsForEachFunc =\n    std::function<Status(const std::shared_ptr<StatisticsItem>& item,\n                         const std::string relative_path)>;\n\n/**\n * @brief A statistics tree\n * only leaf node has value\n */\nclass StatisticsItem : public std::enable_shared_from_this<StatisticsItem> {\n public:\n  /**\n   * @brief Construct a root node for statistics\n   * sub node can only create by parent\n   */\n  StatisticsItem();\n\n  virtual ~StatisticsItem();\n\n  /**\n   * @brief Get parent path\n   * @return Parent path\n   */\n  inline std::string GetParentPath() { return parent_path_; }\n\n  /**\n   * @brief Get name\n   * @return Name\n   */\n  inline std::string GetName() { return name_; }\n\n  /**\n   * @brief Get full path\n   * @return parent_path.name\n   */\n  inline std::string GetPath() { return path_; 
}\n\n  /**\n   * @brief Check item is leaf or not\n   * @return check result\n   */\n  inline bool IsLeaf() { return is_leaf_; }\n\n  /**\n   * @brief Set value of this item\n   * @return Result of set\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, bool>::value || std::is_same<T, int32_t>::value ||\n          std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value ||\n          std::is_same<T, uint64_t>::value || std::is_same<T, float>::value ||\n          std::is_same<T, double>::value ||\n          std::is_same<T, std::string>::value>::type>\n  Status SetValue(const T& value);\n\n  /**\n   * @brief new_value = old_value + value\n   * @param value Value to add\n   * @return Result of Add\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, int32_t>::value || std::is_same<T, uint32_t>::value ||\n          std::is_same<T, int64_t>::value || std::is_same<T, uint64_t>::value ||\n          std::is_same<T, float>::value ||\n          std::is_same<T, double>::value>::type>\n  Status IncreaseValue(const T& value);\n\n  /**\n   * @brief new_value = old_value + value, will create new item if not exist\n   * @param sub_item_name sub item name\n   * @param value Value to add\n   * @return Result of Add\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, int32_t>::value || std::is_same<T, uint32_t>::value ||\n          std::is_same<T, int64_t>::value || std::is_same<T, uint64_t>::value ||\n          std::is_same<T, float>::value ||\n          std::is_same<T, double>::value>::type>\n  Status IncreaseValue(const std::string& sub_item_name, const T& value);\n\n  /**\n   * @brief Get value\n   * @param value Return value\n   * @return Result of get\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, bool>::value || std::is_same<T, 
int32_t>::value ||\n          std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value ||\n          std::is_same<T, uint64_t>::value || std::is_same<T, float>::value ||\n          std::is_same<T, double>::value ||\n          std::is_same<T, std::string>::value>::type>\n  Status GetValue(T& value);\n\n  /**\n   * @brief Get value\n   * @return Wrap value\n   */\n  inline std::shared_ptr<StatisticsValue> GetValue() {\n    if (!IsLeaf()) {\n      StatusError = {STATUS_NOTSUPPORT,\n                     \"This is not a leaf node, get value failed.\"};\n      return nullptr;\n    }\n\n    StatusError = STATUS_OK;\n    return std::make_shared<StatisticsValue>(value_);\n  }\n\n  /**\n   * @brief Get value\n   * @return Result & value\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, bool>::value || std::is_same<T, int32_t>::value ||\n          std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value ||\n          std::is_same<T, uint64_t>::value || std::is_same<T, float>::value ||\n          std::is_same<T, double>::value ||\n          std::is_same<T, std::string>::value>::type>\n  std::tuple<Status, T> GetValue();\n\n  /**\n   * @brief Add new item as child, it is not a leaf item, can not set value\n   * @param name Name of new item\n   * @return Status & new item\n   */\n  std::shared_ptr<StatisticsItem> AddItem(const std::string& name);\n\n  /**\n   * @brief Add new item as child with value, it is a leaf item, can not add\n   * child\n   * @param name Name of new item\n   * @param value Value to set\n   * @param override_val True: override value if item exist\n   * @return Status & new item\n   */\n  template <\n      typename T,\n      typename = typename std::enable_if<\n          std::is_same<T, bool>::value || std::is_same<T, int32_t>::value ||\n          std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value ||\n          std::is_same<T, uint64_t>::value || 
std::is_same<T, float>::value ||\n          std::is_same<T, double>::value ||\n          std::is_same<T, std::string>::value>::type>\n  std::shared_ptr<StatisticsItem> AddItem(const std::string& name,\n                                          const T& value,\n                                          bool override_val = false);\n\n  /**\n   * @brief Get item with name\n   * @param child_path Target item name\n   * @return Target item, nullable\n   */\n  std::shared_ptr<StatisticsItem> GetItem(const std::string& child_path);\n\n  /**\n   * @brief Delete item with name\n   * @param name Target item name\n   */\n  void DelItem(const std::string& name) noexcept;\n\n  /**\n   * @brief Clear all item\n   */\n  void ClearItem();\n\n  /**\n   * @brief Detach from parent and clear all item, this item should not use\n   * again\n   */\n  void Dispose();\n\n  /**\n   * @brief Test has target item\n   * @param name Target item name\n   * @return Result of test\n   */\n  bool HasItem(const std::string& name);\n\n  /**\n   * @brief Get all sub item name\n   * @return Name set\n   */\n  std::set<std::string> GetItemNames();\n\n  /**\n   * @brief Walk with dfs\n   * @param func Func called on each item\n   * @param recursive recursive for each\n   * @return Status of func if not ok\n   */\n  Status ForEach(const StatisticsForEachFunc& func, bool recursive = false);\n\n  /**\n   * @brief Register notify for item, notify type {CREATE, DELETE, CHANGE,\n   * TIMER}\n   * @param cfg Config for notify\n   * @return Result for register\n   */\n  Status RegisterNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  /**\n   * @brief UnRegister notify with the cfg used in register\n   * @param cfg Used in register\n   */\n  void UnRegisterNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  /**\n   * @brief Notify the consumers of this item for specify type, async\n   * @param type Notify type\n   * @return Result for notify submit\n   */\n  Status Notify(const 
StatisticsNotifyType& type);\n\n private:\n  StatisticsItem(std::string parent_path, std::string name,\n                 std::weak_ptr<StatisticsItem> parent);\n\n  Status AddNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  void DelNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  Status AddChildrenNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  void DelChildrenNotify(const std::shared_ptr<StatisticsNotifyCfg>& cfg);\n\n  std::string GetRelativePath(const std::string& base_path);\n\n  Status ForEachInner(const StatisticsForEachFunc& func, bool recursive,\n                      const std::string& base_path);\n\n  std::shared_ptr<StatisticsItem> AddItemInner(\n      const std::string& name, const std::shared_ptr<Any>& value);\n\n  std::string parent_path_;\n  std::string name_;\n  std::weak_ptr<StatisticsItem> parent_;\n  std::string path_;  // full path : parent_path_ + \".\" + name_\n  std::mutex value_lock_;\n  std::shared_ptr<Any> value_;\n  std::mutex children_lock_;\n  std::map<std::string, std::shared_ptr<StatisticsItem>> children_;\n  std::set<std::string> children_name_set_;\n\n  std::mutex child_notify_cfg_lock_;\n  std::map<std::string, std::list<std::shared_ptr<StatisticsNotifyCfg>>>\n      children_notify_cfg_map_;  // For the child which has not been created.\n                                 // <child_name, cfg_list>\n\n  StatisticsNotifyConsumers consumers_;\n  std::shared_ptr<ThreadPool> thread_pool_;\n  std::shared_ptr<Timer> notify_timer_;\n  std::chrono::steady_clock::time_point last_change_notify_time_;\n  std::mutex last_change_notify_time_lock_;\n\n  std::atomic_bool is_alive_{true};\n  std::atomic_bool is_leaf_{false};\n};\n\ntemplate <typename T, typename>\nStatus StatisticsItem::SetValue(const T& value) {\n  if (!IsLeaf()) {\n    return {STATUS_NOTSUPPORT, \"This is not a leaf node, set value failed.\"};\n  }\n\n  std::lock_guard<std::mutex> lck(value_lock_);\n  auto old_val = value_;\n  value_ = 
std::make_shared<Any>(value);\n  if (!(value_->type() == old_val->type() &&\n        any_cast<T>(*value_) == any_cast<T>(*old_val))) {\n    Notify(StatisticsNotifyType::CHANGE);\n  }\n\n  return STATUS_OK;\n}\n\ntemplate <typename T, typename>\nStatus StatisticsItem::IncreaseValue(const T& value) {\n  if (!IsLeaf()) {\n    return {STATUS_NOTSUPPORT,\n            \"This is not a leaf node, increase value failed.\"};\n  }\n\n  std::lock_guard<std::mutex> lck(value_lock_);\n  if (value_ == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  if (value_->type() != typeid(value)) {\n    return STATUS_INVALID;\n  }\n\n  auto old_val = any_cast<T>(*value_);\n  value_ = std::make_shared<Any>(old_val + value);\n  Notify(StatisticsNotifyType::CHANGE);\n  return STATUS_OK;\n}\n\ntemplate <typename T, typename>\nStatus StatisticsItem::IncreaseValue(const std::string& sub_item_name,\n                                     const T& value) {\n  if (!is_alive_) {\n    return {STATUS_FAULT, \"This item is disposed\"};\n  }\n\n  if (IsLeaf()) {\n    return {STATUS_NOTSUPPORT, \"This is a leaf node, has no child.\"};\n  }\n\n  std::lock_guard<std::mutex> lck(children_lock_);\n  auto item = children_.find(sub_item_name);\n  if (item != children_.end()) {\n    return item->second->IncreaseValue(value);\n  }\n\n  auto value_ptr = std::make_shared<Any>(value);\n  AddItemInner(sub_item_name, value_ptr);\n  return StatusError;\n}\n\ntemplate <typename T, typename>\nStatus StatisticsItem::GetValue(T& value) {\n  if (!IsLeaf()) {\n    return {STATUS_NOTSUPPORT, \"This is not a leaf node, get value failed.\"};\n  }\n\n  std::lock_guard<std::mutex> lck(value_lock_);\n  if (value_ == nullptr) {\n    return STATUS_NODATA;\n  }\n\n  if (value_->type() != typeid(value)) {\n    return STATUS_INVALID;\n  }\n\n  value = any_cast<T>(*value_);\n  return STATUS_OK;\n}\n\ntemplate <typename T, typename>\nstd::tuple<Status, T> StatisticsItem::GetValue() {\n  T value;\n  auto ret = GetValue(value);\n  return 
std::make_tuple(ret, value);\n}\n\ntemplate <typename T, typename>\nstd::shared_ptr<StatisticsItem> StatisticsItem::AddItem(const std::string& name,\n                                                        const T& value,\n                                                        bool override_val) {\n  if (!is_alive_) {\n    StatusError = {STATUS_FAULT, \"This item is disposed\"};\n    return nullptr;\n  }\n\n  std::lock_guard<std::mutex> lck(children_lock_);\n  auto item = children_.find(name);\n  if (item != children_.end()) {\n    StatusError = STATUS_EXIST;\n    auto& target = item->second;\n    if (override_val) {\n      target->SetValue(value);\n    }\n    return target;\n  }\n\n  auto value_ptr = std::make_shared<Any>(value);\n  return AddItemInner(name, value_ptr);\n}\n\nclass Statistics {\n public:\n  /**\n   * @brief Get global statistics item\n   */\n  static std::shared_ptr<StatisticsItem> GetGlobalItem();\n\n  static void ReleaseGlobalItem();\n\n private:\n  static std::once_flag fix_item_init_flag_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_STATISTICS_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/stream.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_STREAM_H_\n#define MODELBOX_STREAM_H_\n\n#include <atomic>\n#include <memory>\n#include <unordered_map>\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/buffer_index_info.h\"\n\nnamespace modelbox {\n\nclass Session;\n\nclass DataMeta {\n public:\n  DataMeta();\n\n  DataMeta(const DataMeta &other);\n\n  virtual ~DataMeta();\n\n  void SetMeta(const std::string &key, std::shared_ptr<void> meta);\n\n  std::shared_ptr<void> GetMeta(const std::string &key);\n\n  std::unordered_map<std::string, std::shared_ptr<void>> GetMetas();\n\n private:\n  std::unordered_map<std::string, std::shared_ptr<void>> private_map_;\n};\n\n/**\n * @brief record stream order for each expand level\n **/\nclass StreamOrder {\n public:\n  StreamOrder();\n\n  bool operator<(const StreamOrder &other_stream_order);\n\n  std::shared_ptr<StreamOrder> Copy();\n\n  void Expand(size_t index_in_this_level);\n\n  void Collapse();\n\n private:\n  std::list<size_t> index_at_each_expand_level_;\n};\n\nclass Stream {\n public:\n  Stream(std::shared_ptr<Session> session);\n\n  virtual ~Stream();\n\n  std::shared_ptr<Session> GetSession();\n\n  void SetMaxBufferCount(size_t max_buffer_count);\n\n  bool ReachEnd();\n\n  size_t GetBufferCount();\n\n  void IncreaseBufferCount();\n\n  void SetStreamMeta(std::shared_ptr<DataMeta> 
data_meta);\n\n  std::shared_ptr<DataMeta> GetStreamMeta();\n\n  std::shared_ptr<StreamOrder> GetStreamOrder();\n\n  void SetStreamOrder(std::shared_ptr<StreamOrder> stream_order);\n\n private:\n  std::shared_ptr<Session> session_;\n  std::atomic_size_t cur_buffer_count_{0};\n  size_t max_buffer_count_{0};\n  std::shared_ptr<DataMeta> data_meta_;\n\n  std::shared_ptr<StreamOrder> stream_order_ = std::make_shared<StreamOrder>();\n};\n\nclass StreamPtrOrderCmp {\n public:\n  bool operator()(const std::shared_ptr<Stream> &s1,\n                  const std::shared_ptr<Stream> &s2) const {\n    return *(s1->GetStreamOrder()) < *(s2->GetStreamOrder());\n  }\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/tensor.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_TENSOR_H_\n#define MODELBOX_TENSOR_H_\n\n#include <modelbox/buffer.h>\n#include <modelbox/type.h>\n\nnamespace modelbox {\n\n/**\n * @brief Interface to access the data buffer with tensor API\n */\nclass TensorBuffer : public Buffer {\n public:\n  /**\n   * @brief Tensor buffer object\n   */\n  TensorBuffer();\n\n  /**\n   * @brief Create a new tensor buffer related with specific device\n   * @param device related device\n   */\n  TensorBuffer(const std::shared_ptr<Device>& device);\n\n  /**\n   * @brief Create a new tensor buffer related with specific device memory\n   * @param dev_mem related device memory\n   */\n  TensorBuffer(const std::shared_ptr<DeviceMemory>& dev_mem);\n\n  /**\n   * @brief Copy from another tensor buffer\n   * @param other another tensor buffer\n   */\n  TensorBuffer(const TensorBuffer& other);\n\n  ~TensorBuffer() override;\n\n  /**\n   * @brief Resize tensor\n   * @param shape shape list to resize\n   * @return resize result\n   */\n  template <typename T>\n  Status Resize(const std::vector<size_t>& shape) {\n    auto new_size = Volume(shape) * sizeof(T);\n    auto status = Build(new_size);\n    if (!status) {\n      MBLOG_WARN << \"Resize failed.\";\n      return status;\n    }\n\n    shape_.assign(shape.begin(), shape.end());\n    type_ = 
TypeToDataType<T>::Value;\n    return STATUS_OK;\n  }\n\n  /**\n   * @brief Get tensor buffer shape\n   * @return tensor buffer shape list.\n   */\n  const std::vector<size_t>& Shape() const;\n\n  /**\n   * @brief Set shape to tensor buffer\n   * @param shape shape list\n   * @return set result\n   */\n  template <typename T>\n  Status SetShape(const std::vector<size_t>& shape) {\n    auto type = TypeToDataType<T>::Value;\n    if (MODELBOX_TYPE_INVALID == type_) {\n      type_ = type;\n    } else if (type_ != type) {\n      return {STATUS_INVALID, \"invalid data type.\"};\n    }\n\n    if (Volume(shape) * sizeof(T) != GetBytes()) {\n      return {STATUS_INVALID, \"tensor size must be equal device memory size.\"};\n    }\n\n    shape_.assign(shape.begin(), shape.end());\n    return STATUS_OK;\n  }\n\n  /**\n   * @brief Set tensor buffer data type\n   * @param type data type\n   */\n  void SetType(ModelBoxDataType type);\n\n  /**\n   * @brief Get tensor buffer data type\n   * @return type data type\n   */\n  ModelBoxDataType GetType();\n\n  /**\n   * @brief Get tensor buffer mutable raw data\n   * @return raw data pointer to tensor buffer data\n   */\n  template <typename T>\n  T* MutableData() {\n    auto type = TypeToDataType<T>::Value;\n    if (type_ != type) {\n      MBLOG_WARN << \"invalid data type.\";\n      return nullptr;\n    }\n\n    auto device_mem = GetDeviceMemory();\n    if (!device_mem) {\n      MBLOG_WARN << \"device_mem is nullptr, may be exception buffer.\";\n      return nullptr;\n    }\n\n    auto&& data = device_mem->GetPtr<T>();\n    if (!data) {\n      return nullptr;\n    }\n\n    return data.get();\n  }\n\n  /**\n   * @brief Get tensor buffer const raw data\n   * @return raw data pointer to tensor buffer data\n   */\n  template <typename T>\n  const T* ConstData() const {\n    auto type = TypeToDataType<T>::Value;\n    if (type_ != type) {\n      MBLOG_WARN << \"invalid data type.\";\n      return nullptr;\n    }\n\n    auto device_mem = 
GetDeviceMemory();\n    if (!device_mem) {\n      MBLOG_WARN << \"dev_mem_ is nullptr, may be exception buffer.\";\n      return nullptr;\n    }\n\n    auto&& data = device_mem->GetConstPtr<T>();\n    if (!data) {\n      return nullptr;\n    }\n\n    return data.get();\n  }\n\n  /**\n   * @brief Create a copy of buffer share same data buffer\n   * @return new buffer object\n   */\n  std::shared_ptr<Buffer> Copy() const override;\n\n  /**\n   * @brief Create a copy of buffer with new data buffer\n   * @return new buffer object\n   */\n  std::shared_ptr<Buffer> DeepCopy() const override;\n\n protected:\n  /**\n   * @brief Create a copy of buffer with new data buffer\n   * @param other tensor buffer\n   * @return copy result\n   */\n  Status DeepCopy(const TensorBuffer& other);\n\n private:\n  friend BufferList;\n\n  /// tensor shape\n  std::vector<size_t> shape_;\n\n  /// tensor data type\n  ModelBoxDataType type_{MODELBOX_TYPE_INVALID};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_TENSOR_H_"
  },
  {
    "path": "src/libmodelbox/include/modelbox/tensor_list.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TENSOR_LIST_H_\n#define MODELBOX_TENSOR_LIST_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/buffer_list.h>\n#include <modelbox/buffer_type.h>\n#include <modelbox/tensor.h>\n#include <modelbox/type.h>\n\n#include <algorithm>\n#include <atomic>\n#include <memory>\n#include <stdexcept>\n#include <utility>\n\nnamespace modelbox {\n\n/**\n * @brief Tensor list API\n */\nclass TensorList {\n public:\n  /**\n   * @brief Create tensor list from buffer list.\n   * @param buffer_list buffer list\n   */\n  TensorList(std::shared_ptr<BufferList> buffer_list)\n      : bl_(std::move(buffer_list)) {\n    if (!bl_) {\n      throw std::invalid_argument(\"buffer list must not be nullptr.\");\n    }\n\n    for (auto& buffer : *bl_) {\n      auto tensor = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n      if (!tensor) {\n        throw std::invalid_argument(\n            \"the elements of bufferlist must be Tensorbuffer.\");\n      }\n    }\n  }\n\n  virtual ~TensorList() = default;\n\n  /**\n   * @brief Set tensor list shape\n   * @param shape shape list\n   * @return set result\n   */\n  template <typename T>\n  Status SetShape(const std::vector<std::vector<size_t>>& shape) {\n    if (shape.size() != bl_->buffer_list_.size()) 
{\n      return {STATUS_RANGE, \"invalid shape size\"};\n    }\n\n    Status status{STATUS_SUCCESS};\n    auto iter = shape.begin();\n    for (auto& buffer : bl_->buffer_list_) {\n      auto tensor = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n      if (!tensor) {\n        return STATUS_FAULT;\n      }\n\n      status = tensor->SetShape<T>(*(iter++));\n      if (!status) {\n        MBLOG_WARN << \"tensor set shape failed: \" << status;\n        return STATUS_FAULT;\n      }\n    }\n\n    return STATUS_OK;\n  }\n\n  /**\n   * @brief Set tensor list data type\n   * @param type data type\n   */\n  void SetType(ModelBoxDataType type);\n\n  /**\n   * @brief Get tensor list data type\n   * @return data type\n   */\n  std::vector<std::vector<size_t>> GetShape() const;\n\n  template <typename T>\n  Status Build(const std::vector<std::vector<size_t>>& shape_list) {\n    std::vector<size_t> data_size_list(shape_list.size(), 0);\n    std::transform(shape_list.begin(), shape_list.end(), data_size_list.begin(),\n                   [](const std::vector<size_t>& shape) {\n                     return Volume(shape) * sizeof(T);\n                   });\n\n    size_t size = std::accumulate(data_size_list.begin(), data_size_list.end(),\n                                  (size_t)0, std::plus<size_t>());\n    if (!bl_->dev_mem_) {\n      return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n    }\n\n    auto device = bl_->dev_mem_->GetDevice();\n    bl_->dev_mem_ = device->MemAlloc(size);\n    if (!bl_->dev_mem_) {\n      MBLOG_WARN << \" MemAlloc \" << size << \" byte data failed\";\n      return STATUS_NOMEM;\n    }\n\n    bl_->buffer_list_.resize(data_size_list.size(), nullptr);\n\n    size_t offset = 0;\n    for (size_t i = 0; i < bl_->buffer_list_.size(); i++) {\n      auto&& mem = bl_->dev_mem_->Cut(offset, data_size_list[i]);\n      bl_->buffer_list_[i] = std::make_shared<TensorBuffer>(mem);\n      offset += data_size_list[i];\n    }\n\n    bl_->is_contiguous_ = 
true;\n    return SetShape<T>(shape_list);\n  }\n\n  template <typename T>\n  Status BuildFromHost(const std::vector<std::vector<size_t>>& shape_list,\n                       void* data, size_t data_size,\n                       const DeleteFunction& func = nullptr) {\n    std::vector<size_t> data_size_list(shape_list.size(), 0);\n    std::transform(shape_list.begin(), shape_list.end(), data_size_list.begin(),\n                   [](const std::vector<size_t>& shape) {\n                     return Volume(shape) * sizeof(T);\n                   });\n\n    size_t size = std::accumulate(data_size_list.begin(), data_size_list.end(),\n                                  (size_t)0, std::plus<size_t>());\n    if (data_size < size) {\n      MBLOG_WARN << \"invalid data size. size: \" << size\n                 << \" data_size: \" << data_size;\n      return STATUS_RANGE;\n    }\n\n    if (!bl_->dev_mem_) {\n      return {STATUS_INVALID, \"device memory must not be nullptr.\"};\n    }\n\n    auto device = bl_->dev_mem_->GetDevice();\n    if (bl_->dev_mem_->IsHost() && func) {\n      std::shared_ptr<void> data_ptr(data, func);\n      bl_->dev_mem_ = device->MemAcquire(data_ptr, data_size);\n    } else {\n      bl_->dev_mem_ = device->MemWrite(data, data_size);\n      if (!bl_->dev_mem_) {\n        MBLOG_WARN << \" device MemWrite failed.\";\n        return STATUS_NOMEM;\n      }\n    }\n\n    bl_->buffer_list_.resize(data_size_list.size(), nullptr);\n\n    size_t offset = 0;\n    for (size_t i = 0; i < bl_->buffer_list_.size(); i++) {\n      auto&& mem = bl_->dev_mem_->Cut(offset, data_size_list[i]);\n      bl_->buffer_list_[i] = std::make_shared<TensorBuffer>(mem);\n      offset += data_size_list[i];\n    }\n\n    bl_->is_contiguous_ = true;\n    return SetShape<T>(shape_list);\n  }\n\n  size_t Size() const;\n  size_t GetBytes() const;\n\n  std::shared_ptr<TensorBuffer> operator[](size_t pos);\n  std::shared_ptr<const TensorBuffer> operator[](size_t pos) const;\n  
std::shared_ptr<TensorBuffer> At(size_t idx);\n  std::shared_ptr<const TensorBuffer> At(size_t idx) const;\n  void PushBack(const std::shared_ptr<TensorBuffer>& buf);\n\n  template <typename T>\n  T* MutableBufferData(size_t idx) {\n    if (idx > bl_->buffer_list_.size()) {\n      MBLOG_WARN << \"invalid idx: \" << idx\n                 << \" buff_vec_view_.size(): \" << bl_->buffer_list_.size();\n      return nullptr;\n    }\n\n    auto tensor = std::dynamic_pointer_cast<TensorBuffer>(bl_->At(idx));\n    return tensor->MutableData<T>();\n  }\n\n  template <typename T>\n  const T* ConstBufferData(size_t idx) const {\n    if (idx > bl_->buffer_list_.size()) {\n      MBLOG_WARN << \"invalid idx: \" << idx\n                 << \" buff_vec_view_.size(): \" << bl_->buffer_list_.size();\n      return nullptr;\n    }\n\n    auto tensor = std::dynamic_pointer_cast<TensorBuffer>(bl_->At(idx));\n    return tensor->ConstData<T>();\n  }\n\n  template <typename T>\n  T* MutableData() {\n    return static_cast<T*>(bl_->MutableData());\n  }\n\n  template <typename T>\n  const T* ConstData() const {\n    return static_cast<const T*>(bl_->ConstData());\n  }\n\n  Status CopyMeta(const std::shared_ptr<TensorList>& tl,\n                  bool is_override = false);\n\n  template <typename T>\n  void Set(const std::string& key, T&& value) {\n    bl_->Set(key, value);\n  }\n\n private:\n  std::shared_ptr<BufferList> bl_;\n};\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/token_header.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef TOKEN_HEADER_H_\n#define TOKEN_HEADER_H_\n#include <iostream>\n\nnamespace modelbox {\nclass AccountInfo {\n public:\n  std::string domain_name;\n  std::string user_name;\n  std::string passwd;\n};\n\nclass ConsigneeInfo {\n public:\n  std::string ak;\n  std::string sk;\n  std::string domain_id;\n  std::string project_id;\n};\n\nclass UserAgencyCredential {\n public:\n  std::string user_id;\n  std::string user_ak;\n  std::string user_sk;\n  std::string user_secure_token;\n};\n\n\nclass AgentToken {\n public:\n  std::string expires_time_;\n  std::string x_subject_token_;\n};\n\nclass UserAgencyToken {\n public:\n  std::string user_token;\n};\n\nclass AgencyInfo {\n public:\n  std::string user_domain_name;\n  std::string xrole_name;\n\n  bool operator<(const AgencyInfo &agency_info) const {\n    if (this->user_domain_name < agency_info.user_domain_name) {\n      return true;\n    }\n    if ((this->user_domain_name == agency_info.user_domain_name) &&\n        (this->xrole_name < agency_info.xrole_name)) {\n      return true;\n    }\n    return false;\n  }\n};\n\nclass ProjectInfo {\n public:\n  std::string project_name;\n  std::string project_id;\n\n  bool operator<(const ProjectInfo &project_info) const {\n    if (this->project_name < project_info.project_name) {\n      return true;\n    }\n    if 
((this->project_name == project_info.project_name) &&\n        (this->project_id < project_info.project_id)) {\n      return true;\n    }\n    return false;\n  }\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/libmodelbox/include/modelbox/type.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TYPE_H_\n#define MODELBOX_TYPE_H_\n\n#include <modelbox/base/log.h>\n#include <stdint.h>\n\nnamespace modelbox {\n\ntypedef enum ModelBoxDataType {\n  MODELBOX_TYPE_INVALID = 0,\n  MODELBOX_FLOAT = 1,\n  MODELBOX_DOUBLE = 2,\n  MODELBOX_INT32 = 3,  // Int32 tensors are always in 'host' memory.\n  MODELBOX_UINT8 = 4,\n  MODELBOX_INT16 = 5,\n  MODELBOX_INT8 = 6,\n  MODELBOX_STRING = 7,\n  MODELBOX_COMPLEX64 = 8,  // Single-precision complex\n  MODELBOX_COMPLEX = 8,    // Old identifier kept for API backwards compatibility\n  MODELBOX_INT64 = 9,\n  MODELBOX_BOOL = 10,\n  MODELBOX_QINT8 = 11,     // Quantized int8\n  MODELBOX_QUINT8 = 12,    // Quantized uint8\n  MODELBOX_QINT32 = 13,    // Quantized int32\n  MODELBOX_BFLOAT16 = 14,  // Float32 truncated to 16 bits.  
Only for cast ops.\n  MODELBOX_QINT16 = 15,    // Quantized int16\n  MODELBOX_QUINT16 = 16,   // Quantized uint16\n  MODELBOX_UINT16 = 17,\n  MODELBOX_COMPLEX128 = 18,  // Double-precision complex\n  MODELBOX_HALF = 19,\n  MODELBOX_RESOURCE = 20,\n  MODELBOX_VARIANT = 21,\n  MODELBOX_UINT32 = 22,\n  MODELBOX_UINT64 = 23,\n} ModelBoxDataType;\n\ntemplate <class T>\nstruct TypeToDataType;\n\ntemplate <ModelBoxDataType T>\nstruct DataTypeSize;\n\ntemplate <ModelBoxDataType T>\nstruct DataTypeToType;\n\nstruct Float16 {\n  uint8_t bytes[2];\n};\n\n#define MODELBOX_DATATYPE_DEFINE(TYPE, DATA_TYPE)                 \\\n  template <>                                          \\\n  struct TypeToDataType<TYPE> {                        \\\n    static constexpr ModelBoxDataType Value = DATA_TYPE; \\\n  };                                                   \\\n  template <>                                          \\\n  struct DataTypeSize<DATA_TYPE> {                     \\\n    static constexpr size_t Size = sizeof(TYPE);       \\\n  };                                                   \\\n  template <>                                          \\\n  struct DataTypeToType<DATA_TYPE> {                   \\\n    typedef TYPE Type;                                 \\\n  }\n\nMODELBOX_DATATYPE_DEFINE(float, MODELBOX_FLOAT);\nMODELBOX_DATATYPE_DEFINE(double, MODELBOX_DOUBLE);\nMODELBOX_DATATYPE_DEFINE(int32_t, MODELBOX_INT32);\nMODELBOX_DATATYPE_DEFINE(uint32_t, MODELBOX_UINT32);\nMODELBOX_DATATYPE_DEFINE(uint16_t, MODELBOX_UINT16);\nMODELBOX_DATATYPE_DEFINE(uint8_t, MODELBOX_UINT8);\nMODELBOX_DATATYPE_DEFINE(int16_t, MODELBOX_INT16);\nMODELBOX_DATATYPE_DEFINE(int8_t, MODELBOX_INT8);\nMODELBOX_DATATYPE_DEFINE(std::string, MODELBOX_STRING);\nMODELBOX_DATATYPE_DEFINE(int64_t, MODELBOX_INT64);\nMODELBOX_DATATYPE_DEFINE(uint64_t, MODELBOX_UINT64);\nMODELBOX_DATATYPE_DEFINE(bool, MODELBOX_BOOL);\nMODELBOX_DATATYPE_DEFINE(Float16, MODELBOX_HALF);\n\n#undef MODELBOX_DATATYPE_DEFINE\n\nextern 
size_t GetDataTypeSize(ModelBoxDataType type);\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/libmodelbox/include/modelbox/virtual_node.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_VIRTUAL_NODE_H_\n#define MODELBOX_VIRTUAL_NODE_H_\n\n#include <chrono>\n\n#include \"external_data_map.h\"\n#include \"modelbox/base/device.h\"\n#include \"modelbox/error.h\"\n#include \"modelbox/external_data_map.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/statistics.h\"\n\nnamespace modelbox {\n\nclass InputVirtualNode : public Node {\n public:\n  InputVirtualNode(std::string device_name, std::string device_id,\n                   std::shared_ptr<DeviceManager> device_manager);\n\n  ~InputVirtualNode() override;\n\n  Status Init(const std::set<std::string>& input_port_names,\n              const std::set<std::string>& output_port_names,\n              const std::shared_ptr<Configuration>& config) override;\n\n  /**\n   * @brief Open the Node object\n   *\n   */\n  Status Open() override;\n\n  /**\n   * @brief The node main function\n   *\n   * @param type run type\n   * @return Status\n   */\n  Status Run(RunType type) override;\n\n  std::shared_ptr<Device> GetDevice() override;\n\n private:\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::string device_name_;\n  std::string device_id_;\n};\n\nclass OutputVirtualNode : public Node {\n public:\n  OutputVirtualNode(const std::string& device_name,\n                    const std::string& device_id,\n                    
std::shared_ptr<DeviceManager> device_manager);\n\n  ~OutputVirtualNode() override;\n\n  Status Init(const std::set<std::string>& input_port_names,\n              const std::set<std::string>& output_port_names,\n              const std::shared_ptr<Configuration>& config) override;\n\n  /**\n   * @brief Open the Node object\n   *\n   */\n  Status Open() override;\n\n  /**\n   * @brief The node main function\n   *\n   * @param type run type\n   * @return Status\n   */\n  Status Run(RunType type) override;\n\n  std::shared_ptr<Device> GetDevice() override;\n\n private:\n  void EraseInvalidData();\n\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::string device_name_;\n  std::string device_id_;\n\n  std::shared_ptr<Device> target_device_;\n  bool need_move_to_device_{false};\n};\n\nclass SessionUnmatchCache {\n public:\n  SessionUnmatchCache(const std::set<std::string>& port_names);\n\n  void SetTargetDevice(std::shared_ptr<Device> target_device);\n\n  Status CacheBuffer(const std::string& port_name,\n                     const std::shared_ptr<Buffer>& buffer);\n\n  std::shared_ptr<FlowUnitError> GetLastError();\n\n  Status PopCache(OutputBufferList& output_buffer_list);\n\n private:\n  std::shared_ptr<Device> target_device_;\n\n  std::unordered_map<std::string, std::map<std::shared_ptr<Stream>,\n                                           std::vector<std::shared_ptr<Buffer>>,\n                                           StreamPtrOrderCmp>>\n      port_streams_map_;\n\n  std::shared_ptr<FlowUnitError> last_error_;\n};\n\nclass OutputUnmatchVirtualNode : public Node {\n public:\n  OutputUnmatchVirtualNode(const std::string& device_name,\n                           const std::string& device_id,\n                           std::shared_ptr<DeviceManager> device_manager);\n\n  ~OutputUnmatchVirtualNode() override;\n\n  Status Init(const std::set<std::string>& input_port_names,\n              const std::set<std::string>& output_port_names,\n              const 
std::shared_ptr<Configuration>& config) override;\n\n  /**\n   * @brief Open the Node object\n   *\n   */\n  Status Open() override;\n\n  /**\n   * @brief The node main function\n   *\n   * @param type run type\n   * @return Status\n   */\n  Status Run(RunType type) override;\n\n  std::shared_ptr<Device> GetDevice() override;\n\n private:\n  std::shared_ptr<DeviceManager> device_mgr_;\n  std::string device_name_;\n  std::string device_id_;\n\n  std::shared_ptr<Device> target_device_;\n  bool need_move_to_device_{false};\n\n  std::unordered_map<std::shared_ptr<Session>,\n                     std::shared_ptr<SessionUnmatchCache>>\n      session_cache_map_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_VIRTUAL_NODE_H_\n"
  },
  {
    "path": "src/libmodelbox/libmodelbox.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox\n\nName: libmodelbox\nDescription: modelbox SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox\nCflags: -I${includedir}"
  },
  {
    "path": "src/libmodelbox/profiling/performance.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <fstream>\n#include <nlohmann/json.hpp>\n#include <utility>\n\n#include \"modelbox/profiler.h\"\n\nnamespace modelbox {\n\nclass PerfCollector {\n public:\n  virtual void Export(nlohmann::json& perf_data) = 0;\n\n  virtual void Collect() = 0;\n\n  virtual bool Empty() = 0;\n};\n\nclass CpuUsageData {\n public:\n  CpuUsageData(TimePoint timestamp, int32_t percentage)\n      : timestamp_(timestamp), percentage_(percentage) {}\n\n  virtual ~CpuUsageData() = default;\n  TimePoint timestamp_;\n  int32_t percentage_{0};  // 0 ~ 100\n};\n\nclass DeviceMemUsageData {\n public:\n  DeviceMemUsageData(std::string device_tag, int64_t device_mem,\n                     int32_t device_mem_percentage)\n      : device_tag_(std::move(device_tag)),\n        device_mem_(device_mem),\n        device_mem_percentage_(device_mem_percentage) {}\n  virtual ~DeviceMemUsageData() = default;\n\n  std::string device_tag_;\n  int64_t device_mem_{0};\n  int32_t device_mem_percentage_{0};\n};\n\nclass MemUsageData {\n public:\n  MemUsageData(TimePoint timestamp, int64_t host_mem,\n               int32_t host_mem_percentage)\n      : timestamp_(timestamp),\n        host_mem_(host_mem),\n        host_mem_percentage_(host_mem_percentage) {}\n  virtual ~MemUsageData() = default;\n\n  void AddDeviceMemUsageData(const std::string& 
device_tag, int64_t device_mem,\n                             int32_t device_mem_percentage) {\n    device_mem_data_list_.emplace_back(device_tag, device_mem,\n                                       device_mem_percentage);\n  }\n\n  TimePoint timestamp_;\n  int64_t host_mem_{0};\n  int32_t host_mem_percentage_{0};\n  std::list<DeviceMemUsageData> device_mem_data_list_;\n};\n\nclass MemUsageCollector : public PerfCollector {\n public:\n  MemUsageCollector(std::shared_ptr<\n                    std::map<std::string, std::pair<std::string, std::string>>>\n                        devices)\n      : devices_(std::move(devices)){};\n\n  virtual ~MemUsageCollector() = default;\n\n  void Export(nlohmann::json& perf_data) override;\n\n  void Collect() override;\n\n  bool Empty() override;\n\n private:\n  // device type + device id -> std::map<TimePoint, int32_t>\n  std::list<MemUsageData> mem_usage_data_list_;\n  std::mutex data_mutex_;\n  std::shared_ptr<std::map<std::string, std::pair<std::string, std::string>>>\n      devices_;\n};\n\nclass FlowUnitPerfCollector : public PerfCollector {\n public:\n  FlowUnitPerfCollector(\n      std::shared_ptr<\n          std::map<std::string, std::pair<std::string, std::string>>>\n          devices,\n      std::shared_ptr<std::vector<std::string>> flow_unit_names);\n  virtual ~FlowUnitPerfCollector();\n\n  void Export(nlohmann::json& perf_data) override;\n\n  void Collect() override;\n\n  bool Empty() override;\n\n  std::shared_ptr<FlowUnitPerfCtx> GetFlowUnitPerfCtx(\n      const std::string& flow_unit_name);\n\n private:\n  std::mutex data_mutex_;\n  // FlowUnit name -> FlowUnitProfile\n  std::map<std::string, std::shared_ptr<FlowUnitPerfCtx>>\n      flow_unit_per_ctx_map_;\n  std::shared_ptr<std::map<std::string, std::pair<std::string, std::string>>>\n      devices_;\n  std::shared_ptr<std::vector<std::string>> flow_unit_names_;\n};\n\nclass CpuUsageCollector : public PerfCollector {\n public:\n  CpuUsageCollector() = default;\n\n  
virtual ~CpuUsageCollector() = default;\n\n  void Export(nlohmann::json& perf_data) override;\n\n  void Collect() override;\n\n  bool Empty() override;\n\n private:\n  std::list<CpuUsageData> cpu_usage_data_list;\n  std::mutex data_mutex_;\n};\n\nPerformance::Performance(std::shared_ptr<DeviceManager> device_mgr,\n                         std::string& output_dir_path)\n    : ProfilerLifeCycle(\"Performance\"),\n      timer_(nullptr),\n      sample_interval_(DEFAULT_TIMER_SAMPLE_INTERVAL),\n      write_file_interval_(DEFAULT_WRITE_PROFILE_INTERVAL),\n      device_mgr_(std::move(device_mgr)),\n      output_dir_path_(output_dir_path) {}\nPerformance::~Performance() {\n  if (IsRunning()) {\n    Stop();\n  }\n}\n\nStatus Performance::OnInit() {\n  if (device_mgr_ == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  devices_ = std::make_shared<\n      std::map<std::string, std::pair<std::string, std::string>>>();\n\n  flow_unit_names_ = std::make_shared<std::vector<std::string>>();\n\n  auto device_map = device_mgr_->GetDeviceList();\n  for (const auto& devices : device_map) {\n    for (const auto& device : devices.second) {\n      std::string device_type = devices.first;\n      std::string device_id = device.first;\n      devices_->insert(std::make_pair(device_type + device_id,\n                                      std::make_pair(device_type, device_id)));\n    }\n  }\n\n  // TODO : get flow unit names from session context\n\n  auto cpu_usage_collector = std::make_shared<CpuUsageCollector>();\n  perf_collectors_.push_back(cpu_usage_collector);\n\n  auto mem_usage_collector = std::make_shared<MemUsageCollector>(devices_);\n  perf_collectors_.push_back(mem_usage_collector);\n\n  auto flow_unit_perf_collector =\n      std::make_shared<FlowUnitPerfCollector>(devices_, flow_unit_names_);\n  perf_collectors_.push_back(flow_unit_perf_collector);\n\n  flow_unit_perf_collector_ = flow_unit_perf_collector;\n  return STATUS_SUCCESS;\n}\n\nStatus Performance::OnStart() {\n  
timer_run_ = true;\n  timer_ = std::make_shared<std::thread>(&Performance::PerformanceWorker, this);\n  return STATUS_SUCCESS;\n}\n\nStatus Performance::OnStop() {\n  OnPause();\n\n  WritePerformance();\n  return STATUS_SUCCESS;\n}\n\nStatus Performance::OnPause() {\n  if (timer_) {\n    timer_run_ = false;\n    timer_->join();\n    timer_ = nullptr;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Performance::OnResume() { return OnStart(); }\n\nStatus Performance::WritePerformance() {\n  bool is_empty = true;\n  for (const auto& collector : perf_collectors_) {\n    if (!collector->Empty()) {\n      is_empty = false;\n      break;\n    }\n  }\n\n  if (is_empty) {\n    return STATUS_SUCCESS;\n  }\n\n  nlohmann::json perf_json;\n  for (const auto& collector : perf_collectors_) {\n    collector->Export(perf_json);\n  }\n\n  time_t current_time = time(nullptr);\n  struct tm result_tm;\n  char buf[64] = {0};\n  auto* local_tm = localtime_r(&current_time, &result_tm);\n  if (local_tm) {\n    strftime(buf, sizeof(buf), \"%Y-%m-%d-%H-%M-%S\", local_tm);\n  }\n\n  // TODO: graph_name + task_name + timestample\n  std::string file_path =\n      output_dir_path_ + \"/\" + \"performance_\" + std::string(buf) + \".json\";\n\n  std::ofstream out(file_path);\n  if (out.is_open() == false) {\n    MBLOG_ERROR << \"write trace failed, file path : \" << file_path;\n    return STATUS_FAULT;\n  }\n\n  std::string profiles_json_str = perf_json.dump();\n  out.write(profiles_json_str.c_str(), profiles_json_str.size());\n  if (out.rdstate() & std::ios::failbit) {\n    MBLOG_ERROR << \"Write file \" << file_path << \" failed\";\n    out.close();\n    return STATUS_FAULT;\n  }\n\n  out.close();\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<FlowUnitPerfCtx> Performance::GetFlowUnitPerfCtx(\n    const std::string& flow_unit_name) {\n  return flow_unit_perf_collector_->GetFlowUnitPerfCtx(flow_unit_name);\n}\n\nvoid Performance::PerformanceWorker() {\n  unsigned long now = {0};\n  int32_t sleep 
= sample_interval_;\n  int32_t sleep_time = 0;\n  unsigned long expect_time = 0;\n\n  MBLOG_INFO << \"profiler timer start\";\n\n  now = GetTickCount();\n  expect_time = now + sleep;\n\n  uint32_t count = 0;\n  while (timer_run_) {\n    now = GetTickCount();\n    sleep_time = expect_time - now;\n    if (sleep_time < 0) {\n      sleep_time = 0;\n      expect_time = now;\n    }\n\n    expect_time += sleep;\n    std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time));\n\n    for (const auto& profile_data : perf_collectors_) {\n      profile_data->Collect();\n    }\n\n    count++;\n    if (count > write_file_interval_) {\n      WritePerformance();\n      count = 0;\n    }\n  }\n\n  MBLOG_INFO << \"profiler timer end\";\n}\n\nvoid Performance::SetTimerSampleInterval(int32_t interval) {\n  sample_interval_ = interval;\n}\n\nvoid Performance::SetWriteFileInterval(int32_t interval) {\n  write_file_interval_ = interval;\n}\n\nFlowUnitPerfCtx::FlowUnitPerfCtx(const std::string& flow_unit_name) {\n  flow_unit_name_ = flow_unit_name;\n  process_latency_ = 0;\n  process_latency_count_ = 0;\n};\n\nFlowUnitPerfCtx::~FlowUnitPerfCtx() = default;\n\nvoid FlowUnitPerfCtx::UpdateProcessLatency(int32_t process_latency) {\n  std::lock_guard<std::mutex> lock(latency_mutex_);\n  double total_latency = process_latency_ * process_latency_count_;\n  total_latency += process_latency;\n  process_latency_count_++;\n  process_latency_ = total_latency / process_latency_count_;\n}\n\nint32_t FlowUnitPerfCtx::GetProcessLatency() {\n  return static_cast<int32_t>(process_latency_);\n}\n\nvoid FlowUnitPerfCtx::UpdateDeviceMemory(std::string& device_type,\n                                         std::string& device_id,\n                                         int32_t memory) {\n  std::lock_guard<std::mutex> lock(devices_memories_mutex_);\n  std::string device = device_type + device_id;\n  if (devices_memories_.find(device) == devices_memories_.end()) {\n    std::map<TimePoint, int32_t> 
device_memories;\n    devices_memories_.insert(std::make_pair(device, device_memories));\n  }\n\n  auto current_time = std::chrono::time_point_cast<std::chrono::milliseconds>(\n      std::chrono::system_clock::now());\n  devices_memories_[device].insert(std::make_pair(current_time, memory));\n}\n\nint32_t FlowUnitPerfCtx::GetDeviceMemory(std::string& device_type,\n                                         std::string& device_id) {\n  std::lock_guard<std::mutex> lock(devices_memories_mutex_);\n  std::string device = device_type + device_id;\n  if (devices_memories_.find(device) == devices_memories_.end()) {\n    MBLOG_ERROR\n        << \"can not get device memory profiling information, device_type : \"\n        << device_type << \", device_id : \" << device_id;\n    return 0;\n  }\n\n  auto device_memories = devices_memories_[device];\n  if (device_memories.empty()) {\n    return 0;\n  }\n\n  return device_memories.rbegin()->second;\n}\n\nvoid FlowUnitPerfCtx::UpdateDeviceMemoryUsage(std::string& device_type,\n                                              std::string& device_id,\n                                              int32_t memory_usage) {\n  std::lock_guard<std::mutex> lock(devices_memories_usage_mutex_);\n  std::string device = device_type + device_id;\n  if (devices_memories_usage_.find(device) == devices_memories_usage_.end()) {\n    std::map<TimePoint, int32_t> device_memories_usage;\n    devices_memories_usage_.insert(\n        std::make_pair(device, device_memories_usage));\n  }\n\n  auto current_time = std::chrono::time_point_cast<std::chrono::milliseconds>(\n      std::chrono::system_clock::now());\n  devices_memories_usage_[device].insert(\n      std::make_pair(current_time, memory_usage));\n}\n\nint32_t FlowUnitPerfCtx::GetDeviceMemoryUsage(std::string& device_type,\n                                              std::string& device_id) {\n  std::lock_guard<std::mutex> lock(devices_memories_usage_mutex_);\n  std::string device = device_type + 
device_id;\n  if (devices_memories_usage_.find(device) == devices_memories_usage_.end()) {\n    MBLOG_ERROR << \"can not get device memory usage profiling information, \"\n                   \"device_type : \"\n                << device_type << \", device_id : \" << device_id;\n    return 0;\n  }\n\n  auto device_memories_usage = devices_memories_usage_[device];\n  if (device_memories_usage.empty()) {\n    return 0;\n  }\n\n  return device_memories_usage.rbegin()->second;\n}\n\nvoid CpuUsageCollector::Export(nlohmann::json& perf_data) {\n  data_mutex_.lock();\n  nlohmann::json cpu_usage_json_arr = nlohmann::json::array();\n  for (const auto& data : cpu_usage_data_list) {\n    nlohmann::json cpu_usage_json;\n    cpu_usage_json[\"timestamp\"] = data.timestamp_.time_since_epoch().count();\n    cpu_usage_json[\"percentage\"] = data.percentage_;\n    cpu_usage_json_arr.push_back(cpu_usage_json);\n  }\n\n  data_mutex_.unlock();\n  perf_data[\"cpu_usage\"] = cpu_usage_json_arr;\n\n  cpu_usage_data_list.clear();\n}\n\nvoid CpuUsageCollector::Collect() {\n  auto current_time = std::chrono::time_point_cast<std::chrono::microseconds>(\n      std::chrono::system_clock::now());\n  int32_t cpu_percentage = 0;\n  data_mutex_.lock();\n  cpu_usage_data_list.emplace_back(current_time, cpu_percentage);\n  data_mutex_.unlock();\n}\n\nbool CpuUsageCollector::Empty() { return cpu_usage_data_list.empty(); }\n\nvoid MemUsageCollector::Export(nlohmann::json& perf_data) {\n  data_mutex_.lock();\n  nlohmann::json mem_usage_json_arr = nlohmann::json::array();\n  for (const auto& data : mem_usage_data_list_) {\n    nlohmann::json mem_usage_json;\n    mem_usage_json[\"timestamp\"] = data.timestamp_.time_since_epoch().count();\n    mem_usage_json[\"host_mem\"] = data.host_mem_;\n    mem_usage_json[\"host_mem_percentage\"] = data.host_mem_percentage_;\n    nlohmann::json device_mem_usage_json_arr = nlohmann::json::array();\n    for (const auto& dev_mem_data : data.device_mem_data_list_) {\n      
nlohmann::json device_mem_usage_json;\n      device_mem_usage_json[\"device\"] = dev_mem_data.device_tag_;\n      device_mem_usage_json[\"device_mem\"] = dev_mem_data.device_mem_;\n      device_mem_usage_json[\"device_mem_percentage\"] =\n          dev_mem_data.device_mem_percentage_;\n      device_mem_usage_json_arr.push_back(device_mem_usage_json);\n    }\n\n    mem_usage_json[\"device_memory\"] = device_mem_usage_json_arr;\n    mem_usage_json_arr.push_back(mem_usage_json);\n  }\n\n  data_mutex_.unlock();\n  perf_data[\"memory_usage\"] = mem_usage_json_arr;\n  mem_usage_data_list_.clear();\n}\n\nvoid MemUsageCollector::Collect() {\n  auto current_time = std::chrono::time_point_cast<std::chrono::microseconds>(\n      std::chrono::system_clock::now());\n  MemUsageData data(current_time, 0, 0);\n  for (const auto& device : *devices_) {\n    std::string device_type = device.second.first;\n    std::string device_id = device.second.second;\n    std::string device_tag = device_type + \":\";\n    device_tag += device_id;\n    int32_t memory = 0;\n    int32_t memory_percentage = 0;\n    data.AddDeviceMemUsageData(device_tag, memory, memory_percentage);\n  }\n\n  data_mutex_.lock();\n  mem_usage_data_list_.push_back(data);\n  data_mutex_.unlock();\n}\n\nbool MemUsageCollector::Empty() { return mem_usage_data_list_.empty(); }\n\nFlowUnitPerfCollector::FlowUnitPerfCollector(\n    std::shared_ptr<std::map<std::string, std::pair<std::string, std::string>>>\n        devices,\n    std::shared_ptr<std::vector<std::string>> flow_unit_names)\n    : devices_(std::move(devices)),\n      flow_unit_names_(std::move(flow_unit_names)) {}\n\nvoid FlowUnitPerfCollector::Export(nlohmann::json& perf_data) {\n  data_mutex_.lock();\n  nlohmann::json flow_unit_perf_json_arr = nlohmann::json::array();\n  for (const auto& item : flow_unit_per_ctx_map_) {\n    nlohmann::json flow_unit_perf_json;\n    flow_unit_perf_json[\"flow_unit_name\"] = item.first;\n    
flow_unit_perf_json[\"process_latency\"] = item.second->GetProcessLatency();\n\n    flow_unit_perf_json_arr.push_back(flow_unit_perf_json);\n  }\n\n  data_mutex_.unlock();\n  perf_data[\"flow_unit_performance\"] = flow_unit_perf_json_arr;\n}\n\nFlowUnitPerfCollector::~FlowUnitPerfCollector() = default;\n\nvoid FlowUnitPerfCollector::Collect() {}\n\nbool FlowUnitPerfCollector::Empty() { return flow_unit_per_ctx_map_.empty(); }\n\nstd::shared_ptr<FlowUnitPerfCtx> FlowUnitPerfCollector::GetFlowUnitPerfCtx(\n    const std::string& flow_unit_name) {\n  std::lock_guard<std::mutex> perf_lock(data_mutex_);\n  if (flow_unit_per_ctx_map_.find(flow_unit_name) ==\n      flow_unit_per_ctx_map_.end()) {\n    auto flow_unit_perf_ctx = std::make_shared<FlowUnitPerfCtx>(flow_unit_name);\n    flow_unit_per_ctx_map_[flow_unit_name] = flow_unit_perf_ctx;\n    return flow_unit_perf_ctx;\n  }\n\n  return flow_unit_per_ctx_map_[flow_unit_name];\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/profiling/profiler.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/profiler.h\"\n\n#include <sys/stat.h>\n\n#include <fstream>\n#include <iomanip>\n#include <iostream>\n#include <utility>\n\nnamespace modelbox {\n\nconstexpr const char* PROFILE_DEFAULT_PATH = \"/tmp/modelbox/perf\";\n\nProfilerLifeCycle::ProfilerLifeCycle(std::string name)\n    : name_(std::move(name)) {}\n\nProfilerLifeCycle::~ProfilerLifeCycle() = default;\n\nStatus ProfilerLifeCycle::Init() {\n  if (is_initialized_) {\n    MBLOG_INFO << name_ << \" has been initialized, no need to init\";\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = OnInit();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  is_initialized_ = true;\n  return STATUS_SUCCESS;\n}\n\nStatus ProfilerLifeCycle::Start() {\n  if (is_running_) {\n    MBLOG_INFO << name_ << \" is running, no need to start\";\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = OnStart();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  is_running_ = true;\n  return STATUS_SUCCESS;\n}\n\nStatus ProfilerLifeCycle::Stop() {\n  if (!is_running_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = OnStop();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  is_running_ = false;\n  return STATUS_SUCCESS;\n}\n\nStatus ProfilerLifeCycle::Pause() {\n  if (!is_running_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = 
OnPause();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  is_running_ = false;\n  return STATUS_SUCCESS;\n}\n\nStatus ProfilerLifeCycle::Resume() {\n  if (is_running_) {\n    MBLOG_INFO << name_ << \" is running, no need to resume\";\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = OnResume();\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  is_running_ = true;\n  return STATUS_SUCCESS;\n}\n\nProfiler::Profiler(std::shared_ptr<DeviceManager> device_mgr,\n                   std::shared_ptr<Configuration> config)\n    : ProfilerLifeCycle(\"Profiler\"),\n      device_mgr_(std::move(device_mgr)),\n      config_(std::move(config)),\n      perf_(nullptr),\n      trace_(nullptr) {}\n\nProfiler::~Profiler() {\n  if (IsRunning()) {\n    Stop();\n  }\n}\n\nstd::shared_ptr<Performance> Profiler::GetPerf() { return perf_; }\n\nstd::shared_ptr<Trace> Profiler::GetTrace() { return trace_; }\n\nStatus Profiler::OnInit() {\n  bool performance_enable = false;\n  bool trace_enable = false;\n  bool session_enable = false;\n\n  performance_enable = config_->GetBool(\"profile.performance\");\n  trace_enable = config_->GetBool(\"profile.trace\");\n  session_enable = config_->GetBool(\"profile.session\");\n\n  if (performance_enable || trace_enable) {\n    auto ret = InitProfilerDir();\n    if (ret != STATUS_OK) {\n      return STATUS_FAULT;\n    }\n  }\n\n  if (performance_enable) {\n    perf_ = std::make_shared<Performance>(device_mgr_, output_dir_path_);\n    perf_->Init();\n  }\n\n  if (trace_enable) {\n    trace_ = std::make_shared<Trace>(output_dir_path_, perf_, session_enable);\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Profiler::InitProfilerDir() {\n  auto* profile_dir_path = getenv(PROFILE_PATH_ENV);\n  if (profile_dir_path == nullptr) {\n    output_dir_path_ = config_->GetString(\"profile.dir\", PROFILE_DEFAULT_PATH);\n  } else {\n    output_dir_path_ = profile_dir_path;\n  }\n\n  const std::string filter_dir =\n      
\"(/bin)|(/boot)|(/sbin)|(/etc)|(/dev)|(/proc)|(/sys)|(/var)\";\n  const std::string black_dir_str =\n      filter_dir + \"|\" + \"((\" + filter_dir + \")/.*)\";\n  std::regex valid_str(black_dir_str);\n\n  output_dir_path_ = PathCanonicalize(output_dir_path_);\n  if (std::regex_match(output_dir_path_, valid_str)) {\n    MBLOG_ERROR << \"profiler dir invalid, please type valid profiler dir.\";\n    return STATUS_FAULT;\n  }\n\n  if (output_dir_path_.length() <= 0) {\n    output_dir_path_ = PROFILE_DEFAULT_PATH;\n  }\n\n  MBLOG_INFO << \"profiler save dir: \" << output_dir_path_;\n  Status ret = CreateDirectory(output_dir_path_);\n  if (ret != STATUS_OK) {\n    MBLOG_FATAL << \"create directory : \" << output_dir_path_ << \" failed, \"\n                << ret;\n    return ret;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Profiler::OnStart() {\n  if (perf_ != nullptr) {\n    perf_->Start();\n  }\n\n  if (trace_ != nullptr) {\n    trace_->Start();\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Profiler::OnStop() {\n  if (perf_ != nullptr) {\n    perf_->Stop();\n  }\n\n  if (trace_ != nullptr) {\n    trace_->Stop();\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Profiler::OnResume() {\n  if (perf_ != nullptr) {\n    perf_->Resume();\n  }\n\n  if (trace_ != nullptr) {\n    trace_->Resume();\n  }\n\n  return STATUS_SUCCESS;\n}\n\nStatus Profiler::OnPause() {\n  if (perf_ != nullptr) {\n    perf_->Pause();\n  }\n\n  if (trace_ != nullptr) {\n    trace_->Pause();\n  }\n\n  return STATUS_SUCCESS;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/profiling/statistics.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/statistics.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nStatisticsNotifyMsg::StatisticsNotifyMsg(std::string path,\n                                         std::shared_ptr<StatisticsValue> value,\n                                         StatisticsNotifyType type)\n    : path_{std::move(path)}, value_{std::move(value)}, type_{type} {}\n\nStatisticsNotifyMsg::~StatisticsNotifyMsg() = default;\n\n/**\n * StatisticsValue\n */\nStatisticsValue::StatisticsValue(std::shared_ptr<Any> val)\n    : val_(std::move(val)) {}\n\nStatisticsValue::~StatisticsValue() = default;\n\nconst std::type_info& StatisticsValue::GetType() { return val_->type(); }\n\nbool StatisticsValue::IsType(const std::type_info& type) {\n  return type == val_->type();\n}\n\nbool StatisticsValue::IsInt32() { return IsType(typeid(int32_t)); }\n\nbool StatisticsValue::GetInt32(int32_t& val) { return GetValue(val); }\n\nbool StatisticsValue::IsUint32() { return IsType(typeid(uint32_t)); }\n\nbool StatisticsValue::GetUint32(uint32_t& val) { return GetValue(val); }\n\nbool StatisticsValue::IsInt64() { return IsType(typeid(int64_t)); }\n\nbool StatisticsValue::GetInt64(int64_t& val) { return GetValue(val); }\n\nbool StatisticsValue::IsUint64() { return IsType(typeid(uint64_t)); }\n\nbool StatisticsValue::GetUint64(uint64_t& val) 
{ return GetValue(val); }\n\nbool StatisticsValue::IsFloat() { return IsType(typeid(float)); }\n\nbool StatisticsValue::GetFloat(float& val) { return GetValue(val); }\n\nbool StatisticsValue::IsDouble() { return IsType(typeid(double)); }\n\nbool StatisticsValue::GetDouble(double& val) { return GetValue(val); }\n\nbool StatisticsValue::IsBool() { return IsType(typeid(bool)); }\n\nbool StatisticsValue::GetBool(bool& val) { return GetValue(val); }\n\nbool StatisticsValue::IsString() { return IsType(typeid(std::string)); }\n\nbool StatisticsValue::GetString(std::string& val) { return GetValue(val); }\n\nstd::string StatisticsValue::ToString() {\n  if (IsInt32()) {\n    return ToString<int32_t>();\n  }\n\n  if (IsUint32()) {\n    return ToString<uint32_t>();\n  }\n\n  if (IsInt64()) {\n    return ToString<int64_t>();\n  }\n\n  if (IsUint64()) {\n    return ToString<uint64_t>();\n  }\n\n  if (IsFloat()) {\n    return ToString<float>();\n  }\n\n  if (IsDouble()) {\n    return ToString<double>();\n  }\n\n  if (IsBool()) {\n    return ToString<bool>();\n  }\n\n  if (IsString()) {\n    std::string val;\n    GetValue(val);\n    return val;\n  }\n\n  return \"\";\n}\n\nStatisticsNotifyCfg::StatisticsNotifyCfg(std::string path_pattern,\n                                         StatisticsNotifyFunc func,\n                                         const StatisticsNotifyType& type)\n    : path_pattern_(std::move(path_pattern)),\n      func_(std::move(func)),\n      id_((uintptr_t)this) {\n  type_set_.insert(type);\n}\n\nStatisticsNotifyCfg::StatisticsNotifyCfg(std::string path_pattern,\n                                         StatisticsNotifyFunc func,\n                                         std::set<StatisticsNotifyType> types)\n    : path_pattern_{std::move(path_pattern)},\n      func_{std::move(func)},\n      type_set_(std::move(types)),\n      id_((uintptr_t)this) {}\n\nStatisticsNotifyCfg::StatisticsNotifyCfg(const StatisticsNotifyCfg& other)\n    : 
path_pattern_(other.path_pattern_),\n      func_(other.func_),\n      type_set_(other.type_set_),\n      delay_(other.delay_),\n      interval_(other.interval_),\n      id_(other.id_) {}\n\nbool StatisticsNotifyCfg::operator==(const StatisticsNotifyCfg& other) {\n  return id_ == other.id_;\n}\n\nStatisticsNotifyCfg::~StatisticsNotifyCfg() = default;\n\n/**\n * @brief Set timer notify\n * @param delay Delay to run first, in second, >= 10s\n * @param interval Notify interval, in second, >= 10s\n */\nvoid StatisticsNotifyCfg::SetNotifyTimer(size_t delay, size_t interval) {\n  type_set_.insert(StatisticsNotifyType::TIMER);\n  const size_t second_to_milli = 1000;\n  delay_ = delay * second_to_milli;\n  if (delay_ < minimum_notify_time) {\n    delay_ = minimum_notify_time;\n  }\n\n  interval_ = interval * second_to_milli;\n  if (interval_ < minimum_notify_time) {\n    interval_ = minimum_notify_time;\n  }\n}\n\nstd::string StatisticsNotifyCfg::GetRootPath() const {\n  auto pos = path_pattern_.find('.');\n  if (pos == std::string::npos) {\n    return path_pattern_;\n  }\n\n  return path_pattern_.substr(0, pos);\n}\n\nstd::string StatisticsNotifyCfg::GetSubPath() const {\n  auto pos = path_pattern_.find('.');\n  if (pos == std::string::npos) {\n    return \"\";\n  }\n\n  return path_pattern_.substr(pos + 1);\n}\n\nvoid StatisticsNotifyCfg::BindTimerTask(\n    const std::shared_ptr<TimerTask>& timer_task) {\n  timer_task_ = timer_task;\n}\n\nvoid StatisticsNotifyCfg::RemoveTimerTask() {\n  if (timer_task_ == nullptr) {\n    return;\n  }\n\n  timer_task_->Stop();\n  timer_task_ = nullptr;\n}\n\nStatisticsNotifyConsumers::StatisticsNotifyConsumers() {\n  std::vector<StatisticsNotifyType> all_types = {\n      StatisticsNotifyType::CREATE, StatisticsNotifyType::DELETE,\n      StatisticsNotifyType::CHANGE, StatisticsNotifyType::TIMER};\n  for (auto& type : all_types) {\n    cfg_map_lock_[type] = std::make_shared<std::mutex>();\n  
}\n}\n\nStatisticsNotifyConsumers::~StatisticsNotifyConsumers() { Clear(); }\n\nStatus StatisticsNotifyConsumers::AddConsumer(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  for (const auto& type : cfg->type_set_) {\n    std::lock_guard<std::mutex> lck(*cfg_map_lock_[type]);\n    cfg_map_[type].push_back(cfg);\n  }\n\n  return STATUS_OK;\n}\n\nStatus StatisticsNotifyConsumers::DelConsumer(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  for (const auto& type : cfg->type_set_) {\n    std::lock_guard<std::mutex> lck(*cfg_map_lock_[type]);\n    auto& consumers_for_one_type = cfg_map_[type];\n    consumers_for_one_type.remove_if(\n        [cfg](const std::shared_ptr<StatisticsNotifyCfg>& val) {\n          auto ret = (*cfg == *val);\n          if (ret) {\n            val->RemoveTimerTask();\n          }\n\n          return ret;\n        });\n  }\n\n  return STATUS_OK;\n}\n\nstd::list<std::shared_ptr<StatisticsNotifyCfg>>\nStatisticsNotifyConsumers::GetConsumers(const StatisticsNotifyType& type) {\n  std::lock_guard<std::mutex> lck(*cfg_map_lock_[type]);\n  return cfg_map_[type];\n}\n\nvoid StatisticsNotifyConsumers::Clear() {\n  {\n    auto type_timer = StatisticsNotifyType::TIMER;\n    std::lock_guard<std::mutex> lck(*cfg_map_lock_[type_timer]);\n    auto& consumers_for_timer = cfg_map_[type_timer];\n    for (auto& consumer : consumers_for_timer) {\n      consumer->RemoveTimerTask();\n    }\n  }\n\n  for (auto& lock_item : cfg_map_lock_) {\n    std::lock_guard<std::mutex> lck(*lock_item.second);\n    cfg_map_[lock_item.first].clear();\n  }\n}\n\n/**\n * StatisticsItem\n */\nStatisticsItem::StatisticsItem() {\n  thread_pool_ = std::make_shared<ThreadPool>(2, -1, 1000);\n  thread_pool_->SetName(\"Stat-Notify\");\n  notify_timer_ = std::make_shared<Timer>();\n  notify_timer_->SetName(\"Stat-Timer\");\n  notify_timer_->Start();\n  last_change_notify_time_ = std::chrono::steady_clock::now();\n}\n\nStatisticsItem::StatisticsItem(std::string 
parent_path, std::string name,\n                               std::weak_ptr<StatisticsItem> parent)\n    : parent_path_(std::move(parent_path)),\n      name_(std::move(name)),\n      parent_(std::move(parent)) {\n  if (!parent_path_.empty()) {\n    path_ = parent_path_ + \".\" + name_;\n  } else {\n    path_ = name_;\n  }\n\n  last_change_notify_time_ = std::chrono::steady_clock::now();\n}\n\nStatisticsItem::~StatisticsItem() {\n  consumers_.Clear();\n  ClearItem();\n}\n\nstd::shared_ptr<StatisticsItem> StatisticsItem::AddItem(\n    const std::string& name) {\n  if (!is_alive_) {\n    StatusError = {STATUS_FAULT, \"This item is disposed\"};\n    return nullptr;\n  }\n\n  std::lock_guard<std::mutex> lck(children_lock_);\n  return AddItemInner(name, nullptr);\n}\n\nstd::shared_ptr<StatisticsItem> StatisticsItem::AddItemInner(\n    const std::string& name, const std::shared_ptr<Any>& value) {\n  if (IsLeaf()) {\n    StatusError = {STATUS_NOTSUPPORT, \"This is a leaf node, can not add item.\"};\n    return nullptr;\n  }\n\n  if (name.empty()) {\n    StatusError = {STATUS_INVALID, \"Add item failed, name is empty\"};\n    return nullptr;\n  }\n\n  if (name == \"*\") {\n    StatusError = {STATUS_INVALID, \"Item name should not be '*'\"};\n    return nullptr;\n  }\n\n  auto* child_ptr = new StatisticsItem(path_, name, shared_from_this());\n  std::shared_ptr<StatisticsItem> child(child_ptr);\n  child->thread_pool_ = thread_pool_;\n  child->notify_timer_ = notify_timer_;\n  child->value_ = value;\n  if (value != nullptr) {\n    child->is_leaf_ = true;\n  }\n  // Delay register\n  {\n    std::lock_guard<std::mutex> lck(child_notify_cfg_lock_);\n    auto all_child_notify_cfg = children_notify_cfg_map_[\"*\"];\n    for (auto& cfg : all_child_notify_cfg) {\n      child->RegisterNotify(cfg);\n    }\n\n    auto specify_child_notify_cfg_item = children_notify_cfg_map_.find(name);\n    if (specify_child_notify_cfg_item != children_notify_cfg_map_.end()) {\n      auto& 
specify_child_notify_cfg = specify_child_notify_cfg_item->second;\n      for (auto& cfg : specify_child_notify_cfg) {\n        child->RegisterNotify(cfg);\n      }\n    }\n  }\n\n  child->Notify(StatisticsNotifyType::CREATE);\n  children_[name] = child;\n  children_name_set_.insert(name);\n  StatusError = STATUS_OK;\n  return child;\n}\n\nstd::shared_ptr<StatisticsItem> StatisticsItem::GetItem(\n    const std::string& child_path) {\n  auto child_name = child_path;\n  std::string sub_path;\n  auto pos = child_path.find('.');\n  if (pos != std::string::npos) {\n    child_name = child_path.substr(0, pos);\n    sub_path = child_path.substr(pos + 1);\n  }\n\n  std::shared_ptr<StatisticsItem> child;\n  {\n    std::lock_guard<std::mutex> lck(children_lock_);\n    auto item = children_.find(child_name);\n    if (item == children_.end()) {\n      return nullptr;\n    }\n\n    child = item->second;\n  }\n\n  if (sub_path.empty()) {\n    return child;\n  }\n\n  return child->GetItem(sub_path);\n}\n\nvoid StatisticsItem::DelItem(const std::string& name) noexcept {\n  std::lock_guard<std::mutex> lck(children_lock_);\n  auto item = children_.find(name);\n  if (item == children_.end()) {\n    return;\n  }\n\n  // Avoid that child has be captured by others\n  auto& child = item->second;\n  child->is_alive_ = false;\n  child->ClearItem();\n  child->Notify(StatisticsNotifyType::DELETE);\n  child->consumers_.Clear();\n  child->parent_.reset();\n  children_name_set_.erase(name);\n  children_.erase(name);\n}\n\nvoid StatisticsItem::ClearItem() {\n  std::set<std::string> children_name_set;\n  {\n    std::lock_guard<std::mutex> lck(children_lock_);\n    children_name_set = children_name_set_;\n  }\n\n  for (const auto& name : children_name_set) {\n    DelItem(name);\n  }\n}\n\nvoid StatisticsItem::Dispose() {\n  auto parent_ptr = parent_.lock();\n  if (!parent_ptr) {\n    MBLOG_WARN << \"Parent for \" << path_ << \" not exist\";\n    return;\n  }\n\n  
parent_ptr->DelItem(name_);\n}\n\nbool StatisticsItem::HasItem(const std::string& name) {\n  std::lock_guard<std::mutex> lck(children_lock_);\n  return children_.find(name) != children_.end();\n}\n\nstd::set<std::string> StatisticsItem::GetItemNames() {\n  return children_name_set_;\n}\n\nStatus StatisticsItem::ForEach(const StatisticsForEachFunc& func,\n                               bool recursive) {\n  ForEachInner(func, recursive, path_);\n  return STATUS_OK;\n}\n\nstd::string StatisticsItem::GetRelativePath(const std::string& base_path) {\n  if (base_path.empty()) {\n    return path_;\n  }\n\n  return path_.substr(base_path.size() + 1);\n}\n\nStatus StatisticsItem::ForEachInner(const StatisticsForEachFunc& func,\n                                    bool recursive,\n                                    const std::string& base_path) {\n  std::map<std::string, std::shared_ptr<StatisticsItem>> childrens;\n  {\n    std::lock_guard<std::mutex> lck(children_lock_);\n    childrens = children_;\n  }\n\n  for (auto& child_iter : childrens) {\n    auto& child = child_iter.second;\n    auto ret = func(child, child->GetRelativePath(base_path));\n    if (!ret) {\n      return ret;\n    }\n\n    if (recursive) {\n      ret = child->ForEachInner(func, true, base_path);\n      if (!ret) {\n        return ret;\n      }\n    }\n  }\n\n  return STATUS_OK;\n}\n\nStatus StatisticsItem::RegisterNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  if (cfg == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  if (cfg->path_pattern_.empty()) {\n    return AddNotify(cfg);\n  }\n\n  return AddChildrenNotify(cfg);\n}\n\nvoid StatisticsItem::UnRegisterNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  if (cfg == nullptr) {\n    return;\n  }\n\n  if (cfg->path_pattern_.empty()) {\n    DelNotify(cfg);\n    return;\n  }\n\n  DelChildrenNotify(cfg);\n}\n\nStatus StatisticsItem::Notify(const StatisticsNotifyType& type) {\n  auto consumer_list = 
consumers_.GetConsumers(type);\n  if (consumer_list.empty()) {\n    return STATUS_OK;\n  }\n\n  if (type == StatisticsNotifyType::CHANGE) {\n    // Avoid lock frequently\n    if ((std::chrono::steady_clock::now() - last_change_notify_time_) <\n        std::chrono::seconds(1)) {\n      return STATUS_BUSY;\n    }\n\n    // Avoid data race\n    std::lock_guard<std::mutex> lck(last_change_notify_time_lock_);\n    auto now = std::chrono::steady_clock::now();\n    if ((now - last_change_notify_time_) < std::chrono::seconds(1)) {\n      return STATUS_BUSY;\n    }\n\n    last_change_notify_time_ = now;\n  }\n\n  auto msg = std::make_shared<StatisticsNotifyMsg>(path_, GetValue(), type);\n  if (thread_pool_ == nullptr) {\n    MBLOG_ERROR << \"Thread pool is nullptr, can not submit notify action\";\n    return STATUS_INVALID;\n  }\n\n  auto notify_action = [consumer_list, msg]() {\n    for (const auto& cfg : consumer_list) {\n      cfg->func_(msg);\n    }\n  };\n  thread_pool_->Submit(notify_action);\n  return STATUS_OK;\n}\n\nStatus StatisticsItem::AddNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  consumers_.AddConsumer(cfg);\n  if (cfg->type_set_.find(StatisticsNotifyType::TIMER) ==\n      cfg->type_set_.end()) {\n    return STATUS_OK;\n  }\n\n  if (notify_timer_ == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  auto timer_task = std::make_shared<TimerTask>();\n  timer_task->SetName(path_);\n  timer_task->Callback([this, cfg]() {\n    auto msg = std::make_shared<StatisticsNotifyMsg>(\n        path_, GetValue(), StatisticsNotifyType::TIMER);\n    if (thread_pool_ == nullptr) {\n      MBLOG_ERROR << \"Thread pool is nullptr, can not submit notify action\";\n      return;\n    }\n\n    thread_pool_->Submit(cfg->func_, msg);\n  });\n  notify_timer_->Schedule(timer_task, cfg->delay_, cfg->interval_);\n  cfg->BindTimerTask(timer_task);\n  return STATUS_OK;\n}\n\nvoid StatisticsItem::DelNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  
consumers_.DelConsumer(cfg);\n}\n\nStatus StatisticsItem::AddChildrenNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  auto root_path = cfg->GetRootPath();\n  auto child_cfg = std::make_shared<StatisticsNotifyCfg>(*cfg);\n  child_cfg->path_pattern_ = cfg->GetSubPath();\n  // Lock here to avoid one case:\n  // 1.child created register cfg.\n  // 2.new child added. finally.\n  // 3.children_notify_cfg_map_ add the cfg.\n  std::lock_guard<std::mutex> lck(children_lock_);\n  // Register to the child created before\n  if (root_path != \"*\") {\n    auto item = children_.find(root_path);\n    if (item != children_.end()) {\n      item->second->RegisterNotify(child_cfg);\n    }\n  } else {\n    for (auto& child : children_) {\n      child.second->RegisterNotify(child_cfg);\n    }\n  }\n\n  // Prepare for the child created after\n  std::lock_guard<std::mutex> cfg_lck(child_notify_cfg_lock_);\n  auto& cfg_list = children_notify_cfg_map_[root_path];\n  cfg_list.push_back(child_cfg);\n  return STATUS_OK;\n}\n\nvoid StatisticsItem::DelChildrenNotify(\n    const std::shared_ptr<StatisticsNotifyCfg>& cfg) {\n  auto root_path = cfg->GetRootPath();\n  auto child_cfg = std::make_shared<StatisticsNotifyCfg>(*cfg);\n  child_cfg->path_pattern_ = cfg->GetSubPath();\n  std::lock_guard<std::mutex> cfg_lck(child_notify_cfg_lock_);\n  auto& cfg_list = children_notify_cfg_map_[root_path];\n  cfg_list.remove_if([cfg](const std::shared_ptr<StatisticsNotifyCfg>& val) {\n    return *cfg == *val;\n  });\n\n  std::lock_guard<std::mutex> lck(children_lock_);\n  if (root_path != \"*\") {\n    auto item = children_.find(root_path);\n    if (item != children_.end()) {\n      item->second->UnRegisterNotify(child_cfg);\n    }\n  } else {\n    for (auto& child : children_) {\n      child.second->UnRegisterNotify(child_cfg);\n    }\n  }\n}\n\nstatic std::shared_ptr<StatisticsItem> kGlobalRootStats;\nstd::mutex kGlobRootStatLock;\n\nstd::shared_ptr<StatisticsItem> 
Statistics::GetGlobalItem() {\n  if (kGlobalRootStats) {\n    return kGlobalRootStats;\n  }\n\n  std::lock_guard<std::mutex> lock(kGlobRootStatLock);\n  if (kGlobalRootStats) {\n    return kGlobalRootStats;\n  }\n\n  kGlobalRootStats = std::make_shared<StatisticsItem>();\n  auto flow_item = kGlobalRootStats->AddItem(STATISTICS_ITEM_FLOW);\n\n  if (flow_item == nullptr) {\n    MBLOG_ERROR << \"Add item \" << STATISTICS_ITEM_FLOW << \"failed\";\n  }\n\n  return kGlobalRootStats;\n}\n\nvoid Statistics::ReleaseGlobalItem() { kGlobalRootStats = nullptr; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/libmodelbox/profiling/trace.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <fstream>\n#include <nlohmann/json.hpp>\n#include <utility>\n\n#include \"modelbox/profiler.h\"\n\nnamespace modelbox {\nconst std::map<TraceSliceType, std::string> TRACE_SLICE_TYPE = {\n    {TraceSliceType::OPEN, \"OPEN\"},\n    {TraceSliceType::CLOSE, \"CLOSE\"},\n    {TraceSliceType::PROCESS, \"PROCESS\"},\n    {TraceSliceType::STREAM_OPEN, \"STREAM_OPEN\"},\n    {TraceSliceType::STREAM_CLOSE, \"STREAM_CLOSE\"}};\n\nTrace::Trace(std::string output_dir_path, std::shared_ptr<Performance> perf,\n             bool session_enable)\n    : ProfilerLifeCycle(\"Trace\"),\n      output_dir_path_(std::move(output_dir_path)),\n      perf_(std::move(perf)),\n      write_file_interval_(DEFAULT_WRITE_TRACE_INTERVAL),\n      session_enable_(session_enable) {}\n\nTrace::~Trace() {\n  if (IsRunning()) {\n    Stop();\n  }\n}\n\nStatus Trace::OnStart() {\n  timer_run_ = true;\n  timer_ = std::make_shared<std::thread>(&Trace::TraceWork, this);\n  return STATUS_SUCCESS;\n}\n\nStatus Trace::OnResume() { return OnStart(); }\n\nStatus Trace::OnStop() {\n  OnPause();\n\n  WriteTrace();\n  traces_.clear();\n  return STATUS_SUCCESS;\n}\n\nStatus Trace::OnPause() {\n  if (timer_) {\n    timer_run_ = false;\n    timer_->join();\n    timer_ = nullptr;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<FlowUnitTrace> 
Trace::FlowUnit(\n    const std::string& flow_unit_name) {\n  std::unique_lock<std::mutex> trace_lock(trace_mutex_);\n\n  if (traces_.find(flow_unit_name) == traces_.end()) {\n    auto flow_unit_trace =\n        std::shared_ptr<FlowUnitTrace>(new FlowUnitTrace(flow_unit_name));\n    if (perf_ != nullptr) {\n      auto flow_unit_profile = perf_->GetFlowUnitPerfCtx(flow_unit_name);\n      flow_unit_trace->SetFlowUnitPerfCtx(flow_unit_profile);\n    }\n\n    traces_.insert(std::make_pair(flow_unit_name, flow_unit_trace));\n    return flow_unit_trace;\n  }\n\n  return traces_[flow_unit_name];\n}\n\nstd::string Trace::TraceSliceTypeToString(TraceSliceType type) {\n  if (TRACE_SLICE_TYPE.find(type) == TRACE_SLICE_TYPE.end()) {\n    MBLOG_ERROR << \"parse TraceSliceType to string failed\";\n    return \"\";\n  }\n\n  return TRACE_SLICE_TYPE.at(type);\n}\n\nvoid Trace::TraceWork() {\n  unsigned long now = {0};\n  int32_t sleep = DEFAULT_TIMER_SAMPLE_INTERVAL;\n  int32_t sleep_time = 0;\n  unsigned long expect_time = 0;\n\n  MBLOG_INFO << \"trace timer start\";\n\n  now = GetTickCount();\n  expect_time = now + sleep;\n\n  uint32_t count = 0;\n  while (timer_run_) {\n    now = GetTickCount();\n    sleep_time = expect_time - now;\n    if (sleep_time < 0) {\n      sleep_time = 0;\n      expect_time = now;\n    }\n\n    expect_time += sleep;\n    std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time));\n    count++;\n    if (count > write_file_interval_) {\n      WriteTrace();\n      count = 0;\n    }\n  }\n\n  MBLOG_INFO << \"trace timer end\";\n}\n\nvoid Trace::SetWriteFileInterval(int32_t threshold) {\n  write_file_interval_ = threshold;\n}\n\nuint32_t Trace::GetWriteFileInterval() { return write_file_interval_; }\n\nvoid Trace::SetSessionEnable() { session_enable_ = true; }\n\nStatus Trace::WriteTrace() {\n  nlohmann::json traces_json = nlohmann::json::array();\n  std::unique_lock<std::mutex> lock(trace_mutex_);\n  if (traces_.empty()) {\n    return 
STATUS_SUCCESS;\n  }\n\n  uint64_t valid_trace_count = 0;\n  for (const auto& trace : traces_) {\n    std::string flow_unit_name = trace.second->GetFlowUnitName();\n    std::vector<std::shared_ptr<TraceSlice>> trace_slices;\n    trace.second->GetTraceSlices(trace_slices);\n    for (const auto& slice : trace_slices) {\n      if (slice->GetDuration() < 0) {\n        continue;\n      }\n\n      valid_trace_count++;\n      nlohmann::json trace_json;\n      // Global\n      nlohmann::json args;\n      args[\"batch_size\"] = slice->GetBatchSize();\n\n      trace_json[\"name\"] = TraceSliceTypeToString(slice->GetTraceSliceType());\n      trace_json[\"dur\"] = slice->GetDuration();\n      trace_json[\"ts\"] =\n          slice->GetBeginEvent()->GetEventTime().time_since_epoch().count();\n      trace_json[\"tid\"] = flow_unit_name;\n      trace_json[\"ph\"] = \"X\";\n      trace_json[\"pid\"] = \"Graph\";\n      trace_json[\"args\"] = args;\n      traces_json.push_back(trace_json);\n      // Session\n      if (session_enable_) {\n        trace_json[\"pid\"] = \"Session:\" + slice->GetSession();\n        traces_json.push_back(trace_json);\n      }\n    }\n  }\n\n  lock.unlock();\n\n  if (valid_trace_count == 0) {\n    return STATUS_SUCCESS;\n  }\n\n  time_t current_time = time(nullptr);\n  char buf[64] = {0};\n  struct tm tm_result;\n  auto* local_tm = localtime_r(&current_time, &tm_result);\n  if (local_tm) {\n    strftime(buf, sizeof(buf), \"%Y-%m-%d-%H-%M-%S\", local_tm);\n  }\n\n  // TODO: graph_name + task_name + timestample\n  std::string file_path =\n      output_dir_path_ + \"/\" + \"trace_\" + std::string(buf) + \".json\";\n\n  std::ofstream out(file_path);\n  if (out.is_open() == false) {\n    MBLOG_ERROR << \"write trace failed, file path : \" << file_path;\n    return STATUS_FAULT;\n  }\n  Defer { out.close(); };\n\n  std::string traces_json_str = traces_json.dump();\n  out.write(traces_json_str.c_str(), traces_json_str.size());\n  if (out.rdstate() & 
std::ios::failbit) {\n    MBLOG_ERROR << \"Write file \" << file_path << \" failed\";\n    return STATUS_FAULT;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nFlowUnitTrace::FlowUnitTrace(std::string flow_unit_name)\n    : flow_unit_name_(std::move(flow_unit_name)) {}\n\nFlowUnitTrace::~FlowUnitTrace() = default;\n\nstd::shared_ptr<TraceSlice> FlowUnitTrace::Slice(TraceSliceType slice_type,\n                                                 std::string session) {\n  std::unique_lock<std::mutex> lock(trace_slices_mutex_);\n  auto slice_ptr = std::shared_ptr<TraceSlice>(new TraceSlice(\n      slice_type, std::move(session), shared_from_this(), flow_unit_perf_ctx_));\n\n  return slice_ptr;\n}\n\nStatus FlowUnitTrace::AddTraceSlice(\n    const std::shared_ptr<TraceSlice>& trace_slice) {\n  if (trace_slice == nullptr) {\n    return STATUS_FAULT;\n  }\n\n  std::unique_lock<std::mutex> lock(trace_slices_mutex_);\n  trace_slices_.emplace_back(trace_slice);\n\n  return STATUS_SUCCESS;\n}\n\nvoid FlowUnitTrace::GetTraceSlices(\n    std::vector<std::shared_ptr<TraceSlice>>& trace_slices) {\n  std::unique_lock<std::mutex> lock(trace_slices_mutex_);\n  trace_slices.swap(trace_slices_);\n  trace_slices_.clear();\n}\n\nvoid FlowUnitTrace::SetFlowUnitPerfCtx(\n    std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx) {\n  flow_unit_perf_ctx_ = std::move(flow_unit_perf_ctx);\n}\n\nTraceEvent::TraceEvent()\n    : event_time_(std::chrono::time_point_cast<std::chrono::milliseconds>(\n          std::chrono::system_clock::now())),\n      thread_id_(std::this_thread::get_id()) {}\n\nTraceEvent::~TraceEvent() = default;\n\nTraceEvent& TraceEvent::SetEventType(const EventType& event_type) {\n  event_type_ = event_type;\n  return *this;\n}\n\nconst EventType& TraceEvent::GetEventType() const { return event_type_; }\n\nTraceEvent& TraceEvent::SetEventTime(const TimePoint& event_time) {\n  event_time_ = event_time;\n  return *this;\n}\n\nconst TimePoint& TraceEvent::GetEventTime() const { return 
event_time_; }\n\nTraceEvent& TraceEvent::SetThreadId(std::thread::id thread_id) {\n  thread_id_ = thread_id;\n  return *this;\n}\n\nstd::thread::id TraceEvent::GetThreadId() const { return thread_id_; }\n\nTraceSlice::TraceSlice(\n    TraceSliceType& slice_type, std::string session,\n    const std::shared_ptr<FlowUnitTrace>& flow_unit_trace_ptr,\n    std::shared_ptr<TraceEvent> begin, std::shared_ptr<TraceEvent> end)\n    : slice_type_(slice_type),\n      session_(std::move(session)),\n      flow_unit_trace_ptr_(flow_unit_trace_ptr),\n      begin_event_ptr_(std::move(begin)),\n      end_event_ptr_(std::move(end)),\n      is_end_called_(false),\n      batch_size_(0) {}\n\nTraceSlice::TraceSlice(\n    TraceSliceType& slice_type, std::string session,\n    const std::shared_ptr<FlowUnitTrace>& flow_unit_trace_ptr,\n    std::shared_ptr<FlowUnitPerfCtx> flow_unit_perf_ctx)\n    : slice_type_(slice_type),\n      session_(std::move(session)),\n      flow_unit_trace_ptr_(flow_unit_trace_ptr),\n      flow_unit_perf_ctx_(std::move(flow_unit_perf_ctx)),\n      is_end_called_(false),\n      batch_size_(0) {}\n\nTraceSlice::~TraceSlice() {\n  if (!is_end_called_) {\n    End();\n  }\n}\n\nint32_t TraceSlice::GetDuration() {\n  if (begin_event_ptr_ == nullptr || end_event_ptr_ == nullptr) {\n    return -1;\n  }\n\n  std::chrono::duration<double, std::micro> duration =\n      std::chrono::duration<double, std::micro>(\n          end_event_ptr_->GetEventTime() - begin_event_ptr_->GetEventTime());\n\n  return duration.count();\n}\n\nstd::string TraceSlice::GetSession() { return session_; }\n\nvoid TraceSlice::Begin() {\n  begin_event_ptr_.reset(new TraceEvent());\n  begin_event_ptr_->SetEventType(EventType::BEGIN);\n  begin_event_ptr_->SetEventTime(\n      std::chrono::time_point_cast<std::chrono::microseconds>(\n          std::chrono::system_clock::now()));\n  begin_event_ptr_->SetThreadId(std::this_thread::get_id());\n}\n\nvoid TraceSlice::End() {\n  is_end_called_ = true;\n  auto 
flow_unit_trace = flow_unit_trace_ptr_.lock();\n  if (flow_unit_trace == nullptr) {\n    return;\n  }\n\n  end_event_ptr_.reset(new TraceEvent());\n  end_event_ptr_->SetEventType(EventType::END);\n  end_event_ptr_->SetEventTime(\n      std::chrono::time_point_cast<std::chrono::microseconds>(\n          std::chrono::system_clock::now()));\n  end_event_ptr_->SetThreadId(std::this_thread::get_id());\n\n  std::shared_ptr<TraceSlice> new_slice_ptr(\n      new TraceSlice(slice_type_, session_, flow_unit_trace, begin_event_ptr_,\n                     end_event_ptr_));\n  new_slice_ptr->is_end_called_ = true;\n  new_slice_ptr->SetBatchSize(batch_size_);\n\n  // FIXME : Not good to update flow unit perf in trace\n  if ((TraceSliceType::PROCESS == slice_type_) &&\n      (flow_unit_perf_ctx_ != nullptr)) {\n    flow_unit_perf_ctx_->UpdateProcessLatency(new_slice_ptr->GetDuration());\n  }\n\n  flow_unit_trace->AddTraceSlice(new_slice_ptr);\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif (STANDALONE)\n    set(MODELBOX_ROOT_VAR \"\\${MODELBOX_ROOT}\")\nendif()\n\nadd_subdirectory(common)\nadd_subdirectory(manager)\nadd_subdirectory(server)\nadd_subdirectory(serving)\nadd_subdirectory(tool)\n\nset(MODELBOX_PROG_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\nset(MODELBOX_PROG_INCLUDE \n    ${MODELBOX_PROG_INCLUDE} \n    CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/modelbox/common/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-common)\nfile(GLOB_RECURSE SOURCES *.cpp *.cc *.c)\n\nset(MODELBOX_COMMON_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${TLOG_INCLUDE})\ninclude_directories(${TOML_INCLUDE_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(LIBRARY modelbox-common-object)\nadd_library(${LIBRARY} STATIC ${SOURCES})\ntarget_link_libraries(${LIBRARY} ${TLOG_STATIC_LIBRARIES})\nset_property(TARGET ${LIBRARY} PROPERTY POSITION_INDEPENDENT_CODE ON)\n\nset(MODELBOX_COMMON_LIBRARY ${LIBRARY} CACHE INTERNAL \"\")\nset(MODELBOX_COMMON_INCLUDE ${MODELBOX_COMMON_INCLUDE} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/modelbox/common/command.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/command.h\"\n\nnamespace modelbox {\n\nstd::recursive_mutex ToolCommandGetOptLock;\n\nStdOutStream::StdOutStream() = default;\nStdOutStream::~StdOutStream() = default;\n\nvoid StdOutStream::ProcessStream(OStream *st) { std::cout << st; }\n\nStdErrStream::StdErrStream() = default;\n\nStdErrStream::~StdErrStream() = default;\n\nvoid StdErrStream::ProcessStream(OStream *st) { std::cerr << st; }\n\nToolCommand::ToolCommand() = default;\n\nToolCommand::~ToolCommand() = default;\n\nvoid ToolCommand::SetUp(std::shared_ptr<OutStream> cout,\n                        std::shared_ptr<OutStream> cerr) {\n  out_cout_ = std::move(cout);\n  out_cerr_ = std::move(cerr);\n}\n\nToolCommandList::ToolCommandList() = default;\n\nToolCommandList::~ToolCommandList() = default;\n\nToolCommandList *ToolCommandList::Instance() {\n  static bool env_set = false;\n  if (!env_set) {\n    setenv(\"POSIXLY_CORRECT\", \"1\", 1);\n    env_set = true;\n  }\n  static ToolCommandList list;\n  return &list;\n}\n\nvoid ToolCommandList::AddCommand(const ToolCommandCreate &new_func) {\n  auto cmd = new_func();\n  auto name = cmd->GetCommandName();\n  auto itr = commands_.find(name);\n  if (itr != commands_.end()) {\n    commands_.erase(itr);\n  }\n\n  commands_[name] = new_func;\n}\n\nvoid ToolCommandList::RmvCommand(const 
std::string &name) {\n  auto itr = commands_.find(name);\n  if (itr != commands_.end()) {\n    commands_.erase(itr);\n  }\n}\n\nvoid ToolCommandList::Reset() { commands_.clear(); }\n\nstd::shared_ptr<ToolCommand> ToolCommandList::GetCommand(\n    const std::string &name) {\n  auto itr = commands_.find(name);\n  if (itr == commands_.end()) {\n    return nullptr;\n  }\n\n  return commands_[name]();\n}\n\nstd::vector<std::shared_ptr<ToolCommand>> ToolCommandList::GetAllCommands() {\n  std::vector<std::shared_ptr<ToolCommand>> cmds;\n  for (const auto &itr : commands_) {\n    cmds.push_back(itr.second());\n  }\n\n  return cmds;\n}\n\nvoid ToolCommandGetOptReset() {\n  static struct option long_options[] = {{\"-\", 0, nullptr, 0},\n                                         {nullptr, 0, nullptr, 0}};\n  int argc = 2;\n  char const *argv[] = {\"reset\", \"\", nullptr};\n\n  optind = 0;\n  opterr = 0;\n  optopt = 0;\n  getopt_long(argc, const_cast<char **>(argv), \"\", long_options, nullptr);\n  optind = 0;\n  opterr = 0;\n  optopt = 0;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/modelbox/common/config.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/config.h\"\n\n#include <linux/limits.h>\n#include <modelbox/base/configuration.h>\n#include <unistd.h>\n\nnamespace modelbox {\n\nvoid PluginMergeUniq(std::vector<std::string> *to,\n                     std::vector<std::string> *from) {\n  if (from->size() == 0) {\n    return;\n  }\n\n  for (const auto &t : *to) {\n    for (auto itr = from->begin(); itr != from->end(); itr++) {\n      if (t == *itr) {\n        from->erase(itr);\n        break;\n      }\n    }\n  }\n\n  to->insert(to->end(), from->begin(), from->end());\n}\n\nstd::shared_ptr<modelbox::Configuration> LoadSubConfig(\n    const std::string &file) {\n  modelbox::ConfigurationBuilder config_builder;\n\n  auto curr_config = config_builder.Build(file, ConfigType::TOML, true);\n  if (curr_config == nullptr) {\n    MBLOG_ERROR << \"Load config file \" << file\n                << \" failed, detail: \" << modelbox::StatusError.Errormsg();\n    fprintf(stderr, \"Load config %s failed, detail:\\n\", file.c_str());\n    fprintf(stderr, \"%s\\n\", modelbox::StatusError.Errormsg().c_str());\n    return nullptr;\n  }\n\n  auto include_conf_files = curr_config->GetStrings(\"include.files\");\n  curr_config->SetProperty(\"include.files\", std::vector<std::string>());\n\n  auto cur_conf_dir = modelbox::GetDirName(file);\n  if 
(cur_conf_dir.length() <= 0 || cur_conf_dir == \".\") {\n    char cwd[PATH_MAX];\n    cur_conf_dir = getcwd(cwd, sizeof(cwd));\n  }\n\n  for (const auto &conf_file_pattern : include_conf_files) {\n    auto pattern_conf_dir = modelbox::GetDirName(conf_file_pattern);\n    auto pattern_file = modelbox::GetBaseName(conf_file_pattern);\n    std::vector<std::string> conf_files;\n    std::string include_conf_dir;\n    if (pattern_conf_dir.length() > 0 && pattern_conf_dir != \".\") {\n      include_conf_dir = pattern_conf_dir;\n    } else {\n      include_conf_dir = cur_conf_dir;\n    }\n\n    modelbox::ListFiles(include_conf_dir, pattern_file, &conf_files,\n                        modelbox::LIST_FILES_FILE);\n    std::sort(conf_files.begin(), conf_files.end());\n    for (const auto &conf_file : conf_files) {\n      auto conf = LoadSubConfig(conf_file);\n      if (conf == nullptr) {\n        continue;\n      }\n\n      auto plugins = curr_config->GetStrings(\"plugin.files\");\n      auto include_plugins = conf->GetStrings(\"plugin.files\");\n\n      PluginMergeUniq(&plugins, &include_plugins);\n      curr_config->Add(*conf);\n      curr_config->SetProperty(\"plugin.files\", plugins);\n    }\n  }\n\n  return curr_config;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/common/control_msg.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/control_msg.h\"\n\n#include \"securec.h\"\n\nnamespace modelbox {\n\nControlMsg::ControlMsg(size_t buffer_size) {\n  data_buff_ = std::make_shared<std::vector<uint8_t>>();\n  data_buff_->resize(buffer_size);\n}\n\nControlMsg::ControlMsg() {\n  data_buff_ = std::make_shared<std::vector<uint8_t>>();\n  data_buff_->resize(CONTROL_MAX_MSG_LEN);\n}\n\nControlMsg::~ControlMsg() = default;\n\nsize_t ControlMsg::GetRemainSpace() { return data_buff_->size() - data_len_; }\n\nuint8_t *ControlMsg::GetData() { return data_buff_->data(); }\nsize_t ControlMsg::GetDataLen() { return data_len_; }\n\nuint8_t *ControlMsg::GetDataTail() { return data_buff_->data() + data_len_; }\n\nSERVER_CONTROL_MSG_TYPE ControlMsg::GetMsgType() {\n  if (data_ready_ == false) {\n    return msg_type_;\n  }\n\n  auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  return (SERVER_CONTROL_MSG_TYPE)msg_head->type;\n}\n\nvoid ControlMsg::SetMsgType(SERVER_CONTROL_MSG_TYPE type) { msg_type_ = type; }\n\nsize_t ControlMsg::GetMsgLen() {\n  if (data_ready_ == false) {\n    return 0;\n  }\n\n  return sizeof(struct ControlMsgHead) + GetMsgDataLen();\n}\n\nsize_t ControlMsg::GetMsgDataLen() {\n  if (data_ready_ == false) {\n    return 0;\n  }\n\n  auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  return 
msg_head->len;\n}\n\nconst uint8_t *ControlMsg::GetMsgData() {\n  if (data_ready_ == false) {\n    return nullptr;\n  }\n\n  auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  return msg_head->msg;\n}\n\nstruct ControlMsgHead *ControlMsg::GetControlMsgHead() {\n  if (data_ready_ == false) {\n    return nullptr;\n  }\n\n  return (struct ControlMsgHead *)data_buff_->data();\n}\n\nvoid ControlMsg::Flip() {\n  if (data_ready_ == false) {\n    return;\n  }\n\n  auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  int data_msg_len = sizeof(*msg_head) + msg_head->len;\n  int last_data_len = data_len_ - data_msg_len;\n  if (last_data_len > 0) {\n    auto ret = memmove_s(data_buff_->data(), data_buff_->size(),\n                         data_buff_->data() + data_msg_len, last_data_len);\n    if (ret != EOK) {\n      MBLOG_ERROR << \"memcpy_s failed\";\n    }\n  }\n  data_len_ = last_data_len;\n  data_ready_ = false;\n\n  Unserialize();\n}\n\nmodelbox::Status ControlMsg::AppendDataLen(size_t len) {\n  if (len > GetRemainSpace()) {\n    return modelbox::STATUS_NOSPACE;\n  }\n\n  data_len_ += len;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ControlMsg::AppendData(uint8_t *data, size_t data_len) {\n  if (data_len > GetRemainSpace()) {\n    return modelbox::STATUS_NOSPACE;\n  }\n\n  auto ret = memcpy_s(GetDataTail(), GetRemainSpace(), data, data_len);\n  if (ret != EOK) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  AppendDataLen(data_len);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ControlMsg::Unserialize() { return Unserialize(data_buff_); }\n\nmodelbox::Status ControlMsg::Unserialize(\n    const std::shared_ptr<std::vector<uint8_t>> &data_buff) {\n  auto *msg_head = (struct ControlMsgHead *)data_buff->data();\n\n  if (data_ready_ == true) {\n    return modelbox::STATUS_OK;\n  }\n\n  if (data_len_ < sizeof(struct ControlMsgHead)) {\n    return modelbox::STATUS_AGAIN;\n  }\n\n  if (msg_head->magic != CONTROL_MAGIC) {\n    return 
{modelbox::STATUS_INVALID, \"magic is invalid\"};\n  }\n\n  if (msg_head->len >= data_buff_->size() - sizeof(*msg_head)) {\n    return {modelbox::STATUS_INVALID, \"length is invalid\"};\n  }\n\n  if (msg_head->type >= SERVER_CONTROL_MSG_TYPE_BUFF) {\n    return {modelbox::STATUS_INVALID, \"type is invalid\"};\n  }\n\n  data_ready_ = true;\n  SetMsgType((SERVER_CONTROL_MSG_TYPE)msg_head->type);\n  return modelbox::STATUS_OK;\n}\n\nvoid ControlMsg::Reset() {\n  data_ready_ = false;\n  data_len_ = 0;\n}\n\nmodelbox::Status ControlMsg::Serialize() {\n  auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  auto msg_len =\n      SerializeMsg(msg_head->msg, data_buff_->size() - sizeof(*msg_head));\n  if (msg_len < 0) {\n    return modelbox::STATUS_NOBUFS;\n  }\n\n  msg_head->magic = CONTROL_MAGIC;\n  msg_head->type = msg_type_;\n  msg_head->len = msg_len;\n  data_len_ = sizeof(*msg_head) + msg_len;\n  data_ready_ = true;\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ControlMsg::BuildFromOtherMsg(ControlMsg *from_control_msg) {\n  auto ret = modelbox::STATUS_OK;\n  Defer {\n    if (!ret) {\n      Reset();\n    }\n  };\n\n  data_len_ = from_control_msg->data_len_;\n  ret = Unserialize(from_control_msg->data_buff_);\n  if (!ret) {\n    return ret;\n  }\n\n  size_t outer_msg_data_len =\n      from_control_msg->GetDataLen() - from_control_msg->GetMsgLen();\n  if (outer_msg_data_len) {\n    auto rc = memcpy_s(\n        data_buff_->data(), data_buff_->size(),\n        from_control_msg->data_buff_->data() + from_control_msg->GetMsgLen(),\n        outer_msg_data_len);\n    if (rc != EOK) {\n      return modelbox::STATUS_NOBUFS;\n    }\n  }\n\n  data_len_ = from_control_msg->GetMsgLen();\n  auto tmp = from_control_msg->data_buff_;\n  from_control_msg->data_buff_ = data_buff_;\n  data_buff_ = tmp;\n  from_control_msg->Reset();\n  from_control_msg->data_len_ = outer_msg_data_len;\n  if (outer_msg_data_len > 0) {\n    from_control_msg->Unserialize();\n  }\n\n  
auto *msg_head = (struct ControlMsgHead *)data_buff_->data();\n  return UnSerializeMsg(msg_head->msg, msg_head->len);\n}\n\nsize_t ControlMsg::SerializeMsg(uint8_t *buff, size_t buff_max_len) {\n  return 0;\n}\n\nmodelbox::Status ControlMsg::UnSerializeMsg(uint8_t *buff, size_t buff_len) {\n  return modelbox::STATUS_OK;\n}\n\nControlMsgResult::ControlMsgResult() {\n  SetMsgType(SERVER_CONTROL_MSG_TYPE_RESULT);\n}\n\nControlMsgResult::~ControlMsgResult() = default;\nvoid ControlMsgResult::SetResult(int result) { result_ = result; }\n\nint ControlMsgResult::GetResult() { return result_; }\n\nsize_t ControlMsgResult::SerializeMsg(uint8_t *buff, size_t buff_max_len) {\n  int *result = nullptr;\n  if (buff_max_len < sizeof(int)) {\n    return -1;\n  }\n  result = (int *)buff;\n  *result = result_;\n  return sizeof(*result);\n}\n\nmodelbox::Status ControlMsgResult::UnSerializeMsg(uint8_t *buff,\n                                                  size_t buff_len) {\n  int *result;\n  result = nullptr;\n  if (buff_len < sizeof(*result)) {\n    return modelbox::STATUS_NOBUFS;\n  }\n\n  result = (int *)buff;\n  result_ = *result;\n\n  return modelbox::STATUS_OK;\n}\n\nControlMsgString::ControlMsgString() {\n  SetMsgType(SERVER_CONTROL_MSG_TYPE_STRING);\n}\nControlMsgString::~ControlMsgString() = default;\n\nconst std::string &ControlMsgString::GetString() { return str_; }\n\nvoid ControlMsgString::SetString(const std::string &str) { str_ = str; }\n\nsize_t ControlMsgString::SerializeMsg(uint8_t *buff, size_t buff_max_len) {\n  if (buff_max_len < str_.length() + 1) {\n    return -1;\n  }\n\n  auto rc = memcpy_s(buff, buff_max_len, str_.c_str(), str_.length());\n  if (rc != EOK) {\n    return -1;\n  }\n\n  return str_.length() + 1;\n}\n\nmodelbox::Status ControlMsgString::UnSerializeMsg(uint8_t *buff,\n                                                  size_t buff_len) {\n  str_.assign((char *)buff, buff_len);\n  return modelbox::STATUS_OK;\n}\n\nControlMsgHelp::ControlMsgHelp() 
{ SetMsgType(SERVER_CONTROL_MSG_TYPE_HELP); }\nControlMsgHelp::~ControlMsgHelp() = default;\n\nControlMsgStdout::ControlMsgStdout() {\n  SetMsgType(SERVER_CONTROL_MSG_TYPE_OUTMSG);\n}\nControlMsgStdout::~ControlMsgStdout() = default;\n\nControlMsgErrout::ControlMsgErrout() {\n  SetMsgType(SERVER_CONTROL_MSG_TYPE_ERRMSG);\n}\nControlMsgErrout::~ControlMsgErrout() = default;\n\nControlMsgCmd::ControlMsgCmd() { SetMsgType(SERVER_CONTROL_MSG_TYPE_CMD); }\n\nControlMsgCmd::~ControlMsgCmd() = default;\n\nvoid ControlMsgCmd::SetArgs(int argc, char *argv[]) {\n  argv_.clear();\n  for (int i = 0; i < argc; i++) {\n    argv_.emplace_back(argv[i]);\n  }\n}\n\nint ControlMsgCmd::GetArgc() { return argc_; }\nstd::vector<std::string> ControlMsgCmd::GetArgv() { return argv_; }\n\nsize_t ControlMsgCmd::SerializeMsg(uint8_t *buff, size_t buff_max_len) {\n  auto *cmd_head = (struct MsgCmdHead *)buff;\n  if (buff_max_len < sizeof(*cmd_head)) {\n    return -1;\n  }\n\n  cmd_head->magic = CMD_MAGIC;\n  size_t cmd_data_free_len = buff_max_len - sizeof(*cmd_head);\n  char *cmd_data = cmd_head->args;\n\n  for (auto &arg : argv_) {\n    auto *cmd_arg = (struct MsgCmdArg *)cmd_data;\n    if (cmd_data_free_len < sizeof(*cmd_arg)) {\n      return -1;\n    }\n    cmd_data_free_len -= sizeof(*cmd_arg);\n    cmd_data += sizeof(*cmd_arg);\n\n    if (cmd_data_free_len < arg.length() + 1) {\n      return -1;\n    }\n    auto ret =\n        strncpy_s(cmd_arg->arg, cmd_data_free_len, arg.c_str(), arg.length());\n    if (ret != 0) {\n      MBLOG_ERROR << \"strncpy_s failed.\";\n      return -1;\n    }\n    cmd_arg->len = arg.length() + 1;\n    cmd_data_free_len -= arg.length() + 1;\n    cmd_data += arg.length() + 1;\n    cmd_arg->magic = CMD_MAGIC;\n  }\n\n  cmd_head->argc = argv_.size();\n  return (uint8_t *)cmd_data - buff;\n}\n\nmodelbox::Status ControlMsgCmd::UnSerializeMsg(uint8_t *buff, size_t buff_len) {\n  auto *cmd_head = (struct MsgCmdHead *)buff;\n  if (buff_len < sizeof(*cmd_head)) {\n    
return modelbox::STATUS_NOBUFS;\n  }\n\n  if (cmd_head->magic != CMD_MAGIC) {\n    return modelbox::STATUS_INVALID;\n  }\n\n  argv_.clear();\n  char *cmd_data = cmd_head->args;\n  while (true) {\n    size_t left_data_len = buff_len - ((uint8_t *)cmd_data - buff);\n    if (left_data_len == 0) {\n      break;\n    }\n\n    if (left_data_len < 0) {\n      return modelbox::STATUS_NOBUFS;\n    }\n\n    auto *cmd_arg = (struct MsgCmdArg *)cmd_data;\n    left_data_len -= sizeof(*cmd_arg);\n    if (cmd_arg->len > left_data_len || cmd_arg->len <= 0) {\n      return modelbox::STATUS_NOBUFS;\n    }\n\n    if (cmd_arg->magic != CMD_MAGIC) {\n      return modelbox::STATUS_INVALID;\n    }\n\n    if (cmd_arg->arg[cmd_arg->len - 1] != 0) {\n      return modelbox::STATUS_INVALID;\n    }\n\n    std::string arg;\n    arg.assign(cmd_arg->arg, cmd_arg->len - 1);\n    argv_.emplace_back(arg);\n    cmd_data += sizeof(*cmd_arg) + cmd_arg->len;\n  }\n\n  if (cmd_head->argc != argv_.size() || cmd_head->argc <= 0) {\n    return modelbox::STATUS_INVALID;\n  }\n\n  argc_ = cmd_head->argc;\n\n  return modelbox::STATUS_OK;\n}\n\nControlMsgError::ControlMsgError() { SetMsgType(SERVER_CONTROL_MSG_TYPE_ERR); }\n\nControlMsgError::~ControlMsgError() = default;\n\nvoid ControlMsgError::SetError(int err_code, const std::string &err_msg) {\n  err_code_ = err_code;\n  err_msg_ = err_msg;\n}\n\nstd::string ControlMsgError::GetErrorMsg() { return err_msg_; }\n\nint ControlMsgError::GetErrorCode() { return err_code_; }\n\nsize_t ControlMsgError::SerializeMsg(uint8_t *buff, size_t buff_max_len) {\n  auto *err_msg_head = (struct MsgErrorHead *)buff;\n  if (buff_max_len < sizeof(*err_msg_head)) {\n    return -1;\n  }\n  buff_max_len -= sizeof(*err_msg_head);\n  if (buff_max_len < err_msg_.length() + 1) {\n    return -1;\n  }\n\n  auto rc = memcpy_s(err_msg_head->err_msg, buff_max_len, err_msg_.c_str(),\n                     err_msg_.length());\n  if (rc != EOK) {\n    return -1;\n  }\n\n  
err_msg_head->err_msg_len = err_msg_.length();\n  err_msg_head->err_code = err_code_;\n\n  return sizeof(*err_msg_head) + err_msg_head->err_msg_len;\n}\n\nmodelbox::Status ControlMsgError::UnSerializeMsg(uint8_t *buff,\n                                                 size_t buff_len) {\n  auto *err_msg_head = (struct MsgErrorHead *)buff;\n  if (buff_len < sizeof(*err_msg_head)) {\n    return modelbox::STATUS_NOBUFS;\n  }\n\n  buff_len -= sizeof(*err_msg_head);\n  if (buff_len > (size_t)(err_msg_head->err_msg_len)) {\n    return modelbox::STATUS_NOBUFS;\n  }\n\n  err_code_ = err_msg_head->err_code;\n  err_msg_.assign((char *)err_msg_head->err_msg, err_msg_head->err_msg_len);\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<ControlMsg> ControlMsgBuilder::Build(\n    const std::shared_ptr<ControlMsg> &from_msg) {\n  std::shared_ptr<ControlMsg> msg;\n  switch (from_msg->GetMsgType()) {\n    case SERVER_CONTROL_MSG_TYPE_STRING:\n      msg = std::make_shared<ControlMsgString>();\n      break;\n    case SERVER_CONTROL_MSG_TYPE_CMD:\n      msg = std::make_shared<ControlMsgCmd>();\n      break;\n    case SERVER_CONTROL_MSG_TYPE_RESULT:\n      msg = std::make_shared<ControlMsgResult>();\n      break;\n    case SERVER_CONTROL_MSG_TYPE_OUTMSG:\n      break;\n    case SERVER_CONTROL_MSG_TYPE_ERR:\n      msg = std::make_shared<ControlMsgError>();\n      break;\n    case SERVER_CONTROL_MSG_TYPE_HELP:\n      msg = std::make_shared<ControlMsgHelp>();\n      break;\n    default:\n      break;\n  }\n\n  if (msg == nullptr) {\n    modelbox::StatusError = modelbox::STATUS_NOTFOUND;\n    return nullptr;\n  }\n\n  auto ret = msg->BuildFromOtherMsg(from_msg.get());\n  if (!ret) {\n    modelbox::StatusError = ret;\n    return nullptr;\n  }\n\n  return msg;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/common/flowuint_info.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/config.h>\n#include <modelbox/modelbox.h>\n\n#include <fstream>\n#include <iostream>\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/common/flowunit_info.h\"\n\nnamespace modelbox {\nFlowUnitInfo::FlowUnitInfo() = default;\n\nFlowUnitInfo::~FlowUnitInfo() = default;\n\nStatus FlowUnitInfo::Init(const std::shared_ptr<Configuration> &config) {\n  ConfigurationBuilder config_builder;\n  config_ = config_builder.Build();\n  if (config) {\n    config_->Add(*config);\n  }\n\n  drivers_ = std::make_shared<Drivers>();\n  auto status = drivers_->Initialize(config_->GetSubConfig(\"driver\"));\n  if (!status) {\n    MBLOG_ERROR << \"initialize drivers failed, \"\n                << status.WrapErrormsgs().c_str();\n    return {status, \"init driver failed.\"};\n  }\n\n  status = drivers_->Scan();\n  if (!status) {\n    MBLOG_ERROR << \"scan failed, \" << status.WrapErrormsgs().c_str();\n    return {status, \"scan failed.\"};\n  }\n\n  device_ = std::make_shared<DeviceManager>();\n  status = device_->Initialize(drivers_, config_);\n  if (!status) {\n    MBLOG_ERROR << \"init device manager failed, \"\n                << status.WrapErrormsgs().c_str();\n    return {status, \"init device manager failed.\"};\n  }\n\n  flowunit_ = std::make_shared<FlowUnitManager>();\n  status = 
flowunit_->Initialize(drivers_, device_, config_);\n  if (!status) {\n    MBLOG_ERROR << \"init flowunit manager failed, \"\n                << status.WrapErrormsgs().c_str();\n    return {status, \"init flowunit manager failed.\"};\n  }\n\n  if (config_->GetSubConfig(\"driver\") != nullptr) {\n    auto paths = config_->GetSubConfig(\"driver\")->GetStrings(DRIVER_DIR);\n    for (const auto &search_path : paths) {\n      modelbox::ListSubDirectoryFiles(search_path, \"*.toml\",\n                                      &flowunits_from_files_);\n    }\n  }\n\n  return STATUS_OK;\n}\n\nstd::shared_ptr<DeviceManager> FlowUnitInfo::GetDeviceManager() {\n  return device_;\n}\n\nstd::shared_ptr<FlowUnitManager> FlowUnitInfo::GetFlowUnitManager() {\n  return flowunit_;\n}\n\nstd::shared_ptr<Drivers> FlowUnitInfo::GetDriverManager() { return drivers_; }\n\nStatus GetInfoFromTomlFile(const std::string &file, nlohmann::json &json) {\n  MBLOG_DEBUG << \"flowunit from file: \" << file;\n  std::string json_data;\n  std::ifstream infile(file);\n  if (infile.fail()) {\n    return {modelbox::STATUS_NOTFOUND,\n            \"Get file failed\" + modelbox::StrError(errno)};\n  }\n  Defer { infile.close(); };\n\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n  if (data.length() <= 0) {\n    return {modelbox::STATUS_BADCONF, \"toml file is invalid.\"};\n  }\n\n  auto ret = modelbox::TomlToJson(data, &json_data);\n  if (!ret) {\n    MBLOG_WARN << \"Get flowunit info failed. 
\" << ret.WrapErrormsgs();\n    return {STATUS_BADCONF, \"Get flowunit info failed.\"};\n  }\n\n  try {\n    auto json_flowunit = nlohmann::json::parse(json_data);\n    // only add c++ virtual flowunit\n    if (json_flowunit.contains(\"base\") == false) {\n      return {STATUS_BADCONF, \"not a flowunit toml file\"};\n    }\n\n    if (json_flowunit[\"base\"].contains(\"type\") == false) {\n      return {STATUS_BADCONF, \"not a flowunit toml file\"};\n    }\n\n    if (json_flowunit[\"base\"][\"type\"] != \"c++\") {\n      return {STATUS_BADCONF, \"not a flowunit toml file\"};\n    }\n\n    nlohmann::json json_inputs = nlohmann::json::array();\n    nlohmann::json json_outputs = nlohmann::json::array();\n    nlohmann::json json_options = nlohmann::json::array();\n\n    json = json_flowunit[\"base\"];\n    json[\"type\"] = json_flowunit[\"base\"][\"device\"];\n    json.erase(\"device\");\n    json[\"group\"] = json_flowunit[\"base\"][\"group_type\"];\n    json.erase(\"group_type\");\n    if (json_flowunit.contains(\"input\")) {\n      for (auto &input : json_flowunit[\"input\"]) {\n        nlohmann::json json_input;\n        json_input[\"name\"] = input[\"name\"];\n        json_input[\"port_type\"] = input[\"type\"];\n        json_input[\"device_type\"] = input[\"device\"];\n        json_inputs.push_back(json_input);\n      }\n      json[\"inputports\"] = json_inputs;\n    }\n\n    if (json_flowunit.contains(\"output\")) {\n      for (auto &output : json_flowunit[\"output\"]) {\n        nlohmann::json json_output;\n        json_output[\"name\"] = output[\"name\"];\n        json_output[\"port_type\"] = output[\"type\"];\n        json_output[\"device_type\"] = output[\"device\"];\n        json_outputs.push_back(json_output);\n      }\n      json[\"outputports\"] = json_outputs;\n    }\n\n    if (json_flowunit.contains(\"options\")) {\n      for (auto &output : json_flowunit[\"options\"]) {\n        json_outputs.push_back(output);\n      }\n      json[\"options\"] = 
json_options;\n    }\n  } catch (const std::exception &e) {\n    std::string errmsg = \"Get flowunit info failed. \";\n    errmsg += e.what();\n    MBLOG_WARN << errmsg;\n    return {STATUS_BADCONF, errmsg};\n  }\n\n  return STATUS_OK;\n}\n\nStatus FlowUnitInfo::GetInfoInJson(std::string *result) {\n  nlohmann::json result_json;\n  nlohmann::json flowunits;\n  nlohmann::json devices;\n\n  if (result == nullptr) {\n    return STATUS_INVALID;\n  }\n\n  try {\n    auto device_desc_list = device_->GetDeviceDescList();\n    for (const auto &itr_list : device_desc_list) {\n      for (const auto &itr_device : itr_list.second) {\n        nlohmann::json json;\n        auto desc = itr_device.second;\n        json[\"name\"] = itr_device.first;\n        json[\"type\"] = desc->GetDeviceType();\n        json[\"version\"] = desc->GetDeviceVersion();\n        json[\"description\"] = desc->GetDeviceDesc();\n        devices.push_back(json);\n      }\n    }\n\n    auto flow_list = flowunit_->GetAllFlowUnitDesc();\n    std::map<std::string, bool> flowunit_map;\n    for (const auto &flow : flow_list) {\n      nlohmann::json json;\n      nlohmann::json json_inputs = nlohmann::json::array();\n      nlohmann::json json_outputs = nlohmann::json::array();\n      nlohmann::json json_options = nlohmann::json::array();\n\n      auto driverdesc = flow->GetDriverDesc();\n      json[\"name\"] = flow->GetFlowUnitName();\n      json[\"type\"] = driverdesc->GetType();\n      json[\"version\"] = driverdesc->GetVersion();\n      json[\"description\"] = flow->GetDescription();\n      json[\"group\"] = [&]() -> std::string {\n        auto type = flow->GetGroupType();\n        if (type.empty()) {\n          return \"Generic\";\n        }\n\n        return type;\n      }();\n\n      json[\"virtual\"] = false;\n\n      for (const auto &input : flow->GetFlowUnitInput()) {\n        nlohmann::json json_input;\n        json_input[\"name\"] = input.GetPortName();\n        json_input[\"device_type\"] = 
input.GetDeviceType();\n        json_input[\"port_type\"] = input.GetPortType();\n        json_inputs.push_back(json_input);\n      }\n      json[\"inputports\"] = json_inputs;\n\n      for (const auto &output : flow->GetFlowUnitOutput()) {\n        nlohmann::json json_output;\n        json_output[\"name\"] = output.GetPortName();\n        json_output[\"device_type\"] = output.GetDeviceType();\n        json_output[\"port_type\"] = output.GetPortType();\n        json_outputs.push_back(json_output);\n      }\n      json[\"outputports\"] = json_outputs;\n\n      for (auto &option : flow->GetFlowUnitOption()) {\n        nlohmann::json json_option;\n        json_option[\"name\"] = option.GetOptionName();\n        json_option[\"type\"] = option.GetOptionType();\n        json_option[\"default\"] = option.GetOptionDefault();\n        json_option[\"desc\"] = option.GetOptionDesc();\n        json_option[\"required\"] = option.IsRequire();\n        auto values = option.GetOptionValues();\n        if (values.size() > 0) {\n          nlohmann::json json_values;\n          for (const auto &value : values) {\n            json_values[value.first] = value.second;\n          }\n\n          json_option[\"values\"] = json_values;\n        }\n        json_options.push_back(json_option);\n      }\n      json[\"options\"] = json_options;\n\n      std::string key = json[\"name\"];\n      key += \":\";\n      key += json[\"type\"];\n      key += \":\";\n      key += json[\"version\"];\n      flowunit_map[key] = true;\n\n      flowunits.push_back(json);\n    }\n\n    for (const auto &f : flowunits_from_files_) {\n      nlohmann::json json_flowunit;\n      auto ret = GetInfoFromTomlFile(f, json_flowunit);\n      if (!ret) {\n        if (ret == STATUS_BADCONF) {\n          continue;\n        }\n\n        MBLOG_WARN << \"Get flowunit info failed. 
\" << ret.WrapErrormsgs();\n        continue;\n      }\n\n      std::string key = json_flowunit[\"name\"];\n      key += \":\";\n      key += json_flowunit[\"type\"];\n      key += \":\";\n      key += json_flowunit[\"version\"];\n      if (flowunit_map.find(key) != flowunit_map.end()) {\n        continue;\n      }\n\n      flowunits.push_back(json_flowunit);\n    }\n\n    result_json[\"flowunits\"] = flowunits;\n    result_json[\"devices\"] = devices;\n  } catch (const std::exception &e) {\n    MBLOG_INFO << e.what();\n    return {STATUS_INTERNAL, e.what()};\n  }\n\n  *result = result_json.dump();\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/command.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_COMMON_TOOL_COMMANDS_H\n#define MODELBOX_COMMON_TOOL_COMMANDS_H\n\n#include <getopt.h>\n\n#include <iostream>\n#include <map>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"modelbox/common/utils.h\"\n\nnamespace modelbox {\n\nextern void ToolCommandGetOptReset();\nextern std::recursive_mutex ToolCommandGetOptLock;\n\n#define MODELBOX_TOOL_STRCAT_(a, b) a##b\n#define MODELBOX_TOOL_STRCAT(a, b) MODELBOX_TOOL_STRCAT_(a, b)\n#define MODELBOX_TOOL_ADD_COMMAND(new_func) \\\n  ::modelbox::ToolCommandList::Instance()->AddCommand(new_func);\n#define MODELBOX_TOOL_RMV_COMMAND(name) \\\n  ::modelbox::ToolCommandList::Instance()->RmvCommand(name);\n#define MODELBOX_TOOL_CLEAR_COMMAND() \\\n  ::modelbox::ToolCommandList::Instance()->Reset();\n\n#define REG_MODELBOX_TOOL_COMMAND(class)                                     \\\n  static std::string cmd_name_##class;                                       \\\n  static auto __attribute__((unused))                                        \\\n  MODELBOX_TOOL_STRCAT(__auto_reg__, __LINE__) = []() {                      \\\n    auto new_func = []() -> std::shared_ptr<::modelbox::ToolCommand> {       \\\n      auto cmd = std::make_shared<class>();                                  \\\n      return cmd;         
                                                   \\\n    };                                                                       \\\n    auto cmd = new_func();                                                   \\\n    cmd_name_##class = cmd->GetCommandName();                                \\\n    ::modelbox::ToolCommandList::Instance()->AddCommand(new_func);           \\\n    return 0;                                                                \\\n  }();                                                                       \\\n  DeferExt() {                                                               \\\n    /* Remove command from command list.*/                                   \\\n    if (cmd_name_##class.length() > 0) {                                     \\\n      ::modelbox::ToolCommandList::Instance()->RmvCommand(cmd_name_##class); \\\n    }                                                                        \\\n  };\n\n#define MODELBOX_COMMAND_SUB_ARGC argc_sub\n#define MODELBOX_COMMAND_SUB_ARGV argv_sub\n#define MODELBOX_COMMAND_SUB_UNLOCK() get_opt_lock.unlock()\n\n/**\n * @brief Lock globally in the macro to avoid concurrent access to the getopt\n * function. 
You can use the MODELBOX_COMMAND_SUB_UNLOCK() function to unlock,\n * but after unlocking, you need to return the function immediately\n */\n#define MODELBOX_COMMAND_GETOPT_SHORT_BEGIN(cmdtype, short_options, options) \\\n  optind = 1;                                                                \\\n  int option_index = 0;                                                      \\\n  if (argc <= 0 || argv == nullptr) {                                        \\\n    return -1;                                                               \\\n  }                                                                          \\\n                                                                             \\\n  std::unique_lock<std::recursive_mutex> get_opt_lock(                       \\\n      ::modelbox::ToolCommandGetOptLock);                                    \\\n  ::modelbox::ToolCommandGetOptReset();                                      \\\n  while (((cmdtype) = getopt_long_only(argc, argv, short_options, options,   \\\n                                       &option_index)) != EOF) {             \\\n    int MODELBOX_COMMAND_SUB_ARGC = argc - optind + 1;                       \\\n    char **MODELBOX_COMMAND_SUB_ARGV = argv + optind - 1;                    \\\n    { auto &unused __attribute__((unused)) = MODELBOX_COMMAND_SUB_ARGC; }    \\\n    { auto &unused __attribute__((unused)) = MODELBOX_COMMAND_SUB_ARGV; }\n\n/**\n * @brief Lock globally in the macro to avoid concurrent access to the getopt\n * function. 
You can use the MODELBOX_COMMAND_SUB_UNLOCK() function to unlock,\n * but after unlocking, you need to return the function immediately\n */\n#define MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, options) \\\n  MODELBOX_COMMAND_GETOPT_SHORT_BEGIN(cmdtype, \"\", options)\n\n#define MODELBOX_COMMAND_GETOPT_END() \\\n  }                                   \\\n  get_opt_lock.unlock();\n\nclass StdOutStream : public OutStream {\n public:\n  StdOutStream();\n  virtual ~StdOutStream();\n\n protected:\n  void ProcessStream(OStream *st) override;\n};\n\nclass StdErrStream : public OutStream {\n public:\n  StdErrStream();\n  virtual ~StdErrStream();\n\n protected:\n  void ProcessStream(OStream *st);\n};\n\nclass ToolCommand {\n protected:\n#define TOOL_COUT *out_cout_->Stream()\n#define TOOL_CERR *out_cerr_->Stream()\n  std::shared_ptr<OutStream> out_cout_ = std::make_shared<StdOutStream>();\n  std::shared_ptr<OutStream> out_cerr_ = std::make_shared<StdErrStream>();\n\n public:\n  ToolCommand();\n  virtual ~ToolCommand();\n\n  void SetUp(std::shared_ptr<OutStream> cout, std::shared_ptr<OutStream> cerr);\n\n  virtual int Run(int argc, char *argv[]) = 0;\n\n  virtual std::string GetHelp() = 0;\n\n  virtual std::string GetCommandName() = 0;\n\n  virtual std::string GetCommandDesc() = 0;\n};\n\nusing ToolCommandCreate = std::function<std::shared_ptr<ToolCommand>()>;\nclass ToolCommandList {\n  ToolCommandList();\n  virtual ~ToolCommandList();\n\n public:\n  static ToolCommandList *Instance();\n  void AddCommand(const ToolCommandCreate &new_func);\n\n  void RmvCommand(const std::string &name);\n\n  void Reset();\n\n  std::shared_ptr<ToolCommand> GetCommand(const std::string &name);\n\n  std::vector<std::shared_ptr<ToolCommand>> GetAllCommands();\n\n  void ShowHelp();\n\n  void ShowHelp(const std::string &name);\n\n  std::map<std::string, ToolCommandCreate> commands_;\n};\n\n}  // namespace modelbox\n\n#endif\n"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/config.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_COMMON_CONF_H_\n#define MODELBOX_COMMON_CONF_H_\n\n#include <modelbox/base/configuration.h>\n\n#include <memory>\nnamespace modelbox {\n/**\n * @brief Load Sub configuration from file\n */\nstd::shared_ptr<modelbox::Configuration> LoadSubConfig(const std::string &file);\n}  // namespace modelbox\n\n#endif  // MODELBOX_COMMON_CONF_H_"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/control_msg.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CONTROL_MSG_H_\n#define MODELBOX_CONTROL_MSG_H_\n\n#include <modelbox/base/config.h>\n#include <modelbox/modelbox.h>\n\n#ifdef BUILD_TEST\n#include \"test_config.h\"\nconstexpr const char CONTROL_UNIX_PATH[] = \"/tmp/modelbox.sock\";\n#else\nconstexpr const char CONTROL_UNIX_PATH[] = \"/var/run/modelbox.sock\";\n#endif\n\nnamespace modelbox {\nconstexpr uint64_t CONTROL_MAGIC = 0x676d4c5443767273; /* srvCTLmg */\nconstexpr int CONTROL_MAX_MSG_LEN = 32 * 1024;\n\ntypedef enum SERVER_CONTROL_MSG_TYPE {\n  SERVER_CONTROL_MSG_TYPE_NULL = 0,\n  SERVER_CONTROL_MSG_TYPE_CMD = 1,\n  SERVER_CONTROL_MSG_TYPE_RESULT = 2,\n  SERVER_CONTROL_MSG_TYPE_OUTMSG = 3,\n  SERVER_CONTROL_MSG_TYPE_ERRMSG = 4,\n  SERVER_CONTROL_MSG_TYPE_HELP = 5,\n  SERVER_CONTROL_MSG_TYPE_ERR = 6,\n  SERVER_CONTROL_MSG_TYPE_STRING = 7,\n  SERVER_CONTROL_MSG_TYPE_BUFF\n} SERVER_CONTROL_MSG_TYPE;\n\nstruct ControlMsgHead {\n  uint64_t magic;\n  uint32_t type;\n  uint32_t len;\n  uint8_t msg[0];\n};\n\nclass ControlMsg {\n public:\n  ControlMsg(size_t buffer_size);\n  ControlMsg();\n  virtual ~ControlMsg();\n\n  SERVER_CONTROL_MSG_TYPE GetMsgType();\n\n  size_t GetMsgDataLen();\n  size_t GetMsgLen();\n  const uint8_t *GetMsgData();\n  struct ControlMsgHead *GetControlMsgHead();\n\n  modelbox::Status AppendDataLen(size_t len);\n  
modelbox::Status AppendData(uint8_t *data, size_t data_len);\n\n  uint8_t *GetData();\n  size_t GetDataLen();\n  uint8_t *GetDataTail();\n  size_t GetRemainSpace();\n\n  modelbox::Status Unserialize();\n  modelbox::Status Serialize();\n  modelbox::Status BuildFromOtherMsg(ControlMsg *from_control_msg);\n  void Flip();\n  void Reset();\n\n protected:\n  void SetMsgType(SERVER_CONTROL_MSG_TYPE type);\n  modelbox::Status Unserialize(\n      const std::shared_ptr<std::vector<uint8_t>> &data_buff);\n  virtual modelbox::Status UnSerializeMsg(uint8_t *buff, size_t buff_len);\n  virtual size_t SerializeMsg(uint8_t *buff, size_t buff_max_len);\n\n  std::shared_ptr<std::vector<uint8_t>> data_buff_;\n  size_t data_len_{0};\n  SERVER_CONTROL_MSG_TYPE msg_type_{SERVER_CONTROL_MSG_TYPE_NULL};\n  bool data_ready_{false};\n};\n\nclass ControlMsgResult : public ControlMsg {\n public:\n  ControlMsgResult();\n  ~ControlMsgResult() override;\n\n  void SetResult(int result);\n\n  int GetResult();\n\n protected:\n  size_t SerializeMsg(uint8_t *buff, size_t buff_max_len) override;\n  modelbox::Status UnSerializeMsg(uint8_t *buff, size_t buff_len) override;\n\n private:\n  int result_;\n};\n\nclass ControlMsgString : public ControlMsg {\n public:\n  ControlMsgString();\n  ~ControlMsgString() override;\n\n  void SetString(const std::string &str);\n  const std::string &GetString();\n\n protected:\n  size_t SerializeMsg(uint8_t *buff, size_t buff_max_len) override;\n  modelbox::Status UnSerializeMsg(uint8_t *buff, size_t buff_len) override;\n\n private:\n  std::string str_;\n};\n\nclass ControlMsgHelp : public ControlMsgString {\n public:\n  ControlMsgHelp();\n  ~ControlMsgHelp() override;\n};\n\nclass ControlMsgStdout : public ControlMsgString {\n public:\n  ControlMsgStdout();\n  ~ControlMsgStdout() override;\n};\n\nclass ControlMsgErrout : public ControlMsgString {\n public:\n  ControlMsgErrout();\n  ~ControlMsgErrout() override;\n};\n\nclass ControlMsgCmd : public ControlMsg {\n 
public:\n  ControlMsgCmd();\n  ~ControlMsgCmd() override;\n\n  void SetArgs(int argc, char *argv[]);\n  int GetArgc();\n  std::vector<std::string> GetArgv();\n\n protected:\n  uint32_t CMD_MAGIC = 0x5F446d43; /* CmD_*/\n  struct MsgCmdHead {\n    uint32_t argc;\n    uint32_t magic;\n    char args[0];\n  };\n\n  struct MsgCmdArg {\n    uint32_t len;\n    uint32_t magic;\n    char arg[0];\n  };\n  size_t SerializeMsg(uint8_t *buff, size_t buff_max_len) override;\n  modelbox::Status UnSerializeMsg(uint8_t *buff, size_t buff_len) override;\n\n private:\n  int argc_;\n  std::vector<std::string> argv_;\n};\n\nclass ControlMsgError : public ControlMsg {\n public:\n  ControlMsgError();\n  ~ControlMsgError() override;\n\n  void SetError(int err_code, const std::string &err_msg);\n\n  std::string GetErrorMsg();\n\n  int GetErrorCode();\n\n protected:\n  struct MsgErrorHead {\n    int err_code;\n    int err_msg_len;\n    char err_msg[0];\n  };\n\n  size_t SerializeMsg(uint8_t *buff, size_t buff_max_len) override;\n  modelbox::Status UnSerializeMsg(uint8_t *buff, size_t buff_len) override;\n\n private:\n  int err_code_;\n  std::string err_msg_;\n};\n\nclass ControlMsgBuilder {\n public:\n  static std::shared_ptr<ControlMsg> Build(\n      const std::shared_ptr<ControlMsg> &from_msg);\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CONTROL_MSG_H_\n"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/flowunit_info.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MODELBOX_EDITOR_FLOWUNIT_INFO_H_\n#define MODELBOX_MODELBOX_EDITOR_FLOWUNIT_INFO_H_\n\n#include <modelbox/base/config.h>\n#include <modelbox/modelbox.h>\n\nnamespace modelbox {\n\nclass FlowUnitInfo {\n public:\n  FlowUnitInfo();\n  virtual ~FlowUnitInfo();\n\n  Status Init(const std::shared_ptr<Configuration>& config);\n\n  Status GetInfoInJson(std::string *result);\n\n  std::shared_ptr<DeviceManager> GetDeviceManager();\n\n  std::shared_ptr<FlowUnitManager> GetFlowUnitManager();\n\n  std::shared_ptr<Drivers> GetDriverManager();\n\n private:\n  std::shared_ptr<Drivers> drivers_;\n  std::shared_ptr<Configuration> config_;\n  std::shared_ptr<DeviceManager> device_;\n  std::shared_ptr<FlowUnitManager> flowunit_;\n  std::vector<std::string> flowunits_from_files_;\n};\n}  // namespace modelbox\n#endif  // MODELBOX_MODELBOX_EDITOR_FLOWUNIT_INFO_H_\n"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_COMMON_LOG_H_\n#define MODELBOX_COMMON_LOG_H_\n\n#include <modelbox/base/log.h>\n\nnamespace modelbox {\n\nclass ModelboxServerLogger : public modelbox::Logger {\n public:\n  ModelboxServerLogger();\n  ~ModelboxServerLogger() override;\n\n  /**\n   * @brief Init server log\n   * @param file path of logging file.\n   * @param logsize max log file size.\n   * @param logcount max log file number.\n   * @param logscreen enable output log to screen.\n   * @return init result.\n   */\n  bool Init(const std::string &file, int logsize, int logcount, bool logscreen);\n\n  /**\n   * @brief Output log with va-arg\n   * @param level log level\n   * @param file log file\n   * @param lineno log file line number\n   * @param func log function\n   * @param format log format\n   * @param ap va_list\n   */\n  void Vprint(modelbox::LogLevel level, const char *file, int lineno,\n              const char *func, const char *format, va_list ap) override;\n\n  /**\n   * @brief Set log level\n   * @param level log level\n   */\n  void SetLogLevel(modelbox::LogLevel level) override;\n\n  /**\n   * @brief Get log level\n   * @return level log level\n   */\n  modelbox::LogLevel GetLogLevel() override;\n\n  /**\n   * @brief Enable or disable log to screen.\n   * @param logscreen enable or disable flag.\n   */\n  void 
SetVerbose(bool logscreen);\n\n  /**\n   * @brief Change log file path.\n   * @param file new log file path.\n   */\n  void SetLogfile(const std::string &file);\n\n private:\n  bool initialized_{false};\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_COMMON_LOG_H_"
  },
  {
    "path": "src/modelbox/common/include/modelbox/common/utils.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_COMMON_UTILS_H_\n#define MODELBOX_COMMON_UTILS_H_\n\n#include <signal.h>\n\n#include <functional>\n#include <iostream>\n#include <memory>\n#include <sstream>\n#include <string>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nconstexpr const char *MODELBOX_ROOT_VAR = \"${MODELBOX_ROOT}\";\n\n/**\n * @brief Get current modelbox standalone directory\n * @return standalone root dir\n */\nconst std::string &modelbox_root_dir();\n\n/**\n * @brief Get modelbox full path\n *\n * @param path\n * @return std::string\n */\nstd::string modelbox_full_path(const std::string &path);\n\n/**\n * @brief Create pid file of current process\n * @param pid_file path of pid file\n * @return create result\n */\nint modelbox_create_pid(const char *pid_file);\n\n/**\n * @brief Handle process signal\n * @param sig_list signal list to handle\n * @param sig_num sig_list count\n * @param action signal handler\n * @return register result.\n */\nint modelbox_sig_register(const int sig_list[], int sig_num,\n                          void (*action)(int, siginfo_t *, void *));\n\n/**\n * @brief Get cpu register data in string format\n * @param buf output message buffer\n * @param buf_size max size of buffer\n * @param ucontext signal context\n * @return result.\n */\nint modelbox_cpu_register_data(char *buf, 
int buf_size, ucontext_t *ucontext);\n\n/**\n * @brief Split ip address and port from string\n * @param host host string\n * @param ip output ip address\n * @param port output port\n * @return result.\n */\nStatus SplitIPPort(const std::string &host, std::string &ip, std::string &port);\n\n/**\n * @brief Get user id and gid by username\n * @param user username\n * @param uid user id\n * @param gid group id\n * @return result.\n */\nStatus GetUidGid(const std::string &user, uid_t &uid, gid_t &gid);\n\n/**\n * @brief change user and group of path\n * @param user username\n * @param path path to change\n * @return result.\n */\nStatus ChownToUser(const std::string &user, const std::string &path);\n\n/**\n * @brief run as user\n * @return result.\n */\nStatus RunAsUser(const std::string &user);\n\n/**\n * @brief Custom stream\n */\nclass OutStream {\n protected:\n  using OStream = std::ostringstream;\n  using Buffer_p = std::unique_ptr<OStream, std::function<void(OStream *)>>;\n  virtual void ProcessStream(OStream *st) = 0;\n\n public:\n  /**\n   * @brief return stream\n   */\n  Buffer_p Stream() {\n    return Buffer_p(new OStream, [=](OStream *st) {\n      ProcessStream(st);\n      delete st;\n    });\n  }\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_COMMON_UTILS_H_"
  },
  {
    "path": "src/modelbox/common/log.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/log.h\"\n#define TLOG_MAX_LINE_LEN 4096\n#include \"tlog.h\"\n\nnamespace modelbox {\n\ntlog_level MblogLevelToTlogLevel(modelbox::LogLevel mblog_level) {\n  tlog_level level = TLOG_INFO;\n  switch (mblog_level) {\n    case modelbox::LOG_DEBUG:\n      level = TLOG_DEBUG;\n      break;\n    case modelbox::LOG_INFO:\n      level = TLOG_INFO;\n      break;\n    case modelbox::LOG_NOTICE:\n      level = TLOG_NOTICE;\n      break;\n    case modelbox::LOG_WARN:\n      level = TLOG_WARN;\n      break;\n    case modelbox::LOG_ERROR:\n      level = TLOG_ERROR;\n      break;\n    case modelbox::LOG_FATAL:\n      level = TLOG_FATAL;\n      break;\n    case modelbox::LOG_OFF:\n      level = TLOG_OFF;\n      break;\n  }\n\n  return level;\n}\n\nmodelbox::LogLevel TlogLevelToMblogLevel(tlog_level tlog_level) {\n  modelbox::LogLevel level = modelbox::LOG_INFO;\n  switch (tlog_level) {\n    case TLOG_DEBUG:\n      level = modelbox::LOG_DEBUG;\n      break;\n    case TLOG_INFO:\n      level = modelbox::LOG_INFO;\n      break;\n    case TLOG_NOTICE:\n      level = modelbox::LOG_NOTICE;\n      break;\n    case TLOG_WARN:\n      level = modelbox::LOG_WARN;\n      break;\n    case TLOG_ERROR:\n      level = modelbox::LOG_ERROR;\n      break;\n    case TLOG_FATAL:\n      level = modelbox::LOG_FATAL;\n      
break;\n    case TLOG_OFF:\n      level = modelbox::LOG_OFF;\n      break;\n    case TLOG_END:\n      level = modelbox::LOG_OFF;\n      break;\n  }\n\n  return level;\n}\n\nModelboxServerLogger::ModelboxServerLogger() = default;\n\nModelboxServerLogger::~ModelboxServerLogger() {\n  if (initialized_) {\n    tlog_exit();\n  }\n}\n\nbool ModelboxServerLogger::Init(const std::string &file, int logsize,\n                                int logcount, bool logscreen) {\n  if (tlog_init(file.c_str(), logsize, logcount, 0, 0) != 0) {\n    return false;\n  }\n\n  if (logscreen) {\n    tlog_setlogscreen(true);\n  }\n\n  initialized_ = true;\n  return true;\n}\n\nvoid ModelboxServerLogger::SetLogLevel(modelbox::LogLevel level) {\n  // set log level to tlog\n  tlog_setlevel(MblogLevelToTlogLevel(level));\n}\n\nmodelbox::LogLevel ModelboxServerLogger::GetLogLevel() {\n  // get log level\n  return TlogLevelToMblogLevel(tlog_getlevel());\n}\n\nvoid ModelboxServerLogger::Vprint(modelbox::LogLevel level, const char *file,\n                                  int lineno, const char *func,\n                                  const char *format, va_list ap) {\n  tlog_vext(MblogLevelToTlogLevel(level), file, lineno, func, nullptr, format,\n            ap);\n}\n\nvoid ModelboxServerLogger::SetVerbose(bool logscreen) {\n  tlog_setlogscreen(logscreen);\n}\n\nvoid ModelboxServerLogger::SetLogfile(const std::string &file) {\n  tlog_set_logfile(file.c_str());\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/common/utils.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/utils.h\"\n\n#include <errno.h>\n#include <fcntl.h>\n#include <linux/capability.h>\n#include <pwd.h>\n#include <signal.h>\n#include <stdio.h>\n#include <string.h>\n#include <sys/prctl.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <mutex>\n\n#include \"modelbox/base/utils.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\n#define TMP_BUFF_LEN_32 32\n\nstatic int kPidFileFd = -1;\n\nextern \"C\" int capget(struct __user_cap_header_struct *header,\n                      struct __user_cap_data_struct *cap);\nextern \"C\" int capset(struct __user_cap_header_struct *header,\n                      struct __user_cap_data_struct *cap);\n\nstd::once_flag root_dir_flag;\n\nconst std::string &modelbox_root_dir() {\n  static std::string rootdir;\n\n  std::call_once(root_dir_flag, []() {\n    char buff[PATH_MAX] = {0};\n    int len;\n\n    len = readlink(\"/proc/self/exe\", buff, sizeof(buff) - 1);\n    if (len < 0) {\n      rootdir = \"\";\n      return;\n    }\n\n    buff[len] = {0};\n    rootdir = modelbox::GetDirName(buff);\n    rootdir = rootdir + \"../../../../\";\n    rootdir = PathCanonicalize(rootdir);\n    if (rootdir == \"/\") {\n      rootdir = \"\";\n    }\n  });\n\n  return rootdir;\n}\n\nstd::string modelbox_full_path(const std::string &path) 
{\n  std::string fullpath = path;\n  modelbox::StringReplaceAll(fullpath, MODELBOX_ROOT_VAR, modelbox_root_dir());\n  return fullpath;\n}\n\nint modelbox_create_pid(const char *pid_file) {\n  int fd = -1;\n  int flags;\n  char buff[TMP_BUFF_LEN_32];\n  int ret;\n\n  if (pid_file == nullptr) {\n    return -1;\n  }\n\n  /*  create pid file, and lock this file */\n  fd = open(pid_file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);\n  if (fd == -1) {\n    fprintf(stderr, \"create pid file failed, path %s, %s\\n\", pid_file,\n            StrError(errno).c_str());\n    return -1;\n  }\n\n  flags = fcntl(fd, F_GETFD);\n  if (flags < 0) {\n    fprintf(stderr, \"Could not get flags for PID file %s, %s\\n\", pid_file,\n            StrError(errno).c_str());\n    goto errout;\n  }\n\n  flags |= FD_CLOEXEC;\n  if (fcntl(fd, F_SETFD, flags) == -1) {\n    fprintf(stderr, \"Could not set flags for PID file %s, %s\\n\", pid_file,\n            StrError(errno).c_str());\n    goto errout;\n  }\n\n  if (lockf(fd, F_TLOCK, 0) < 0) {\n    fprintf(stderr, \"Server is already running.\\n\");\n    goto errout;\n  }\n\n  ret = ftruncate(fd, 0);\n  UNUSED_VAR(ret);\n\n  ret =\n      snprintf_s(buff, TMP_BUFF_LEN_32, TMP_BUFF_LEN_32 - 1, \"%d\\n\", getpid());\n  if (ret < 0 || ret == TMP_BUFF_LEN_32) {\n    fprintf(stderr, \"format pid failed.\\n\");\n    goto errout;\n  }\n\n  if (write(fd, buff, strnlen(buff, TMP_BUFF_LEN_32)) < 0) {\n    fprintf(stderr, \"write pid to file failed, %s.\\n\", StrError(errno).c_str());\n    goto errout;\n  }\n\n  if (kPidFileFd > 0) {\n    close(kPidFileFd);\n    kPidFileFd = -1;\n  }\n\n  kPidFileFd = fd;\n\n  return 0;\nerrout:\n  if (fd > 0) {\n    close(fd);\n  }\n  return -1;\n}\n\nint modelbox_sig_register(const int sig_list[], int sig_num,\n                          void (*action)(int, siginfo_t *, void *)) {\n  int i = 0;\n  struct sigaction sig_act;\n\n  for (i = 0; i < sig_num; i++) {\n    memset_s(&sig_act, sizeof(sig_act), 0, sizeof(sig_act));\n   
 sig_act.sa_sigaction = action;\n    sig_act.sa_flags = SA_SIGINFO;\n\n    if (sigaction(sig_list[i], &sig_act, nullptr) < 0) {\n      fprintf(stderr, \"Register signal %d failed.\", sig_list[i]);\n    }\n  }\n\n  return 0;\n}\n\n#if defined(__aarch64__)\nenum {\n  REG_R0 = 0,\n  REG_R1,\n  REG_R2,\n  REG_R3,\n  REG_R4,\n  REG_R5,\n  REG_R6,\n  REG_R7,\n  REG_R8,\n  REG_R9,\n  REG_R10,\n  REG_R11,\n  REG_R12,\n  REG_R13,\n  REG_R14,\n  REG_R15,\n  REG_R16,\n  REG_R17,\n  REG_R18,\n  REG_R19,\n  REG_R20,\n  REG_R21,\n  REG_R22,\n  REG_R23,\n  REG_R24,\n  REG_R25,\n  REG_R26,\n  REG_R27,\n  REG_R28,\n  REG_R29,\n  REG_R30\n};\n#endif\n\nint modelbox_cpu_register_data(char *buf, int buf_size, ucontext_t *ucontext) {\n  greg_t *gregs = nullptr;\n  if (buf == nullptr || buf_size <= 0 || ucontext == nullptr) {\n    return -1;\n  }\n\n  int len = -1;\n#if defined(__aarch64__)\n  gregs = (greg_t *)&(ucontext->uc_mcontext.regs);\n  len = snprintf_s(\n      buf, buf_size, buf_size - 1,\n      \"[R0]=0x%.16lx\\t\\t[R1]=0x%.16lx\\t\\t[R2]=0x%.16lx\\t\\t[R3]=0x%.16lx\\n\"\n      \"[R4]=0x%.16lx\\t\\t[R5]=0x%.16lx\\t\\t[R6]=0x%.16lx\\t\\t[R7]=0x%.16lx\\n\"\n      \"[R8]=0x%.16lx\\t\\t[R9]=0x%.16lx\\t\\t[R10]=0x%.16lx\\t[R11]=0x%.16lx\\n\"\n      \"[R12]=0x%.16lx\\t\\t[R13]=0x%.16lx\\t[R14]=0x%.16lx\\t[R15]=0x%.16lx\\n\"\n      \"[R16]=0x%.16lx\\t\\t[R17]=0x%.16lx\\t[R18]=0x%.16lx\\t[R19]=0x%.16lx\\n\"\n      \"[R20]=0x%.16lx\\t\\t[R21]=0x%.16lx\\t[R22]=0x%.16lx\\t[R23]=0x%.16lx\\n\"\n      \"[R24]=0x%.16lx\\t\\t[R25]=0x%.16lx\\t[R26]=0x%.16lx\\t[R27]=0x%.16lx\\n\"\n      \"[R28]=0x%.16lx\\t\\t[R29]=0x%.16lx\\t[R30]=0x%.16lx\\n\"\n      \"sp: 0x%.16llx\\t\\tpc: 0x%.16llx\\t\\tpstate: 0x%.16llx\\tfault_address: \"\n      \"0x%.16llx\\n\",\n      *(gregs + REG_R0), *(gregs + REG_R1), *(gregs + REG_R2), *(gregs + REG_R3),\n      *(gregs + REG_R4), *(gregs + REG_R5), *(gregs + REG_R6),\n      *(gregs + REG_R7), *(gregs + REG_R8), *(gregs + REG_R9),\n      *(gregs + REG_R10), *(gregs + REG_R11), *(gregs 
+ REG_R12),\n      *(gregs + REG_R13), *(gregs + REG_R14), *(gregs + REG_R15),\n      *(gregs + REG_R16), *(gregs + REG_R17), *(gregs + REG_R18),\n      *(gregs + REG_R19), *(gregs + REG_R20), *(gregs + REG_R21),\n      *(gregs + REG_R22), *(gregs + REG_R23), *(gregs + REG_R24),\n      *(gregs + REG_R25), *(gregs + REG_R26), *(gregs + REG_R27),\n      *(gregs + REG_R28), *(gregs + REG_R29), *(gregs + REG_R30),\n      ucontext->uc_mcontext.sp, ucontext->uc_mcontext.pc,\n      ucontext->uc_mcontext.pstate, ucontext->uc_mcontext.fault_address);\n#elif defined(__x86_64__)\n  gregs = (greg_t *)&(ucontext->uc_mcontext.gregs);\n  len = snprintf_s(\n      buf, buf_size, buf_size - 1,\n      \"[R8]=0x%.16lx\\t\\t[R9]=0x%.16lx\\t\\t[R10]=0x%.16lx\\t[R11]=0x%.16lx\\n\"\n      \"[R12]=0x%.16lx\\t[R13]=0x%.16lx\\t[R14]=0x%.16lx\\t[R15]=0x%.16lx\\n\"\n      \"[RDI]=0x%.16lx\\t[RSI]=0x%.16lx\\t[RBP]=0x%.16lx\\t[RBX]=0x%.16lx\\n\"\n      \"[RDX]=0x%.16lx\\t[RAX]=0x%.16lx\\t[RCX]=0x%.16lx\\t[RSP]=0x%.16lx\\n\"\n      \"[RIP]=0x%.16lx\\t[RFLAGS]=0x%.16lx\\n\",\n      *(gregs + REG_R8), *(gregs + REG_R9), *(gregs + REG_R10),\n      *(gregs + REG_R11), *(gregs + REG_R12), *(gregs + REG_R13), *(gregs + REG_R14),\n      *(gregs + REG_R15), *(gregs + REG_RDI), *(gregs + REG_RSI),\n      *(gregs + REG_RBP), *(gregs + REG_RBX), *(gregs + REG_RDX),\n      *(gregs + REG_RAX), *(gregs + REG_RCX), *(gregs + REG_RSP),\n      *(gregs + REG_RIP), *(gregs + REG_EFL));\n#endif\n  if (len < 0 || len >= buf_size) {\n    return -1;\n  }\n\n  return 0;\n}\n\nStatus GetUidGid(const std::string &user, uid_t &uid, gid_t &gid) {\n  struct passwd *result = nullptr;\n  struct passwd pwd;\n  std::vector<char> buff;\n  ssize_t bufsize = 0;\n  int ret = -1;\n\n  if (user == \"\") {\n    return {STATUS_INVALID, \"user is empty\"};\n  }\n\n  bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);\n  if (bufsize == -1) {\n    bufsize = 1024 * 16;\n  }\n\n  buff.resize(bufsize);\n  ret = getpwnam_r(user.c_str(), &pwd, buff.data(), bufsize, 
&result);\n  if (ret != 0) {\n    return {STATUS_FAULT, \"get user \" + user + \" failed: \" + StrError(errno)};\n  }\n\n  if (result == nullptr) {\n    return {STATUS_NOTFOUND, \"user \" + user + \" not found\"};\n  }\n\n  uid = result->pw_uid;\n  gid = result->pw_gid;\n\n  return STATUS_OK;\n}\n\nStatus ChownToUser(const std::string &user, const std::string &path) {\n  uid_t uid = 0;\n  gid_t gid = 0;\n  int unused __attribute__((unused)) = 0;\n\n  auto ret = GetUidGid(user, uid, gid);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  if (chown(path.c_str(), uid, gid) != 0) {\n    return {STATUS_INVALID, \"chown \" + path + \" failed: \" + StrError(errno)};\n  }\n\n  return STATUS_OK;\n}\n\nStatus RunAsUser(const std::string &user) {\n  struct __user_cap_header_struct header;\n#ifdef _LINUX_CAPABILITY_VERSION_3\n  struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3];\n  header.version = _LINUX_CAPABILITY_VERSION_3;\n#else\n  struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_1];\n  header.version = _LINUX_CAPABILITY_VERSION;\n#endif\n  header.pid = 0;\n  uid_t uid = 0;\n  gid_t gid = 0;\n  int unused __attribute__((unused)) = 0;\n\n  auto ret = GetUidGid(user, uid, gid);\n  if (ret != STATUS_OK) {\n    return ret;\n  }\n\n  if (getuid() == uid) {\n    return STATUS_OK;\n  }\n\n  memset(caps, 0, sizeof(caps));\n  if (capget(&header, caps) < 0) {\n    return {STATUS_INVALID, \"capget failed: \" + StrError(errno)};\n  }\n\n  prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);\n  Defer { prctl(PR_SET_KEEPCAPS, 0, 0, 0, 0); };\n  for (auto &cap : caps) {\n    cap.effective = 1 << CAP_NET_ADMIN;\n    cap.permitted = 1 << CAP_NET_ADMIN;\n    cap.inheritable = 0;\n  }\n  unused = setgid(gid);\n  unused = setuid(uid);\n  if (capset(&header, caps) < 0) {\n    if (errno == EPERM) {\n      return {STATUS_PERMIT, \"capset failed: \" + StrError(errno)};\n    }\n  }\n\n  if (getuid() != uid) {\n    return {STATUS_INVALID, \"change user failed. 
\" + StrError(errno)};\n  }\n\n  return STATUS_OK;\n}\n\nStatus SplitIPPort(const std::string &host, std::string &ip,\n                   std::string &port) {\n  auto pos = host.find_last_of(':');\n\n  if (pos == std::string::npos) {\n    const auto *msg = \"invalid ip address, please try ip:port\";\n    return {STATUS_INVALID, msg};\n  }\n\n  port = host.substr(pos + 1, host.length());\n  int n_port = atol(port.c_str());\n  if (n_port <= 0 || n_port > 65535) {\n    const auto *msg = \"invalid port\";\n    return {STATUS_INVALID, msg};\n  }\n\n  ip = host.substr(0, pos);\n  /* process ipv6 format */\n  pos = ip.find_first_of('[');\n  if (pos != std::string::npos) {\n    ip = ip.substr(pos + 1, ip.length());\n  }\n\n  pos = ip.find_first_of(']');\n  if (pos != std::string::npos) {\n    ip = ip.substr(0, pos);\n  }\n\n  if (ip == \"\") {\n    ip = \"0.0.0.0\";\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/manager/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE MODELBOX_SOURCES *.cpp *.cc *.c)\nset(MODELBOX_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\nfile(GLOB_RECURSE MANAGER_BIN_FILES ${CMAKE_CURRENT_LIST_DIR}/bin/*)\nset(MODELBOX_MANAGER_CLIENT_SOURCE \"${CMAKE_CURRENT_LIST_DIR}/src/manager_monitor_client.c\")\n\nexclude_files_from_dir_in_list(MODELBOX_SOURCES \"${MODELBOX_SOURCES}\" \"${MODELBOX_MANAGER_CLIENT_SOURCE}\")\n\nlist(APPEND MODELBOX_MANAGER_INCLUDES ${MODELBOX_INCLUDE})\nlist(APPEND MANAGER_INCLUDES ${MODELBOX_INCLUDE})\nlist(APPEND MANAGER_INCLUDES ${MODELBOX_MANAGER_INCLUDES})\nlist(APPEND MANAGER_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/src)\nlist(APPEND MANAGER_INCLUDES ${TLOG_INCLUDE})\nlist(REMOVE_DUPLICATES MANAGER_INCLUDES)\n\ninclude_directories(${MANAGER_INCLUDES})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(HEADER \n    ${MODELBOX_INCLUDE}/modelbox\n    ${MODELBOX_COMMON_INCLUDE}/modelbox\n    )\n\nadd_executable(modelbox-manager ${MODELBOX_SOURCES})\ntarget_link_libraries(modelbox-manager pthread)\ntarget_link_libraries(modelbox-manager rt)\ntarget_link_libraries(modelbox-manager ${TLOG_STATIC_LIBRARIES})\ntarget_link_libraries(modelbox-manager ${HUAWEI_SECURE_C_LIBRARIES})\n\nadd_library(manager-client SHARED ${MODELBOX_MANAGER_CLIENT_SOURCE})\n\nif (NOT MODELBOX_MANAGER_CONFIG_FILE_NAME)\n 
   set(MODELBOX_MANAGER_CONFIG_FILE_NAME \"manager.conf\")\nendif()\n\nif (STANDALONE)\n    set_target_properties(modelbox-manager PROPERTIES INSTALL_RPATH \"$ORIGIN/../lib\")\nendif()\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/manager-opts ${CMAKE_CURRENT_BINARY_DIR}/etc/manager-opts @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/init.d/modelbox-manager.in ${CMAKE_CURRENT_BINARY_DIR}/etc/init.d/modelbox-manager @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/manager.conf.in ${CMAKE_CURRENT_BINARY_DIR}/etc/${MODELBOX_MANAGER_CONFIG_FILE_NAME} @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox-manager.service.in ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-manager.service @ONLY)\n\ninstall(TARGETS modelbox-manager \n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\ninstall(TARGETS manager-client\n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\ninstall(DIRECTORY \n    ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT server-devel\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/init.d/modelbox-manager\n    DESTINATION /etc/init.d\n    PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE \n    COMPONENT server\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/${MODELBOX_MANAGER_CONFIG_FILE_NAME}\n    ${CMAKE_CURRENT_BINARY_DIR}/etc/manager-opts\n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/ \n    PERMISSIONS OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/init-script/modelbox-manager\n    DIRECTORY_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION /${CMAKE_INSTALL_RUNSTATEDIR}/modelbox/\n   
 COMPONENT server\n)\n\ninstall(DIRECTORY \n    DESTINATION /var/log/modelbox\n    COMPONENT server\n)\n\n\ninstall(PROGRAMS ${MANAGER_BIN_FILES}\n    DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    COMPONENT server)\n\nif (SYSTEMDSYSTEMUNITDIR) \ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-manager.service \n    DESTINATION ${SYSTEMDSYSTEMUNITDIR} \n    COMPONENT server\n    )\nelse()\nmessage(\"Skip install systemd unit\")\nendif()\n\nset(MODELBOX_MANAGER_INCLUDE \n    ${MANAGER_INCLUDES} \n    CACHE INTERNAL \"\")\n    \nset(MODELBOX_MANAGER_SOURCES ${MODELBOX_SOURCES} ${MODELBOX_MANAGER_CLIENT_SOURCE} CACHE INTERNAL \"\")\nset(MODELBOX_MANAGER_LINK_LIBRARIES \n    ${HUAWEI_SECURE_C_LIBRARIES}\n    CACHE INTERNAL \"\")"
  },
  {
    "path": "src/modelbox/manager/etc/init.d/modelbox-manager.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n### BEGIN INIT INFO\n# Provides:        modelbox-manager\n# Required-Start:  $network \n# Required-Stop:   $network \n# Default-Start:   2 3 4 5\n# Default-Stop:\n# Short-Description: Start modelbox-manager service\n### END INIT INFO\n\nPATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin\n\nMODELBOX_ROOT=$(cd $(dirname $0)/../../ && pwd)\nif [ \"$MODELBOX_ROOT\" = \"/\" ]; then\n\tMODELBOX_ROOT=\"\"\nfi\n\n# this env may changed by script, do not modify\nCUSTOM_ENV_FILE=\"\"\n\nif [ -f \"$CUSTOM_ENV_FILE\" ]; then\n\t. $CUSTOM_ENV_FILE\nelse\n\t. ${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager-opts\n\tPIDDIR=${MODELBOX_ROOT}/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox-manager\n\tPIDFILE=${MODELBOX_ROOT}/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox-manager.pid\n\tMANAGER_INITSCRIPT_DIR=${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/init-script/modelbox-manager\nfi\n\nMODELBOX_MANAGER=${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_BINDIR@/modelbox-manager\n\nif [ ! -x \"${MODELBOX_MANAGER}\" ]; then\n\techo \"manager not exists: ${MODELBOX_MANAGER}\"\n\texit 5\nfi\n\n\ncase $1 in\n\texec)\n\t\tif [ ! -d \"$PIDDIR\" ]; then\n\t\t\tmkdir $PIDDIR\n\t\tfi\n\n\t\tif [ -d ${MANAGER_INITSCRIPT_DIR} ]; then\n\t\t\tfor i in ${MANAGER_INITSCRIPT_DIR}/*.sh; do\n\t\t\t\tif [ -r $i ]; then\n\t\t\t\t. 
$i\n\t\t\t\tfi\n\t\t\tdone\n\t\t\tunset i\t\n\t\tfi\n\n\t\texec $MODELBOX_MANAGER $MODELBOX_MANAGER_OPTS -v -f -p $PIDFILE\n\t\t;;\n\tstart)\n\t\tif [ ! -d \"$PIDDIR\" ]; then\n\t\t\tmkdir $PIDDIR\n\t\tfi\n\n\t\tif [ -d ${MANAGER_INITSCRIPT_DIR} ]; then\n\t\t\tfor i in ${MANAGER_INITSCRIPT_DIR}/*.sh; do\n\t\t\t\tif [ -r $i ]; then\n\t\t\t\t. $i\n\t\t\t\tfi\n\t\t\tdone\n\t\t\tunset i\t\n\t\tfi\n\n\t\t$MODELBOX_MANAGER $MODELBOX_MANAGER_OPTS -p $PIDFILE\n\t\tLOOP=0\n\t\twhile true; do\n\t\t\tif [ -e \"$PIDFILE\" ]; then\n\t\t\t\tbreak;\n\t\t\tfi\n\t\t\tLOOP=$((LOOP+1))\n\n\t\t\tif [ $LOOP -gt 10 ]; then\n\t\t\t\techo \"start modelbox-manager service failed.\"\n\t\t\t\t\"$0\" stop\n\t\t\t\texit 1\n\t\t\tfi\n\t\t\tsleep .5\n\t\tdone\n\n\t\tPID=\"$(cat $PIDFILE 2>/dev/null)\"\n\t\tif [ -z \"$PID\" ]; then\n\t\t\techo \"start modelbox-manager service failed.\"\n\t\t\texit 1\n\t\tfi\n\t\tif [ ! -e \"/proc/$PID\" ]; then\n\t\t\techo \"start modelbox-manager service failed.\"\n\t\t\texit 1\n\t\tfi\n\t\techo \"start modelbox-manager service success.\"\n\t\t;;\n\tstop)\n\t\tif [ ! -f \"$PIDFILE\" ]; then\n\t\t\techo \"modelbox-manager service is stopped.\"\n\t\t\texit 0\n\t\tfi\n\t\tPID=\"$(cat $PIDFILE 2>/dev/null)\"\n\t\tif [ ! -e \"/proc/$PID\" ] || [ -z \"$PID\" ]; then\n\t\t\techo \"modelbox-manager service is stopped\"\n\t\t\texit 0\n\t\tfi\n\n\t\tkill -TERM \"$PID\"\n\t\tif [ $? -ne 0 ]; then\n\t\t\techo \"stop modelbox-manager service failed.\"\n\t\t\texit 1;\n\t\tfi\n\n\t\tLOOP=1\n\t\twhile true; do\n\t\t\tif [ ! -d \"/proc/$PID\" ]; then\n\t\t\t\tbreak;\n\t\t\tfi\n\n\t\t\tif [ $LOOP -gt 10 ]; then\n\t\t\t\tkill -9 \"$PID\"\n\t\t\t\tbreak;\n\t\t\tfi\n\t\t\tLOOP=$((LOOP+1))\n\t\t\tsleep .5\n\t\tdone\n\t\trm -f \"$PIDFILE\"\n\t\techo \"stop modelbox-manager service success.\"\n\t\t;;\n\trestart)\n\t\t\"$0\" stop && sleep 1 && \"$0\" start\n\t\t;;\n\treload)\n\t\tif [ ! 
-f \"$PIDFILE\" ]; then\n\t\t\techo \"modelbox-manager service is stopped.\"\n\t\t\texit 0\n\t\tfi\n\t\tPID=\"$(cat $PIDFILE 2>/dev/null)\"\n\t\tif [ ! -e \"/proc/$PID\" ] || [ -z \"$PID\" ]; then\n\t\t\techo \"modelbox-manager service is stopped\"\n\t\t\texit 0\n\t\tfi\n\n\t\tkill -HUP \"$PID\"\n\t\tif [ $? -ne 0 ]; then\n\t\t\techo \"reload modelbox-manager service failed.\"\n\t\t\texit 1;\n\t\tfi\n\t\techo \"reload modelbox-manager service success.\"\n\t\t;;\n\tstatus)\n\t\tPID=\"$(cat \"$PIDFILE\" 2>/dev/null)\"\n\t\tif [ ! -e \"/proc/$PID\" ] || [ -z \"$PID\" ]; then\n\t\t\techo \"modelbox-manager service is not running.\"\n\t\t\texit 1\n\t\tfi\n\t\techo \"modelbox-manager service is running. pid $PID\"\n\t\tstatus=$?\n\t\t;;\n\t*)\n\t\techo \"Usage: $0 {start|exec|stop|restart|status}\"\n\t\texit 2\n\t\t;;\nesac\n\nexit $status\n\n"
  },
  {
    "path": "src/modelbox/manager/etc/manager-opts",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nMODELBOX_ROOT=$(cd $(dirname $0)/../../ && pwd)\nif [ \"$MODELBOX_ROOT\" = \"/\" ]; then\n\tMODELBOX_ROOT=\"\"\nfi\n\n# modelbox server opts\nMODELBOX_MANAGER_OPTS=\"-c ${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager.conf\""
  },
  {
    "path": "src/modelbox/manager/etc/manager.conf.in",
    "content": "# log level\nloglevel INFO\n\n# log file path\nlogfile @MODELBOX_ROOT_VAR@/var/log/modelbox/manager.log\n\n# default watchdog timeout\n# watchdog-timeout 90\n\n#\n# app -name \"appname\" -pidfile \"@MODELBOX_ROOT_VAR@/run/app.pid\" -check-alive -check-alive-time [90] -heartbeat-interval [5] \\\n# --kill-cmd \"/path/to/script/to/kill\" \\\n# -- run command list here\n#\napp -name \"modelbox\" -check-alive -pidfile \"@MODELBOX_ROOT_VAR@/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid\" -- @MODELBOX_ROOT_VAR@/etc/init.d/modelbox start\n\n#\n#"
  },
  {
    "path": "src/modelbox/manager/etc/modelbox-manager.service.in",
    "content": "[Unit]\nDescription=modelbox server manager\nAfter=network.target \nStartLimitBurst=0\nStartLimitIntervalSec=60\n\n[Service]\nType=forking\nPermissionsStartOnly=True\nPIDFile=/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox-manager.pid\nEnvironmentFile=@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/manager-opts\nExecStart=@CMAKE_INSTALL_FULL_BINDIR@/modelbox-manager -p /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox-manager.pid $MODELBOX_MANAGER_OPTS \nKillMode=process\nLimitNOFILE=100000\nRestart=always\nRestartSec=3\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "src/modelbox/manager/include/modelbox/manager/manager_monitor_client.h",
    "content": "/*\r\n * Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved.\r\n */\r\n\r\n#ifndef MODELBOX_MANAGER_MONITOR_CLIENT_H\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\nint app_monitor_keyfile(char *file);\r\n\r\nint app_monitor_init(const char *name, const char *keyfile);\r\n\r\nint app_monitor_keepalive_time(void);\r\n\r\nint app_monitor_heartbeat_interval(void);\r\n\r\nint app_monitor_heartbeat(void);\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif  // !MODELBOX_MANAGER_MONITOR_CLIENT_H"
  },
  {
    "path": "src/modelbox/manager/src/common.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"common.h\"\r\n\r\n#include <errno.h>\r\n#include <fcntl.h>\r\n#include <netdb.h>\r\n#include <netinet/tcp.h>\r\n#include <stdio.h>\r\n#include <string.h>\r\n#include <sys/epoll.h>\r\n#include <sys/socket.h>\r\n#include <sys/types.h>\r\n#include <unistd.h>\r\n\r\n#ifndef BUF_LEN_32\r\n#define BUF_LEN_32 32\r\n#endif\r\n\r\nint create_pid(char *pid_file) {\r\n  int fd = 0;\r\n  int flags;\r\n  char buff[BUF_LEN_32];\r\n\r\n  fd = open(pid_file, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);\r\n  if (fd == -1) {\r\n    fprintf(stderr, \"create pid file failed, %s\\n\", strerror(errno));\r\n    return -1;\r\n  }\r\n\r\n  flags = fcntl(fd, F_GETFD);\r\n  if (flags < 0) {\r\n    fprintf(stderr, \"Could not get flags for PID file %s\\n\", pid_file);\r\n    goto errout;\r\n  }\r\n\r\n  flags |= FD_CLOEXEC;\r\n  if (fcntl(fd, F_SETFD, flags) == -1) {\r\n    fprintf(stderr, \"Could not set flags for PID file %s\\n\", pid_file);\r\n    goto errout;\r\n  }\r\n\r\n  if (lockf(fd, F_TLOCK, 0) < 0) {\r\n    fprintf(stderr, \"Server is already running.\\n\");\r\n    goto errout;\r\n  }\r\n\r\n  snprintf(buff, BUF_LEN_32, \"%d\\n\", getpid());\r\n\r\n  if (write(fd, buff, strnlen(buff, BUF_LEN_32)) < 0) {\r\n    fprintf(stderr, \"write pid to file failed, %s.\\n\", 
strerror(errno));\r\n    goto errout;\r\n  }\r\n\r\n  return fd;\r\nerrout:\r\n  if (fd > 0) {\r\n    close(fd);\r\n  }\r\n  return -1;\r\n}\r\n\r\npid_t get_pid_from_pidfile(char *pid_file, int *is_locked) {\r\n  int fd = 0;\r\n  char buff[BUF_LEN_32];\r\n  int locked = 0;\r\n  pid_t pid;\r\n\r\n  fd = open(pid_file, O_RDONLY, S_IRUSR | S_IWUSR);\r\n  if (fd == -1) {\r\n    fprintf(stderr, \"open pid file %s failed, %s\\n\", pid_file, strerror(errno));\r\n    return -1;\r\n  }\r\n\r\n  if (lockf(fd, F_TLOCK, 0) < 0) {\r\n    locked = 1;\r\n  }\r\n\r\n  snprintf(buff, BUF_LEN_32, \"%d\\n\", getpid());\r\n\r\n  buff[0] = 0;\r\n  if (read(fd, buff, BUF_LEN_32) < 0) {\r\n    fprintf(stderr, \"read pid from file %s failed, %s.\\n\", pid_file, strerror(errno));\r\n    goto errout;\r\n  }\r\n\r\n  pid = atoi(buff);\r\n\r\n  if (is_locked) {\r\n    *is_locked = locked;\r\n  }\r\n\r\n  close(fd);\r\n  return pid;\r\nerrout:\r\n  if (fd > 0) {\r\n    close(fd);\r\n  }\r\n  return -1;\r\n}\r\n\r\nunsigned long get_tick(void) {\r\n  struct timespec ts;\r\n  unsigned theTick = 0U;\r\n  clock_gettime(CLOCK_REALTIME, &ts);\r\n  theTick = ts.tv_nsec / 1000000;\r\n  theTick += ts.tv_sec * 1000;\r\n  return theTick;\r\n}\r\n"
  },
  {
    "path": "src/modelbox/manager/src/common.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MODELBOX_MANAGER_COMMON_H\r\n#define MODELBOX_MANAGER_COMMON_H\r\n\r\n#include <arpa/inet.h>\r\n#include <errno.h>\r\n#include <fcntl.h>\r\n#include <getopt.h>\r\n#include <limits.h>\r\n#include <linux/limits.h>\r\n#include <netdb.h>\r\n#include <netinet/in.h>\r\n#include <netinet/tcp.h>\r\n#include <pthread.h>\r\n#include <signal.h>\r\n#include <stdbool.h>\r\n#include <stdint.h>\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <string.h>\r\n#include <sys/epoll.h>\r\n#include <sys/file.h>\r\n#include <sys/ioctl.h>\r\n#include <sys/socket.h>\r\n#include <sys/stat.h>\r\n#include <sys/types.h>\r\n#include <sys/un.h>\r\n#include <time.h>\r\n#include <unistd.h>\r\n\r\n#include \"list.h\"\r\n#include \"log.h\"\r\n#include \"securec.h\"\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#define max(x, y) (((int)(x) > (int)(y)) ? x : y)\r\n#define likely(x) __builtin_expect((x), 1)\r\n#define unlikely(x) __builtin_expect((x), 0)\r\n\r\nextern int create_pid(char *pid_file);\r\n\r\nextern pid_t get_pid_from_pidfile(char *pid_file, int *is_locked);\r\n\r\nextern unsigned long get_tick(void);\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif  /*__cplusplus */\r\n#endif  // !MODELBOX_MANAGER_COMMON_H\r\n"
  },
  {
    "path": "src/modelbox/manager/src/conf.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"conf.h\"\r\n\r\n#include <errno.h>\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <string.h>\r\n\r\n#define CONF_LINE_MAX 8192\r\n#define DEFAULT_LOG_NUM 48\r\n#define DEFAULT_LOG_SIZE (1024 * 1024 * 64)\r\n#define BUF_LEN_64 64\r\n#define MAX_ARGV_NUM 128\r\n\r\nMANAGER_LOG_LEVEL conf_log_level = MANAGER_LOG_INFO;\r\nint conf_log_num = DEFAULT_LOG_NUM;\r\nsize_t conf_log_size = DEFAULT_LOG_SIZE;\r\nchar conf_log_file[PATH_MAX];\r\n\r\nint conf_parse_int(void *item, int argc, char *argv[]) {\r\n  if (argc < 0) {\r\n    return -1;\r\n  }\r\n\r\n  char *num = argv[1];\r\n  struct CONF_PARSE_INT *conf_int = item;\r\n  int num_int = atoi(num);\r\n\r\n  if (num_int > conf_int->max) {\r\n    *(conf_int->value) = conf_int->max;\r\n  } else if (num_int < conf_int->min) {\r\n    *(conf_int->value) = conf_int->min;\r\n  } else {\r\n    *(conf_int->value) = num_int;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint conf_parse_string(void *item, int argc, char *argv[]) {\r\n  if (argc < 0) {\r\n    return -1;\r\n  }\r\n\r\n  char *str = argv[1];\r\n  struct CONF_PARSE_STRING *conf_string = item;\r\n\r\n  strncpy(conf_string->value, str, conf_string->max);\r\n\r\n  return 0;\r\n}\r\n\r\nint conf_parse_size(void *item, int argc, char *argv[]) {\r\n  size_t size_num;\r\n  struct 
CONF_PARSE_SIZE *conf_size = item;\r\n\r\n  if (argc < 0) {\r\n    return -1;\r\n  }\r\n\r\n  char *size = argv[1];\r\n\r\n  if (strstr(size, \"k\") || strstr(size, \"K\")) {\r\n    size_num = atoi(size) * 1024;\r\n  } else if (strstr(size, \"m\") || strstr(size, \"M\")) {\r\n    size_num = atoi(size) * 1024 * 1024;\r\n  } else if (strstr(size, \"g\") || strstr(size, \"G\")) {\r\n    size_num = atoi(size) * 1024 * 1024 * 1024;\r\n  } else {\r\n    size_num = atoi(size);\r\n  }\r\n\r\n  if (size_num > conf_size->max) {\r\n    *(conf_size->value) = conf_size->max;\r\n  } else if (size_num < conf_size->min) {\r\n    *(conf_size->value) = conf_size->min;\r\n  } else {\r\n    *(conf_size->value) = size_num;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint conf_parse_loglevel(void *item, int argc, char *argv[]) {\r\n  if (argc < 0) {\r\n    return -1;\r\n  }\r\n\r\n  char *log_level = argv[1];\r\n\r\n  if ((strncmp(\"DEBUG\", log_level, sizeof(\"DEBUG\")) == 0) ||\r\n      (strncmp(\"debug\", log_level, sizeof(\"debug\")) == 0)) {\r\n    conf_log_level = MANAGER_LOG_DBG;\r\n  } else if ((strncmp(\"INFO\", log_level, sizeof(\"INFO\")) == 0) ||\r\n             (strncmp(\"info\", log_level, sizeof(\"info\")) == 0)) {\r\n    conf_log_level = MANAGER_LOG_INFO;\r\n  } else if ((strncmp(\"NOTICE\", log_level, sizeof(\"NOTICE\")) == 0) ||\r\n             (strncmp(\"notice\", log_level, sizeof(\"notice\")) == 0)) {\r\n    conf_log_level = MANAGER_LOG_NOTE;\r\n  } else if ((strncmp(\"WARN\", log_level, sizeof(\"WARN\")) == 0) ||\r\n             (strncmp(\"warn\", log_level, sizeof(\"warn\")) == 0)) {\r\n    conf_log_level = MANAGER_LOG_WARN;\r\n  } else if ((strncmp(\"ERROR\", log_level, sizeof(\"ERROR\")) == 0) ||\r\n             (strncmp(\"error\", log_level, sizeof(\"error\")) == 0)) {\r\n    conf_log_level = MANAGER_LOG_ERR;\r\n  } else if ((strncmp(\"FATAL\", log_level, sizeof(\"FATAL\")) == 0) ||\r\n             (strncmp(\"fatal\", log_level, sizeof(\"fatal\")) == 0)) {\r\n    
conf_log_level = MANAGER_LOG_FATAL;\r\n  } else {\r\n    conf_log_level = MANAGER_LOG_INFO;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nstatic int _parse_args(char *key, char *value, int max_argv, int *argc,\r\n                       char *argv[]) {\r\n  int num = 0;\r\n  int quotation = 0;\r\n  int is_in_args = 0;\r\n  char end_char = ' ';\r\n  int escape = 0;\r\n\r\n  if (value == NULL) {\r\n    return -1;\r\n  }\r\n\r\n  argv[0] = key;\r\n  num++;\r\n\r\n  do {\r\n    /* 如果字符是转义，则跳过转义字符 */\r\n    if (escape) {\r\n      escape = 0;\r\n      value++;\r\n      continue;\r\n    }\r\n    /* 如果是转义字符，则设置转义标志 */\r\n    if (*value == '\\\\') {\r\n      if (escape == 0) {\r\n        escape = 1;\r\n\r\n        /* 将后续字符前移一位 */\r\n        char *tmp = value + 1;\r\n        for (; *tmp; tmp++) {\r\n          *(tmp - 1) = *tmp;\r\n        }\r\n        *(tmp - 1) = 0;\r\n        continue;\r\n      }\r\n    }\r\n    /* 如果是引号，则引号中的数据解释为参数 */\r\n    if (*value == '\"' || *value == '\\'') {\r\n      if (quotation == 0) {\r\n        quotation = 1;\r\n        /* 设置解析结束标志字符 */\r\n        end_char = *value;\r\n        value++;\r\n        continue;\r\n      }\r\n    }\r\n\r\n    if (is_in_args == 0 && *value == '\\t') {\r\n      *value = ' ';\r\n    }\r\n\r\n    /* 如果遇到结束字符 */\r\n    if (*value == end_char) {\r\n      /* 如果未处理任何参数，则跳过 */\r\n      if (is_in_args == 0) {\r\n        if (quotation == 0) {\r\n          value++;\r\n          continue;\r\n        }\r\n\r\n        /*如果是\"\"号，则设置值为'\\0'*/\r\n        argv[num] = value;\r\n      }\r\n\r\n      /* 将对应最后标志位置0 */\r\n      *value = '\\0';\r\n      /* 还原标志字符为空格 */\r\n      end_char = ' ';\r\n      /* 参数解析结束，参数个数加1 */\r\n      is_in_args = 0;\r\n      num++;\r\n      if (num >= max_argv - 1) {\r\n        break;\r\n      }\r\n      quotation = 0;\r\n      value++;\r\n      continue;\r\n    }\r\n\r\n    /* 参数开始，初始化argv */\r\n    if (is_in_args == 0) {\r\n      argv[num] = value;\r\n      is_in_args = 1;\r\n    }\r\n\r\n    value++;\r\n  } while 
(*value != '\\0');\r\n\r\n  /* 处理最后一个参数 */\r\n  if (is_in_args == 1) {\r\n    num++;\r\n  }\r\n\r\n  if (num <= 1) {\r\n    return -1;\r\n  }\r\n\r\n  *argc = num;\r\n  argv[num] = 0;\r\n\r\n  return 0;\r\n}\r\n\r\n/* getopts函数不支持不同args重入，getopt_data.__nextchar是上次的结果，\r\n * 此函数将上次处理结果还原，通过这种方法重置getopts\r\n */\r\nstatic void _reset_getopts(void) {\r\n  static struct option options[] = {{\"-\", 0, 0, 0}, {0, 0, 0, 0}};\r\n  static int argc = 2;\r\n  static char *argv[2];\r\n\r\n  argv[0] = \"reset\";\r\n  argv[1] = \"\";\r\n\r\n  optind = 1;\r\n  opterr = 0;\r\n  getopt_long_only(argc, argv, \"\", options, NULL);\r\n}\r\n\r\nstatic int parse_conf(struct config_map config_map[], char *key, char *value) {\r\n  int i = 0;\r\n  int argc = 0;\r\n  char *argv[MAX_ARGV_NUM];\r\n  int old_optind;\r\n  int old_opterr;\r\n  int ret = 0;\r\n\r\n  for (i = 0;\r\n       config_map[i].parse_func != NULL && config_map[i].config_name != NULL;\r\n       i++) {\r\n    if (strncmp(config_map[i].config_name, key, CONF_LINE_MAX) != 0) {\r\n      continue;\r\n    }\r\n\r\n    argc = 0;\r\n    memset(argv, 0, sizeof(argv));\r\n    if (_parse_args(key, value, MAX_ARGV_NUM, &argc, argv) != 0) {\r\n      manager_log(MANAGER_LOG_ERR, \"parse config line failed.\");\r\n      return -1;\r\n    }\r\n\r\n    /* 重置getopts，并调用处理函数 */\r\n    old_optind = optind;\r\n    old_opterr = opterr;\r\n    _reset_getopts();\r\n    optind = 1;\r\n    opterr = 1;\r\n    ret = config_map[i].parse_func(config_map[i].item, argc, argv);\r\n    _reset_getopts();\r\n    optind = old_optind;\r\n    opterr = old_opterr;\r\n\r\n    if (ret != 0) {\r\n      return -1;\r\n    }\r\n\r\n    return 0;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nstatic int load_conf_from_file(struct config_map config_map[],\r\n                               const char *conf_file) {\r\n  char read_line[CONF_LINE_MAX];\r\n  char conf_line[CONF_LINE_MAX];\r\n  char *line = NULL;\r\n  char conf_key[BUF_LEN_64];\r\n  char conf_value[CONF_LINE_MAX];\r\n  
int filed_num = 0;\r\n  int line_no = 0;\r\n  int line_len = 0;\r\n  int read_len = 0;\r\n  FILE *fp;\r\n\r\n  if (conf_file == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"conf file is invalid.\");\r\n    return -1;\r\n  }\r\n\r\n  fp = fopen(conf_file, \"r\");\r\n  if (fp == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"open %s failed, %s\", conf_file,\r\n                strerror(errno));\r\n    return -1;\r\n  }\r\n\r\n  while (fgets(read_line, sizeof(read_line), fp) != NULL) {\r\n    line_no++;\r\n    read_len = strnlen(read_line, sizeof(read_line));\r\n    if (read_len == 0) {\r\n      continue;\r\n    }\r\n\r\n    if (read_line[0] == '#') {\r\n      continue;\r\n    }\r\n\r\n    if (read_line[read_len - 1] == '\\\\' ||\r\n        (read_len >= 2 && read_line[read_len - 2] == '\\\\')) {\r\n      if (line == NULL) {\r\n        line = conf_line;\r\n        line[0] = '\\0';\r\n      }\r\n\r\n      if (line_len + read_len - 1 >= CONF_LINE_MAX) {\r\n        goto errout;\r\n      }\r\n\r\n      if (read_len >= 2 && read_line[read_len - 2] == '\\\\') {\r\n        read_len -= 1;\r\n      }\r\n\r\n      strncpy(line + line_len, read_line, read_len - 1);\r\n      line_len += read_len - 1;\r\n      line[line_len] = '\\0';\r\n      continue;\r\n    } else if (line != NULL) {\r\n      if (line_len + read_len >= CONF_LINE_MAX) {\r\n        goto errout;\r\n      }\r\n\r\n      strncpy(line + line_len, read_line, sizeof(conf_line) - line_len);\r\n      line_len += read_len;\r\n      line[line_len] = '\\0';\r\n    } else {\r\n      line = read_line;\r\n    }\r\n\r\n    filed_num = sscanf(line, \"%63s %1023[^\\n]s\", conf_key, conf_value);\r\n    line = NULL;\r\n    line_len = 0;\r\n    if (filed_num <= 0) {\r\n      continue;\r\n    }\r\n\r\n    if (conf_key[0] == '#') {\r\n      continue;\r\n    }\r\n\r\n    if (filed_num != 2) {\r\n      goto errout;\r\n    }\r\n\r\n    if (parse_conf(config_map, conf_key, conf_value) != 0) {\r\n      goto errout;\r\n    }\r\n  }\r\n\r\n  
fclose(fp);\r\n\r\n  return 0;\r\n\r\nerrout:\r\n  if (fp) {\r\n    if (line_no > 0) {\r\n      manager_log(MANAGER_LOG_ERR, \"invalid config at line %s:%d %s\", conf_file,\r\n                  line_no, line);\r\n    }\r\n    fclose(fp);\r\n  }\r\n  return -1;\r\n}\r\n\r\nstatic struct config_map common_config_map[] = {\r\n    {CONF_LOG_LEVEL, conf_parse_loglevel, NULL},\r\n    {CONF_LOG_FILE, conf_parse_string,\r\n     .item =\r\n         &(struct CONF_PARSE_STRING){.value = conf_log_file, .max = PATH_MAX}},\r\n    {CONF_LOG_NUM, conf_parse_int,\r\n     .item = &(\r\n         struct CONF_PARSE_INT){.value = &conf_log_num, .min = 1, .max = 512}},\r\n    {CONF_LOG_SIZE, conf_parse_size,\r\n     .item = &(struct CONF_PARSE_SIZE){.value = &conf_log_size,\r\n                                       .min = 1024 * 1024,\r\n                                       .max = 1024 * 1024 * 1024}},\r\n    {NULL, NULL, NULL},\r\n};\r\n\r\nint load_conf(struct config_map config_map[], const char *conf_file) {\r\n  if (conf_file == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"conf file is null\");\r\n    return -1;\r\n  }\r\n\r\n  if (config_map == NULL) {\r\n    manager_log(MANAGER_LOG_END, \"config_map is null\");\r\n    return -1;\r\n  }\r\n\r\n  /* 加载公共部分配置 */\r\n  if (load_conf_from_file(common_config_map, conf_file) != 0) {\r\n    manager_log(MANAGER_LOG_END, \"load common config failed\");\r\n    return -1;\r\n  }\r\n\r\n  return load_conf_from_file(config_map, conf_file);\r\n}\r\n"
  },
  {
    "path": "src/modelbox/manager/src/conf.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MANAGER_CONF_H\r\n#define MANAGER_CONF_H\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#include <getopt.h>\r\n#include <limits.h>\r\n#include <unistd.h>\r\n\r\n#include \"log.h\"\r\n\r\n#define CONF_LOG_LEVEL \"loglevel\"\r\n#define CONF_LOG_NUM \"lognum\"\r\n#define CONF_LOG_SIZE \"logsize\"\r\n#define CONF_LOG_FILE \"logfile\"\r\n\r\nstruct CONF_PARSE_INT {\r\n  int *value;\r\n  int min;\r\n  int max;\r\n};\r\nextern int conf_parse_string(void *item, int argc, char *argv[]);\r\n\r\nstruct CONF_PARSE_STRING {\r\n  char *value;\r\n  int max;\r\n};\r\nextern int conf_parse_int(void *item, int argc, char *argv[]);\r\n\r\nstruct CONF_PARSE_SIZE {\r\n  size_t *value;\r\n  size_t min;\r\n  size_t max;\r\n};\r\nextern int conf_parse_size(void *item, int argc, char *argv[]);\r\n\r\nextern MANAGER_LOG_LEVEL conf_log_level;\r\nextern int conf_log_num;\r\nextern size_t conf_log_size;\r\nextern char conf_log_file[PATH_MAX];\r\n\r\ntypedef int (*parse_func)(void *item, int argc, char *argv[]);\r\nstruct config_map {\r\n  const char *config_name;\r\n  parse_func parse_func;\r\n  void *item;\r\n};\r\n\r\nextern int load_conf(struct config_map config_map[], const char *conf_file);\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif  /*__cplusplus */\r\n#endif  
// !MANAGER_CONF_H\r\n"
  },
  {
    "path": "src/modelbox/manager/src/hashtable.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef _hashtable_\r\n#define _hashtable_\r\n\r\n#include <stdbool.h>\r\n#include <stdint.h>\r\n\r\n#include \"list.h\"\r\n\r\n#ifndef __WORDSIZE\r\n#define __WORDSIZE (__SIZEOF_LONG__ * 8)\r\n#endif\r\n\r\n#ifndef BITS_PER_LONG\r\n#define BITS_PER_LONG __WORDSIZE\r\n#endif\r\n\r\n#ifndef __same_type\r\n#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))\r\n#endif\r\n#define MANAGER_BUILD_BUG_ON_ZERO(e) (sizeof(struct { int : -!!(e); }))\r\n#define __must_be_array(a) MANAGER_BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))\r\n#define MANAGER_ARRAY_SIZE(arr) \\\r\n  (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))\r\n\r\n#define MANAGER_DEFINE_HASHTABLE(name, bits)                           \\\r\n  struct hlist_head name[1 << (bits)] = {[0 ...((1 << (bits)) - 1)] = \\\r\n                                             MANAGER_HLIST_HEAD_INIT}\r\n\r\n#define MANAGER_DECLARE_HASHTABLE(name, bits) struct hlist_head name[1 << (bits)]\r\n\r\n#define MANAGER_HASH_SIZE(name) (MANAGER_ARRAY_SIZE(name))\r\n#define MANAGER_HASH_BITS(name) ilog2(MANAGER_HASH_SIZE(name))\r\n\r\nstatic inline int fls(unsigned int x) { return 32 - __builtin_clz(x); }\r\n\r\nstatic inline int fls64(unsigned long x) { return 64 - __builtin_clzll(x); }\r\n\r\nstatic inline int 
__ilog2_u32(uint32_t n) { return fls(n) - 1; }\r\n\r\nstatic inline int __ilog2_u64(uint64_t n) { return fls64(n) - 1; }\r\n\r\n/* clang-format off */\r\n#define ilog2(n)\t\t\t\t\\\r\n(\t\t\t\t\t\t\\\r\n\t__builtin_constant_p(n) ? (\t\t\\\r\n\t\t(n) < 2 ? 0 :\t\t\t\\\r\n\t\t(n) & (1ULL << 63) ? 63 :\t\\\r\n\t\t(n) & (1ULL << 62) ? 62 :\t\\\r\n\t\t(n) & (1ULL << 61) ? 61 :\t\\\r\n\t\t(n) & (1ULL << 60) ? 60 :\t\\\r\n\t\t(n) & (1ULL << 59) ? 59 :\t\\\r\n\t\t(n) & (1ULL << 58) ? 58 :\t\\\r\n\t\t(n) & (1ULL << 57) ? 57 :\t\\\r\n\t\t(n) & (1ULL << 56) ? 56 :\t\\\r\n\t\t(n) & (1ULL << 55) ? 55 :\t\\\r\n\t\t(n) & (1ULL << 54) ? 54 :\t\\\r\n\t\t(n) & (1ULL << 53) ? 53 :\t\\\r\n\t\t(n) & (1ULL << 52) ? 52 :\t\\\r\n\t\t(n) & (1ULL << 51) ? 51 :\t\\\r\n\t\t(n) & (1ULL << 50) ? 50 :\t\\\r\n\t\t(n) & (1ULL << 49) ? 49 :\t\\\r\n\t\t(n) & (1ULL << 48) ? 48 :\t\\\r\n\t\t(n) & (1ULL << 47) ? 47 :\t\\\r\n\t\t(n) & (1ULL << 46) ? 46 :\t\\\r\n\t\t(n) & (1ULL << 45) ? 45 :\t\\\r\n\t\t(n) & (1ULL << 44) ? 44 :\t\\\r\n\t\t(n) & (1ULL << 43) ? 43 :\t\\\r\n\t\t(n) & (1ULL << 42) ? 42 :\t\\\r\n\t\t(n) & (1ULL << 41) ? 41 :\t\\\r\n\t\t(n) & (1ULL << 40) ? 40 :\t\\\r\n\t\t(n) & (1ULL << 39) ? 39 :\t\\\r\n\t\t(n) & (1ULL << 38) ? 38 :\t\\\r\n\t\t(n) & (1ULL << 37) ? 37 :\t\\\r\n\t\t(n) & (1ULL << 36) ? 36 :\t\\\r\n\t\t(n) & (1ULL << 35) ? 35 :\t\\\r\n\t\t(n) & (1ULL << 34) ? 34 :\t\\\r\n\t\t(n) & (1ULL << 33) ? 33 :\t\\\r\n\t\t(n) & (1ULL << 32) ? 32 :\t\\\r\n\t\t(n) & (1ULL << 31) ? 31 :\t\\\r\n\t\t(n) & (1ULL << 30) ? 30 :\t\\\r\n\t\t(n) & (1ULL << 29) ? 29 :\t\\\r\n\t\t(n) & (1ULL << 28) ? 28 :\t\\\r\n\t\t(n) & (1ULL << 27) ? 27 :\t\\\r\n\t\t(n) & (1ULL << 26) ? 26 :\t\\\r\n\t\t(n) & (1ULL << 25) ? 25 :\t\\\r\n\t\t(n) & (1ULL << 24) ? 24 :\t\\\r\n\t\t(n) & (1ULL << 23) ? 23 :\t\\\r\n\t\t(n) & (1ULL << 22) ? 22 :\t\\\r\n\t\t(n) & (1ULL << 21) ? 21 :\t\\\r\n\t\t(n) & (1ULL << 20) ? 20 :\t\\\r\n\t\t(n) & (1ULL << 19) ? 19 :\t\\\r\n\t\t(n) & (1ULL << 18) ? 18 :\t\\\r\n\t\t(n) & (1ULL << 17) ? 
17 :\t\\\r\n\t\t(n) & (1ULL << 16) ? 16 :\t\\\r\n\t\t(n) & (1ULL << 15) ? 15 :\t\\\r\n\t\t(n) & (1ULL << 14) ? 14 :\t\\\r\n\t\t(n) & (1ULL << 13) ? 13 :\t\\\r\n\t\t(n) & (1ULL << 12) ? 12 :\t\\\r\n\t\t(n) & (1ULL << 11) ? 11 :\t\\\r\n\t\t(n) & (1ULL << 10) ? 10 :\t\\\r\n\t\t(n) & (1ULL <<  9) ?  9 :\t\\\r\n\t\t(n) & (1ULL <<  8) ?  8 :\t\\\r\n\t\t(n) & (1ULL <<  7) ?  7 :\t\\\r\n\t\t(n) & (1ULL <<  6) ?  6 :\t\\\r\n\t\t(n) & (1ULL <<  5) ?  5 :\t\\\r\n\t\t(n) & (1ULL <<  4) ?  4 :\t\\\r\n\t\t(n) & (1ULL <<  3) ?  3 :\t\\\r\n\t\t(n) & (1ULL <<  2) ?  2 :\t\\\r\n\t\t1 ) :\t\t\t\t\\\r\n\t(sizeof(n) <= 4) ?\t\t\t\\\r\n\t__ilog2_u32(n) :\t\t\t\\\r\n\t__ilog2_u64(n)\t\t\t\t\\\r\n )\r\n/* clang-format on */\r\n\r\n#define GOLDEN_RATIO_32 0x61C88647\r\n#define GOLDEN_RATIO_64 0x61C8864680B583EBull\r\n\r\nstatic inline uint32_t __hash_32(uint32_t val) { return val * GOLDEN_RATIO_32; }\r\n\r\nstatic inline uint32_t hash_32(uint32_t val, unsigned int bits) {\r\n  /* High bits are more random, so use them. */\r\n  return __hash_32(val) >> (32 - bits);\r\n}\r\n\r\nstatic inline uint32_t hash_long(uint64_t val, unsigned int bits) {\r\n#if BITS_PER_LONG == 64\r\n  /* 64x64-bit multiply is efficient on all 64-bit processors */\r\n  return val * GOLDEN_RATIO_64 >> (64 - bits);\r\n#else\r\n  /* Hash 64 bits using only 32x32-bit multiply. 
*/\r\n  return hash_32((uint32_t)val ^ __hash_32(val >> 32), bits);\r\n#endif\r\n}\r\n\r\nstatic inline uint32_t hash_ptr(const void *ptr, unsigned int bits) {\r\n  return hash_long((unsigned long)ptr, bits);\r\n}\r\n\r\nstatic inline uint32_t hash32_ptr(const void *ptr) {\r\n  unsigned long val = (unsigned long)ptr;\r\n\r\n#if BITS_PER_LONG == 64\r\n  val ^= (val >> 32);\r\n#endif\r\n  return (uint32_t)val;\r\n}\r\n\r\nstatic inline unsigned long hash_string(const char *str) {\r\n  unsigned long v = 0;\r\n  const char *c;\r\n  for (c = str; *c;) v = (((v << 1) + (v >> 14)) ^ (*c++)) & 0x3fff;\r\n  return (v);\r\n}\r\n\r\n/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels.\r\n */\r\n#define hash_min(val, bits) \\\r\n  (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))\r\n\r\nstatic inline void __hash_init(struct hlist_head *ht, unsigned int sz) {\r\n  unsigned int i;\r\n\r\n  for (i = 0; i < sz; i++) MANAGER_INIT_HLIST_HEAD(&ht[i]);\r\n}\r\n\r\n#define MANAGER_JHASH_INITVAL 0xdeadbeef\r\n/* Best hash sizes are of power of two */\r\n#define jhash_size(n) ((uint32_t)1 << (n))\r\n/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */\r\n#define jhash_mask(n) (jhash_size(n) - 1)\r\n\r\nstatic inline uint32_t rol32(uint32_t word, unsigned int shift) {\r\n  return (word << shift) | (word >> ((-shift) & 31));\r\n}\r\n\r\n/* __jhash_mix -- mix 3 32-bit values reversibly. 
*/\r\n#define __jhash_mix(a, b, c) \\\r\n  {                          \\\r\n    a -= c;                  \\\r\n    a ^= rol32(c, 4);        \\\r\n    c += b;                  \\\r\n    b -= a;                  \\\r\n    b ^= rol32(a, 6);        \\\r\n    a += c;                  \\\r\n    c -= b;                  \\\r\n    c ^= rol32(b, 8);        \\\r\n    b += a;                  \\\r\n    a -= c;                  \\\r\n    a ^= rol32(c, 16);       \\\r\n    c += b;                  \\\r\n    b -= a;                  \\\r\n    b ^= rol32(a, 19);       \\\r\n    a += c;                  \\\r\n    c -= b;                  \\\r\n    c ^= rol32(b, 4);        \\\r\n    b += a;                  \\\r\n  }\r\n\r\n/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */\r\n#define __jhash_final(a, b, c) \\\r\n  {                            \\\r\n    c ^= b;                    \\\r\n    c -= rol32(b, 14);         \\\r\n    a ^= c;                    \\\r\n    a -= rol32(c, 11);         \\\r\n    b ^= a;                    \\\r\n    b -= rol32(a, 25);         \\\r\n    c ^= b;                    \\\r\n    c -= rol32(b, 16);         \\\r\n    a ^= c;                    \\\r\n    a -= rol32(c, 4);          \\\r\n    b ^= a;                    \\\r\n    b -= rol32(a, 14);         \\\r\n    c ^= b;                    \\\r\n    c -= rol32(b, 24);         \\\r\n  }\r\n\r\nstruct __una_u32 {\r\n  uint32_t x;\r\n} __attribute__((__packed__));\r\n\r\nstatic inline uint32_t __get_unaligned_cpu32(const void *p) {\r\n  const struct __una_u32 *ptr = (const struct __una_u32 *)p;\r\n  return ptr->x;\r\n}\r\n\r\nstatic inline uint32_t jhash(const void *key, uint32_t length,\r\n                             uint32_t initval) {\r\n  uint32_t a, b, c;\r\n  const uint8_t *k = (uint8_t *)key;\r\n\r\n  /* Set up the internal state */\r\n  a = b = c = MANAGER_JHASH_INITVAL + length + initval;\r\n\r\n  /* All but the last block: affect some 32 bits of (a,b,c) */\r\n  while (length > 12) 
{\r\n    a += __get_unaligned_cpu32(k);\r\n    b += __get_unaligned_cpu32(k + 4);\r\n    c += __get_unaligned_cpu32(k + 8);\r\n    __jhash_mix(a, b, c);\r\n    length -= 12;\r\n    k += 12;\r\n  }\r\n  /* Last block: affect all 32 bits of (c) */\r\n  /* All the case statements fall through */\r\n  switch (length) {\r\n    case 12:\r\n      c += (uint32_t)k[11] << 24;\r\n    case 11:\r\n      c += (uint32_t)k[10] << 16;\r\n    case 10:\r\n      c += (uint32_t)k[9] << 8;\r\n    case 9:\r\n      c += k[8];\r\n    case 8:\r\n      b += (uint32_t)k[7] << 24;\r\n    case 7:\r\n      b += (uint32_t)k[6] << 16;\r\n    case 6:\r\n      b += (uint32_t)k[5] << 8;\r\n    case 5:\r\n      b += k[4];\r\n    case 4:\r\n      a += (uint32_t)k[3] << 24;\r\n    case 3:\r\n      a += (uint32_t)k[2] << 16;\r\n    case 2:\r\n      a += (uint32_t)k[1] << 8;\r\n    case 1:\r\n      a += k[0];\r\n      __jhash_final(a, b, c);\r\n    case 0: /* Nothing left to add */\r\n      break;\r\n  }\r\n\r\n  return c;\r\n}\r\n\r\n#define hash_init(hashtable) __hash_init(hashtable, MANAGER_HASH_SIZE(hashtable))\r\n\r\n#define hash_add(hashtable, node, key) \\\r\n  hlist_add_head(node, &hashtable[hash_min(key, MANAGER_HASH_BITS(hashtable))])\r\n\r\nstatic inline bool hash_hashed(struct hlist_node *node) {\r\n  return !hlist_unhashed(node);\r\n}\r\n\r\nstatic inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) {\r\n  unsigned int i;\r\n\r\n  for (i = 0; i < sz; i++)\r\n    if (!hlist_empty(&ht[i])) return false;\r\n\r\n  return true;\r\n}\r\n\r\n#define hash_empty(hashtable) \\\r\n  __hash_empty(hashtable, MANAGER_HASH_SIZE(hashtable))\r\n\r\nstatic inline void hash_del(struct hlist_node *node) { hlist_del_init(node); }\r\n\r\n#define hash_for_each(name, bkt, obj, member)                                \\\r\n  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < MANAGER_HASH_SIZE(name); \\\r\n       (bkt)++)                                                              \\\r\n  
hlist_for_each_entry(obj, &name[bkt], member)\r\n\r\n#define hash_for_each_safe(name, bkt, tmp, obj, member)                      \\\r\n  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < MANAGER_HASH_SIZE(name); \\\r\n       (bkt)++)                                                              \\\r\n  hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)\r\n\r\n#define hash_for_each_possible(name, obj, member, key)                    \\\r\n  hlist_for_each_entry(obj, &name[hash_min(key, MANAGER_HASH_BITS(name))], \\\r\n                       member)\r\n\r\n#define hash_for_each_possible_safe(name, obj, tmp, member, key) \\\r\n  hlist_for_each_entry_safe(                                     \\\r\n      obj, tmp, &name[hash_min(key, MANAGER_HASH_BITS(name))], member)\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/modelbox/manager/src/list.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MANAGER_LIST_H\r\n#define MANAGER_LIST_H\r\n\r\n#include <stdlib.h>\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\nstruct list_head {\r\n  struct list_head *next, *prev;\r\n};\r\n\r\n#define MANAGER_LIST_HEAD_INIT(name) \\\r\n  { &(name), &(name) }\r\n\r\n#define MANAGER_LIST_HEAD(name) \\\r\n  struct list_head name = MANAGER_LIST_HEAD_INIT(name)\r\n\r\nstatic inline void MANAGER_INIT_LIST_HEAD(struct list_head *list) {\r\n  list->next = list;\r\n  list->prev = list;\r\n}\r\n\r\nstatic inline void __list_add(struct list_head *add, struct list_head *prev,\r\n                              struct list_head *next) {\r\n  next->prev = add;\r\n  add->next = next;\r\n  add->prev = prev;\r\n  prev->next = add;\r\n}\r\n\r\nstatic inline void list_add(struct list_head *add, struct list_head *head) {\r\n  __list_add(add, head, head->next);\r\n}\r\n\r\nstatic inline void list_add_tail(struct list_head *add,\r\n                                 struct list_head *head) {\r\n  __list_add(add, head->prev, head);\r\n}\r\n\r\nstatic inline void __list_del(struct list_head *prev, struct list_head *next) {\r\n  next->prev = prev;\r\n  prev->next = next;\r\n}\r\n\r\nstatic inline void list_del(struct list_head *entry) {\r\n  __list_del(entry->prev, 
entry->next);\r\n  entry->next = NULL;\r\n  entry->prev = NULL;\r\n}\r\n\r\nstatic inline void list_replace(struct list_head *old, struct list_head *add) {\r\n  add->next = old->next;\r\n  add->next->prev = add;\r\n  add->prev = old->prev;\r\n  add->prev->next = add;\r\n}\r\n\r\nstatic inline void list_replace_init(struct list_head *old,\r\n                                     struct list_head *add) {\r\n  list_replace(old, add);\r\n  MANAGER_INIT_LIST_HEAD(old);\r\n}\r\n\r\nstatic inline void list_del_init(struct list_head *entry) {\r\n  __list_del(entry->prev, entry->next);\r\n  MANAGER_INIT_LIST_HEAD(entry);\r\n}\r\n\r\nstatic inline void list_move(struct list_head *list, struct list_head *head) {\r\n  __list_del(list->prev, list->next);\r\n  list_add(list, head);\r\n}\r\n\r\nstatic inline void list_move_tail(struct list_head *list,\r\n                                  struct list_head *head) {\r\n  __list_del(list->prev, list->next);\r\n  list_add_tail(list, head);\r\n}\r\n\r\nstatic inline int list_is_last(const struct list_head *list,\r\n                               const struct list_head *head) {\r\n  return list->next == head;\r\n}\r\n\r\nstatic inline int list_empty(const struct list_head *head) {\r\n  return head->next == head;\r\n}\r\n\r\nstatic inline int list_is_singular(const struct list_head *head) {\r\n  return !list_empty(head) && (head->next == head->prev);\r\n}\r\n\r\n#ifndef offsetof\r\n#define offsetof(TYPE, MEMBER) ((size_t) & ((TYPE *)0)->MEMBER)\r\n#endif\r\n\r\n#define container_of(ptr, type, member)                \\\r\n  ({                                                   \\\r\n    const typeof(((type *)0)->member) *__mptr = (ptr); \\\r\n    (type *)((char *)__mptr - offsetof(type, member)); \\\r\n  })\r\n\r\n#define prefetch(x) __builtin_prefetch(x)\r\n\r\n#define list_entry(ptr, type, member) container_of(ptr, type, member)\r\n\r\n#define list_first_entry(ptr, type, member) \\\r\n  list_entry((ptr)->next, type, member)\r\n\r\n#define 
list_for_each(pos, head) \\\r\n  for (pos = (head)->next; (pos->next), pos != (head); pos = pos->next)\r\n\r\n#define list_for_each_safe(pos, n, head) \\\r\n  for (pos = (head)->next, n = pos->next; pos != (head); pos = n, n = pos->next)\r\n\r\n#define list_for_each_entry(pos, head, member)               \\\r\n  for (pos = list_entry((head)->next, typeof(*pos), member); \\\r\n       prefetch(pos->member.next), &pos->member != (head);   \\\r\n       pos = list_entry(pos->member.next, typeof(*pos), member))\r\n\r\n#define list_for_each_entry_safe(pos, n, head, member)        \\\r\n  for (pos = list_entry((head)->next, typeof(*pos), member),  \\\r\n      n = list_entry(pos->member.next, typeof(*pos), member); \\\r\n       &pos->member != (head);                                \\\r\n       pos = n, n = list_entry(n->member.next, typeof(*n), member))\r\n\r\nstruct hlist_head {\r\n  struct hlist_node *first;\r\n};\r\n\r\nstruct hlist_node {\r\n  struct hlist_node *next, **pprev;\r\n};\r\n\r\n#define MANAGER_HLIST_HEAD_INIT \\\r\n  { .first = NULL }\r\n#define MANAGER_HLIST_HEAD(name) struct hlist_head name = {.first = NULL}\r\n#define MANAGER_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)\r\nstatic inline void MANAGER_INIT_HLIST_NODE(struct hlist_node *h) {\r\n  h->next = NULL;\r\n  h->pprev = NULL;\r\n}\r\n\r\nstatic inline int hlist_unhashed(const struct hlist_node *h) {\r\n  return !h->pprev;\r\n}\r\n\r\nstatic inline int hlist_empty(const struct hlist_head *h) { return !h->first; }\r\n\r\nstatic inline void __hlist_del(struct hlist_node *n) {\r\n  struct hlist_node *next = n->next;\r\n  struct hlist_node **pprev = n->pprev;\r\n\r\n  *pprev = next;\r\n  if (next) next->pprev = pprev;\r\n}\r\n\r\nstatic inline void hlist_del(struct hlist_node *n) {\r\n  __hlist_del(n);\r\n  n->next = (struct hlist_node *)NULL;\r\n  n->pprev = (struct hlist_node **)NULL;\r\n}\r\n\r\nstatic inline void hlist_del_init(struct hlist_node *n) {\r\n  if (!hlist_unhashed(n)) {\r\n    
__hlist_del(n);\r\n    MANAGER_INIT_HLIST_NODE(n);\r\n  }\r\n}\r\n\r\nstatic inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) {\r\n  struct hlist_node *first = h->first;\r\n  n->next = first;\r\n  if (first) first->pprev = &n->next;\r\n  h->first = n;\r\n  n->pprev = &h->first;\r\n}\r\n\r\n#define hlist_entry(ptr, type, member) container_of(ptr, type, member)\r\n\r\n#define hlist_for_each(pos, head) \\\r\n  for (pos = (head)->first; pos; pos = pos->next)\r\n\r\n#define hlist_for_each_safe(pos, n, head)    \\\r\n  for (pos = (head)->first; pos && ({        \\\r\n                              n = pos->next; \\\r\n                              1;             \\\r\n                            });              \\\r\n       pos = n)\r\n\r\n#define hlist_entry_safe(ptr, type, member)              \\\r\n  ({                                                     \\\r\n    typeof(ptr) ____ptr = (ptr);                         \\\r\n    ____ptr ? hlist_entry(____ptr, type, member) : NULL; \\\r\n  })\r\n\r\n/**\r\n * hlist_for_each_entry\t- iterate over list of given type\r\n * @pos:\tthe type * to use as a loop cursor.\r\n * @head:\tthe head for your list.\r\n * @member:\tthe name of the hlist_node within the struct.\r\n */\r\n#define hlist_for_each_entry(pos, head, member)                            \\\r\n  for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); pos; \\\r\n       pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))\r\n\r\n/**\r\n * hlist_for_each_entry_continue - iterate over a hlist continuing after current\r\n * point\r\n * @pos:\tthe type * to use as a loop cursor.\r\n * @member:\tthe name of the hlist_node within the struct.\r\n */\r\n#define hlist_for_each_entry_continue(pos, member)                         \\\r\n  for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); \\\r\n       pos;                                                                \\\r\n       pos = 
hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))\r\n\r\n/**\r\n * hlist_for_each_entry_from - iterate over a hlist continuing from current\r\n * point\r\n * @pos:\tthe type * to use as a loop cursor.\r\n * @member:\tthe name of the hlist_node within the struct.\r\n */\r\n#define hlist_for_each_entry_from(pos, member) \\\r\n  for (; pos;                                  \\\r\n       pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))\r\n\r\n/**\r\n * hlist_for_each_entry_safe - iterate over list of given type safe against\r\n * removal of list entry\r\n * @pos:\tthe type * to use as a loop cursor.\r\n * @n:\t\tanother &struct hlist_node to use as temporary storage\r\n * @head:\tthe head for your list.\r\n * @member:\tthe name of the hlist_node within the struct.\r\n */\r\n#define hlist_for_each_entry_safe(pos, n, head, member)             \\\r\n  for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \\\r\n       pos && ({                                                    \\\r\n         n = pos->member.next;                                      \\\r\n         1;                                                         \\\r\n       });                                                          \\\r\n       pos = hlist_entry_safe(n, typeof(*pos), member))\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n#endif\r\n"
  },
  {
    "path": "src/modelbox/manager/src/log.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#include \"log.h\"\r\n\r\n#include <execinfo.h>\r\n#include <stdarg.h>\r\n#include <stddef.h>\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n\r\n#include \"tlog.h\"\r\n\r\nstatic manager_log_callback log_func = NULL;\r\n\r\nint manager_log_ext(MANAGER_LOG_LEVEL level, const char *file, int line,\r\n                   const char *func, void *userptr, const char *format, ...) {\r\n  int len = 0;\r\n  va_list ap;\r\n\r\n  if (log_func == NULL) {\r\n    va_start(ap, format);\r\n    vprintf(format, ap);\r\n    va_end(ap);\r\n    printf(\"\\n\");\r\n    return 0;\r\n  }\r\n\r\n  va_start(ap, format);\r\n  len = log_func(level, file, line, func, userptr, format, ap);\r\n  va_end(ap);\r\n\r\n  return len;\r\n}\r\n\r\nint manager_log_vext(MANAGER_LOG_LEVEL level, const char *file, int line,\r\n                    const char *func, void *userptr, const char *format,\r\n                    va_list ap) {\r\n  int len = 0;\r\n  if (log_func == NULL) {\r\n    return 0;\r\n  }\r\n\r\n  len = log_func(level, file, line, func, userptr, format, ap);\r\n\r\n  return len;\r\n}\r\n\r\nvoid manager_log_callback_reg(manager_log_callback callback) {\r\n  log_func = callback;\r\n}\r\n\r\nvoid manager_backtrace(MANAGER_LOG_LEVEL loglevel, const char *format, ...) 
{\r\n  int j, nptrs;\r\n#define SIZE 100\r\n  void *buffer[100];\r\n  char stack_buffer[4096];\r\n  char *buff = stack_buffer;\r\n  char **strings;\r\n  int total_len = 0;\r\n  int len = 0;\r\n  va_list ap;\r\n\r\n  nptrs = backtrace(buffer, SIZE);\r\n\r\n  /* The call backtrace_symbols_fd(buffer, nptrs, STDOUT_FILENO)\r\n            would produce similar output to the following: */\r\n\r\n  strings = backtrace_symbols(buffer, nptrs);\r\n  if (strings == NULL) {\r\n    return;\r\n  }\r\n\r\n  va_start(ap, format);\r\n  len =\r\n      vsnprintf(buff + total_len, sizeof(stack_buffer) - total_len, format, ap);\r\n  total_len += len;\r\n  if (len > 0 && *(buff + total_len - 1) != '\\n') {\r\n    len = snprintf(buff + total_len, sizeof(stack_buffer) - total_len, \"\\n\");\r\n    total_len += len;\r\n  }\r\n  va_end(ap);\r\n  for (j = 0; j < nptrs; j++) {\r\n    len = snprintf(buff + total_len, sizeof(stack_buffer) - total_len,\r\n                   \"    @ %s\\n\", strings[j]);\r\n    if (len >= sizeof(stack_buffer) - total_len) {\r\n      break;\r\n    }\r\n    total_len += len;\r\n  }\r\n\r\n  manager_log_ext(loglevel, BASE_FILE_NAME, __LINE__, __func__, 0, \"%s\",\r\n                 stack_buffer);\r\n\r\n  free(strings);\r\n}"
  },
  {
    "path": "src/modelbox/manager/src/log.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MANAGER_LOG_H\r\n#define MANAGER_LOG_H\r\n\r\n#include <stdarg.h>\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#ifndef MANAGER_LOG_HAS_LEVEL\r\n#define MANAGER_LOG_HAS_LEVEL\r\ntypedef enum {\r\n  MANAGER_LOG_DBG = 0,\r\n  MANAGER_LOG_INFO = 1,\r\n  MANAGER_LOG_NOTE = 2,\r\n  MANAGER_LOG_WARN = 3,\r\n  MANAGER_LOG_ERR = 4,\r\n  MANAGER_LOG_FATAL = 5,\r\n  MANAGER_LOG_END = 6\r\n} MANAGER_LOG_LEVEL;\r\n#endif\r\n\r\n#ifndef BASE_FILE_NAME\r\n#define BASE_FILE_NAME __FILE__\r\n#endif\r\n#define manager_log(level, format, ...)                                    \\\r\n  manager_log_ext(level, BASE_FILE_NAME, __LINE__, __func__, NULL, format, \\\r\n                  ##__VA_ARGS__)\r\n\r\nextern int manager_log_ext(MANAGER_LOG_LEVEL level, const char *file, int line,\r\n                           const char *func, void *userptr, const char *format,\r\n                           ...) 
__attribute__((format(printf, 6, 7)));\r\n\r\nextern int manager_log_vext(MANAGER_LOG_LEVEL level, const char *file, int line,\r\n                            const char *func, void *userptr, const char *format,\r\n                            va_list ap);\r\n\r\ntypedef int (*manager_log_callback)(MANAGER_LOG_LEVEL level, const char *file,\r\n                                    int line, const char *func, void *userptr,\r\n                                    const char *format, va_list ap);\r\n\r\nextern void manager_log_callback_reg(manager_log_callback callback);\r\n\r\nvoid manager_backtrace(MANAGER_LOG_LEVEL loglevel, const char *format, ...);\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n#endif\r\n"
  },
  {
    "path": "src/modelbox/manager/src/manager.c",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"manager.h\"\n\n#include <libgen.h>\n#include <sys/mman.h>\n\n#include \"common.h\"\n#include \"manager_conf.h\"\n#include \"manager_monitor.h\"\n#include \"securec.h\"\n#include \"tlog.h\"\n#include \"util.h\"\n\nstatic int g_reload_config;\nstatic int pid_fd;\nstatic int key_fd;\nextern char *program_invocation_name;\nextern char *program_invocation_short_name;\nstatic int exit_signal;\nstatic int g_run_server = 0;\nstatic int g_is_verbose = 0;\n\nstatic int g_sig_list[] = {\n    SIGIO,   SIGPWR,    SIGSTKFLT, SIGPROF, SIGINT,  SIGTERM,\n    SIGBUS,  SIGVTALRM, SIGTRAP,   SIGXCPU, SIGXFSZ, SIGILL,\n    SIGABRT, SIGFPE,    SIGSEGV,   SIGQUIT, SIGSYS,\n};\n\nstatic int g_sig_num = sizeof(g_sig_list) / sizeof(g_sig_list[0]);\n\nstatic void manager_showhelp(void) {\n  /* clang-format off */\n    char help[] = \"\"\n        \"Usage: manager [OPTION]...\\n\"\n        \"Start manager server.\\n\"\n        \"  -c            configuration file.\\n\"\n        \"  -f            run forground.\\n\"\n        \"  -p            pid file.\\n\"\n        \"  -v            output log to screen.\\n\"\n        \"  -h            show this help message.\\n\"\n        \"\\n\";\n    printf(\"%s\", help);\n  /* clang-format on */\n}\n\nstatic void manager_sig_handler(int volatile sig_no, siginfo_t *sig_info,\n                
                void *volatile ptr) {\n  switch (sig_no) {\n    case SIGINT:\n    case SIGTERM:\n      exit_signal = sig_no;\n      exit(1);\n      break;\n    case SIGQUIT:\n      return;\n      break;\n    case SIGSEGV:\n    case SIGPIPE:\n    case SIGFPE:\n    case SIGABRT:\n    case SIGBUS:\n    case SIGILL:\n      sleep(1);\n      break;\n    default:\n      break;\n  }\n\n  _exit(1);\n}\n\nstatic int manager_sig_register(void) {\n  int i = 0;\n  struct sigaction sig_act;\n\n  for (i = 0; i < g_sig_num; i++) {\n    memset_s(&sig_act, sizeof(sig_act), 0, sizeof(sig_act));\n    sig_act.sa_sigaction = manager_sig_handler;\n    sig_act.sa_flags = SA_SIGINFO | SA_RESTART;\n\n    if (sigaction(g_sig_list[i], &sig_act, NULL) < 0) {\n      fprintf(stderr, \"Register signal %d failed.\", g_sig_list[i]);\n    }\n  }\n\n  return 0;\n}\n\nint manager_add_apps(void) {\n  int i = 0;\n  for (i = 0; i < conf_apps_num; i++) {\n    struct app_start_info info;\n    memset_s(&info, sizeof(info), 0, sizeof(info));\n    info.name = conf_apps[i].name;\n    info.cmdline = conf_apps[i].cmd;\n    info.cmd_max_len = sizeof(conf_apps[i].cmd);\n    info.killcmd = conf_apps[i].killcmd;\n    info.killcmd_max_len = sizeof(conf_apps[i].killcmd);\n    info.pidfile = conf_apps[i].pidfile;\n    info.check_alive = conf_apps[i].check_alive;\n    info.keepalive_time = conf_apps[i].check_alive_time;\n    info.heartbeat_interval = conf_apps[i].heartbeat_interval;\n\n    manager_log(MANAGER_LOG_INFO, \"add %s, cmd: '%s', check alive %d\\n\",\n                conf_apps[i].name, strcmds(conf_apps[i].cmd, PATH_MAX), conf_apps[i].check_alive);\n    if (app_start(&info) != 0) {\n      manager_log(MANAGER_LOG_ERR, \"add app %s failed.\", conf_apps[i].name);\n      return -1;\n    }\n  }\n\n  return 0;\n}\n\nvoid manager_reload_apps(struct conf_app oldapps[CONF_MAX_APPS]) {\n  /* stop app */\n  int i, j;\n  manager_log(MANAGER_LOG_INFO, \"start reload apps.\");\n\n  for (i = 0; i < CONF_MAX_APPS; i++) {\n    
for (j = 0; j < CONF_MAX_APPS; j++) {\n      if (memcmp(&oldapps[i], &conf_apps[j], sizeof(struct conf_app)) == 0) {\n        break;\n      }\n    }\n\n    if (j == CONF_MAX_APPS) {\n      /* stop app, when config not exists */\n      if (app_stop(oldapps[i].name, 1) == 0) {\n        manager_log(MANAGER_LOG_INFO, \"stop app %s success.\", oldapps[i].name);\n      } else {\n        manager_log(MANAGER_LOG_ERR, \"stop app %s failed.\", oldapps[i].name);\n      }\n\n      memset_s(&oldapps[i], sizeof(struct conf_app), 0,\n               sizeof(struct conf_app));\n    }\n  }\n\n  /* start new apps */\n  for (i = 0; i < CONF_MAX_APPS; i++) {\n    for (j = 0; j < CONF_MAX_APPS; j++) {\n      if (memcmp(&conf_apps[i], &oldapps[j], sizeof(struct conf_app)) == 0) {\n        break;\n      }\n    }\n\n    if (j == CONF_MAX_APPS) {\n      struct app_start_info info;\n      memset_s(&info, sizeof(info), 0, sizeof(info));\n      info.name = conf_apps[i].name;\n      info.cmdline = conf_apps[i].cmd;\n      info.cmd_max_len = sizeof(conf_apps[i].cmd);\n      info.killcmd = conf_apps[i].killcmd;\n      info.killcmd_max_len = sizeof(conf_apps[i].killcmd);\n      info.pidfile = conf_apps[i].pidfile;\n      info.check_alive = conf_apps[i].check_alive;\n      info.keepalive_time = conf_apps[i].check_alive_time;\n      info.heartbeat_interval = conf_apps[i].heartbeat_interval;\n\n      if (app_start(&info) == 0) {\n        manager_log(MANAGER_LOG_INFO, \"start app %s success.\",\n                    conf_apps[i].name);\n      } else {\n        manager_log(MANAGER_LOG_ERR, \"start app %s failed.\", conf_apps[i].name);\n      }\n    }\n  }\n}\n\nint manager_reload(void) {\n  struct conf_app oldapps[CONF_MAX_APPS];\n  memcpy(oldapps, conf_apps, sizeof(oldapps));\n\n  manager_log(MANAGER_LOG_INFO, \"start reload configuration.\");\n  if (manager_reload_conf() != 0) {\n    manager_log(MANAGER_LOG_ERR, \"reload configuration failed.\\n\");\n    return -1;\n  }\n\n  
tlog_setlevel((tlog_level)conf_log_level);\n\n  manager_reload_apps(oldapps);\n\n  return 0;\n}\n\nint manager_init_server(int lockpage) {\n  /* init monitor*/\n  if (manager_monitor_init() != 0) {\n    manager_log(MANAGER_LOG_ERR, \"init monitor failed.\\n\");\n    return -1;\n  }\n\n  /* load apps */\n  if (manager_add_apps() != 0) {\n    manager_log(MANAGER_LOG_ERR, \"add apps failed.\");\n    return -1;\n  }\n\n  if (lockpage != 0 && mlockall(MCL_FUTURE) != 0) {\n    manager_log(MANAGER_LOG_WARN, \"lock memory failed.\");\n  }\n  return 0;\n}\n\nint manager_run(void) {\n  unsigned long now = {0};\n#ifdef BUILD_TEST\n  int sleep = 10;\n#else\n  int sleep = 500;\n#endif\n  int sleep_time = 0;\n  unsigned long expect_time = 0;\n\n  g_run_server = 1;\n\n  sleep_time = sleep;\n  now = get_tick_count() - sleep;\n  expect_time = now + sleep;\n  while (g_run_server) {\n    now = get_tick_count();\n    if (now >= expect_time) {\n      sleep_time = sleep - (now - expect_time);\n      if (sleep_time < 0) {\n        sleep_time = 0;\n      }\n      expect_time += sleep;\n    }\n\n    if (g_reload_config) {\n      manager_reload();\n      g_reload_config = 0;\n    }\n\n    usleep(sleep_time * 1000);\n    app_monitor();\n  }\n\n  manager_log(MANAGER_LOG_INFO, \"master run stopped.\");\n  return 0;\n}\n\nvoid manager_stop(void) { g_run_server = 0; }\n\nvoid manager_exit(void) {\n  manager_stop();\n\n  /* stop monitor */\n  manager_monitor_exit();\n}\n\nstatic void manager_onexit(void) {\n  if (exit_signal > 0) {\n    manager_log(MANAGER_LOG_INFO, \"process exit with signal %d\", exit_signal);\n  }\n\n  manager_exit();\n\n  tlog_exit();\n}\n\nstatic void manager_sighup(int signo) { g_reload_config = 1; }\n\nint manager_tlog(MANAGER_LOG_LEVEL level, const char *file, int line,\n                 const char *func, void *userptr, const char *format,\n                 va_list ap) {\n  return tlog_vext((tlog_level)(level), file, line, func, userptr, format, ap);\n}\n\nstatic void 
_manager_default_conf_file(char *path, int max_len) {\n  char current_path[1024] = {0};\n  if (get_prog_path(current_path, sizeof(current_path) - 1) != 0) {\n    path[0] = 0;\n    return;\n  }\n\n  snprintf(path, max_len, \"%s/../%s/%s\", current_path, CONF_FILE_PATH,\n           CONF_FILE_NAME);\n  if (access(path, R_OK) == 0) {\n    return;\n  }\n\n  snprintf(path, max_len, \"%s/%s\", CONF_FILE_PATH, CONF_FILE_NAME);\n  if (access(path, R_OK) == 0) {\n    return;\n  }\n\n  path[0] = 0;\n\n  return;\n}\n\nint manager_init(char *name) {\n  char log_file[PATH_MAX];\n  char piddir[PATH_MAX];\n\n  if (manager_sig_register()) {\n    fprintf(stderr, \"register signal failed\\n\");\n    return -1;\n  }\n\n  manager_log_callback_reg(manager_tlog);\n\n  if (strnlen(pid_file_path, sizeof(pid_file_path)) <= 0) {\n    if (name) {\n      snprintf_s(pid_file_path, sizeof(pid_file_path), sizeof(pid_file_path),\n                 \"%s/%s.pid\", MANAGER_PID_PATH, name);\n    } else {\n      snprintf_s(pid_file_path, sizeof(pid_file_path), sizeof(pid_file_path),\n                 \"%s/%s.pid\", MANAGER_PID_PATH, MANAGER_NAME);\n    }\n  }\n\n  strncpy_s(piddir, sizeof(piddir), pid_file_path, sizeof(pid_file_path));\n  dirname(piddir);\n  mkdir(piddir, 0750);\n\n  /* create key */\n  if (strnlen(key_file_path, sizeof(key_file_path)) <= 0) {\n    if (name) {\n      snprintf_s(key_file_path, sizeof(key_file_path), sizeof(key_file_path),\n                 \"%s/%s.key\", piddir, name);\n    } else {\n      snprintf_s(key_file_path, sizeof(key_file_path), sizeof(key_file_path),\n                 \"%s/%s.key\", piddir, MANAGER_NAME);\n    }\n  }\n\n  pid_fd = create_pid(pid_file_path);\n  if (pid_fd <= 0) {\n    fprintf(stderr, \"create pid failed, failed\\n\");\n    return -1;\n  }\n\n  key_fd = create_pid(key_file_path);\n  if (key_fd <= 0) {\n    fprintf(stderr, \"create key file failed, failed\\n\");\n    return -1;\n  }\n\n  /* generate log */\n  snprintf(conf_log_file, 
sizeof(log_file), \"%s/%s.log\", MANAGER_LOG_PATH,\n           MANAGER_NAME);\n\n  if (tlog_init(get_modelbox_full_path(conf_log_file), conf_log_size,\n                conf_log_num, 0, 0) != 0) {\n    fprintf(stderr, \"init master log failed.\\n\");\n    return -1;\n  }\n\n  tlog_setlogscreen(g_is_verbose);\n  tlog_setlevel((tlog_level)conf_log_level);\n\n  manager_log(MANAGER_LOG_INFO, \"%s starting... (Build : %s %s)\",\n              program_invocation_short_name, __DATE__, __TIME__);\n\n  if (manager_init_server(conf_lockpage) != 0) {\n    manager_log(MANAGER_LOG_ERR, \"init master server failed.\");\n    return -1;\n  }\n\n  return 0;\n}\n\n#ifdef BUILD_TEST\nint manager_main(int argc, char *argv[])\n#else\nint main(int argc, char *argv[])\n#endif\n{\n  int is_forground = 0;\n  int opt;\n  char conf_file[PATH_MAX] = {0};\n  char *name = NULL;\n\n  while ((opt = getopt(argc, argv, \"fvhc:n:p:k:\")) != -1) {\n    switch (opt) {\n      case 'c':\n        strncpy(conf_file, get_modelbox_full_path(optarg),\n                sizeof(conf_file) - 1);\n        break;\n      case 'n':\n        name = optarg;\n        break;\n      case 'p':\n        strncpy(pid_file_path, get_modelbox_full_path(optarg),\n                sizeof(pid_file_path) - 1);\n        break;\n      case 'k':\n        strncpy(key_file_path, get_modelbox_full_path(optarg),\n                sizeof(key_file_path) - 1);\n        break;\n      case 'f':\n        is_forground = 1;\n        break;\n      case 'v':\n        g_is_verbose = 1;\n        break;\n      case 'h':\n        manager_showhelp();\n        return 1;\n    }\n  }\n\n  char default_conf_file[PATH_MAX] = {0};\n  const char *load_conf = conf_file;\n  if (conf_file[0] == 0) {\n    _manager_default_conf_file(default_conf_file, PATH_MAX);\n    if (default_conf_file[0]) {\n      load_conf = default_conf_file;\n    }\n  }\n\n  if (manager_load_conf(load_conf) != 0) {\n    fprintf(stderr, \"load master config file %s failed.\\n\", load_conf);\n    
return -1;\n  }\n\n\n  if (is_forground == 0) {\n    if (daemon(0, 0) < 0) {\n      fprintf(stderr, \"run daemon process failed, %s\\n\", strerror(errno));\n      return 1;\n    }\n  }\n\n  atexit(manager_onexit);\n  signal(SIGPIPE, SIG_IGN);\n  signal(SIGHUP, manager_sighup);\n  g_reload_config = 0;\n\n  if (manager_init(name) != 0) {\n    fprintf(stderr, \"master init failed.\\n\");\n    return 1;\n  }\n\n  if (manager_run() != 0) {\n    return 1;\n  }\n\n  return 0;\n}\n\n#ifdef BUILD_TEST\n\nvoid manager_force_exit(void) {\n  manager_stop();\n  usleep(100000);\n  app_free_memory();\n}\n\n#endif\n"
  },
  {
    "path": "src/modelbox/manager/src/manager.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MODELBOX_MANAGER_H\r\n#define MODELBOX_MANAGER_H\r\n\r\n#include <arpa/inet.h>\r\n#include <errno.h>\r\n#include <fcntl.h>\r\n#include <limits.h>\r\n#include <linux/limits.h>\r\n#include <netdb.h>\r\n#include <netinet/in.h>\r\n#include <netinet/tcp.h>\r\n#include <pthread.h>\r\n#include <signal.h>\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <string.h>\r\n#include <sys/epoll.h>\r\n\r\n#include <sys/file.h>\r\n#include <sys/ioctl.h>\r\n#include <sys/socket.h>\r\n#include <sys/stat.h>\r\n#include <sys/types.h>\r\n#include <sys/un.h>\r\n#include <unistd.h>\r\n\r\n#include \"securec.h\"\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#define MANAGER_LOG_PATH \"/var/log/modelbox/\"\r\n#define MANAGER_LOG_SIZE (1024 * 1024 * 64)\r\n#define MANAGER_LOG_NUM (48)\r\n\r\nextern int manager_init_server(int lockpage);\r\n\r\nextern int manager_run(void);\r\n\r\nextern void manager_stop(void);\r\n\r\nextern void manager_exit(void);\r\n\r\n#ifdef BUILD_TEST\r\n\r\nextern void manager_force_exit(void);\r\n\r\n#endif\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif  // !MODELBOX_MANAGER_H"
  },
  {
    "path": "src/modelbox/manager/src/manager_common.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MODELBOX_MANAGER_COMMON_HEAD_H\r\n#define MODELBOX_MANAGER_COMMON_HEAD_H\r\n\r\n#include <errno.h>\r\n#include <sched.h>\r\n#include <signal.h>\r\n#include <stdio.h>\r\n#include <string.h>\r\n#include <sys/ipc.h>\r\n#include <sys/msg.h>\r\n#include <sys/types.h>\r\n#include <sys/wait.h>\r\n#include <time.h>\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#define HEARTBEAT_MSG 1\r\n#define MANAGER_NAME \"manager\"\r\n#define MANAGER_PID_PATH \"/var/run/modelbox-manager\"\r\n\r\n#define APP_NAME_LEN 64\r\n\r\n/* heartbeat message struct */\r\nstruct heartbeat_msg {\r\n  long mtype;\r\n  char name[APP_NAME_LEN];\r\n  pid_t pid;\r\n  time_t time;\r\n} __attribute__((aligned));\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif  // !MODELBOX_MANAGER_COMMON_HEAD_H\r\n"
  },
  {
    "path": "src/modelbox/manager/src/manager_conf.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"manager_conf.h\"\r\n\r\n#include <stdlib.h>\r\n#include <string.h>\r\n\r\n#include \"conf.h\"\r\n#include \"log.h\"\r\n#include \"util.h\"\r\n\r\nstatic char g_conf_file[PATH_MAX];\r\nchar pid_file_path[PATH_MAX];\r\nchar key_file_path[PATH_MAX];\r\nint conf_watchdog_timeout = DEFAULT_WATCHDOG_TIMEOUT;\r\nint conf_force_kill_time = DEFAULT_FORCE_KILL_TIMEOUT;\r\nint conf_lockpage = 1;\r\n\r\nstruct conf_app conf_apps[CONF_MAX_APPS];\r\nint conf_apps_num;\r\n\r\nint _manager_is_app_exists(char *name) {\r\n  int i = 0;\r\n\r\n  for (i = 0; i < CONF_MAX_APPS; i++) {\r\n    struct conf_app *conf_app = &conf_apps[i];\r\n    if (conf_app->name[0] == 0) {\r\n      continue;\r\n    }\r\n\r\n    if (strncmp(conf_app->name, name, APP_NAME_LEN) == 0) {\r\n      return 0;\r\n    }\r\n  }\r\n\r\n  return -1;\r\n}\r\n\r\nint _manager_conf_process_cmds(char *cmd, int max_cmdlen, const char *argline) {\r\n  const char *ptr = argline;\r\n  const char *arg_begin_ptr = argline;\r\n  int cmd_len = 0;\r\n  char arg[PATH_MAX];\r\n\r\n  while (1) {\r\n    if (ptr == arg_begin_ptr) {\r\n      ptr++;\r\n      continue;\r\n    }\r\n\r\n    if (*ptr != ' ' && *ptr != '\\0') {\r\n      ptr++;\r\n      continue;\r\n    }\r\n\r\n    if (ptr - 1 == arg_begin_ptr && *arg_begin_ptr == ' ') {\r\n      
arg_begin_ptr = ptr + 1;\r\n      continue;\r\n    }\r\n\r\n    strncpy(arg, arg_begin_ptr, ptr - arg_begin_ptr);\r\n    arg[ptr - arg_begin_ptr] = '\\0';\r\n\r\n    if (cmd_len + ptr - arg_begin_ptr + 1 > max_cmdlen - 2) {\r\n      manager_log(MANAGER_LOG_ERR, \"cmd is too long\");\r\n      return -1;\r\n    }\r\n\r\n    strncpy(cmd + cmd_len, get_modelbox_full_path(arg), ptr - arg_begin_ptr);\r\n    cmd_len += ptr - arg_begin_ptr;\r\n    arg_begin_ptr = ptr;\r\n    cmd[cmd_len] = '\\0';\r\n    if (*ptr == '\\0') {\r\n      cmd[cmd_len + 1] = '\\0';\r\n      break;\r\n    }\r\n\r\n    ptr++;\r\n    cmd_len++;\r\n    arg_begin_ptr = ptr;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint manager_load_app(void *item, int argc, char *argv[]) {\r\n  static struct option options[] = {{\"name\", 1, 0, 'n'},\r\n                                    {\"pidfile\", 1, 0, 'p'},\r\n                                    {\"check-alive\", 0, 0, 'k'},\r\n                                    {\"check-alive-time\", 1, 0, 't'},\r\n                                    {\"heartbeat-interval\", 1, 0, 'i'},\r\n                                    {\"kill-cmd\", 1, 0, 'K'},\r\n                                    {0, 0, 0, 0}};\r\n\r\n  int cmdtype;\r\n  struct conf_app *conf_app;\r\n  int end_opt = 0;\r\n  int i = 0;\r\n\r\n  if (conf_apps_num >= CONF_MAX_APPS) {\r\n    manager_log(MANAGER_LOG_ERR, \"apps configuration is full.\");\r\n    return 0;\r\n  }\r\n\r\n  conf_app = conf_apps + conf_apps_num;\r\n  memset(conf_app, 0, sizeof(*conf_app));\r\n\r\n  while ((cmdtype = getopt_long_only(argc, argv, \"\", options, NULL)) != -1 &&\r\n         end_opt == 0) {\r\n    switch (cmdtype) {\r\n      case 'n':\r\n        if (_manager_is_app_exists(optarg) == 0) {\r\n          manager_log(MANAGER_LOG_ERR, \"app %s exists.\", optarg);\r\n          return -1;\r\n        }\r\n        strncpy(conf_app->name, optarg, APP_NAME_LEN - 1);\r\n        break;\r\n      case 'k':\r\n        conf_app->check_alive = 1;\r\n    
    break;\r\n      case 'i':\r\n        conf_app->heartbeat_interval = atoi(optarg);\r\n        break;\r\n      case 't':\r\n        conf_app->check_alive_time = atoi(optarg);\r\n        break;\r\n      case 'p':\r\n        strncpy(conf_app->pidfile, get_modelbox_full_path(optarg),\r\n                PATH_MAX - 1);\r\n        break;\r\n      case 'K': {\r\n        char path_buf[PATH_MAX];\r\n        if (_manager_conf_process_cmds(path_buf, PATH_MAX, optarg) != 0) {\r\n          manager_log(MANAGER_LOG_ERR, \"process kill cmd failed.\");\r\n          return -1;\r\n        }\r\n        strncpy(conf_app->killcmd, get_modelbox_full_path(path_buf),\r\n                PATH_MAX - 1);\r\n      } break;\r\n      default:\r\n        break;\r\n    }\r\n  }\r\n\r\n  if (conf_app->heartbeat_interval <= 0) {\r\n    conf_app->heartbeat_interval = DEFAULT_HEARTBEAT_INTERVAL;\r\n  }\r\n\r\n  if (conf_app->heartbeat_interval > conf_watchdog_timeout) {\r\n    manager_log(MANAGER_LOG_ERR,\r\n                \"heartbeat interval is too large, watch dog timeout is %d\",\r\n                conf_watchdog_timeout);\r\n    return -1;\r\n  }\r\n\r\n  if (conf_app->check_alive_time <= 0) {\r\n    conf_app->check_alive_time = conf_watchdog_timeout;\r\n  }\r\n\r\n  char *ptr = conf_app->cmd;\r\n  for (i = optind; i < argc; i++) {\r\n    strncpy(ptr, get_modelbox_full_path(argv[i]),\r\n            PATH_MAX - 1 - (ptr - conf_app->cmd));\r\n    ptr += strnlen(ptr, PATH_MAX - 1 - (ptr - conf_app->cmd));\r\n    ptr++;\r\n    if (conf_app->cmd - ptr >= PATH_MAX - 1) {\r\n      manager_log(MANAGER_LOG_ERR, \"command line too long\");\r\n      return -1;\r\n    }\r\n  }\r\n\r\n  if (strlen(conf_app->name) <= 0 || strlen(conf_app->cmd) <= 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"load app failed, name %s, cmd %s.\",\r\n                conf_app->name, strcmds(conf_app->cmd, PATH_MAX));\r\n    return -1;\r\n  }\r\n\r\n  conf_apps_num++;\r\n  return 0;\r\n}\r\n\r\nstatic struct config_map 
conf_parse_map[] = {\r\n    {CONF_WATCHDOG_TIMEOUT, conf_parse_int,\r\n     .item =\r\n         &(struct CONF_PARSE_INT){\r\n             .value = &conf_watchdog_timeout, .min = 3, .max = 600 * 5}},\r\n    {CONF_FORCE_KILLTIME, conf_parse_int,\r\n     .item =\r\n         &(struct CONF_PARSE_INT){\r\n             .value = &conf_force_kill_time, .min = 3, .max = 60 * 3}},\r\n    {CONF_LOCK_PAGE, conf_parse_int,\r\n     .item =\r\n         &(struct CONF_PARSE_INT){.value = &conf_lockpage, .min = 0, .max = 1}},\r\n    {CONF_APP, manager_load_app, 0},\r\n    {CONF_KEY_FILE, conf_parse_string,\r\n     .item =\r\n         &(struct CONF_PARSE_STRING){.value = key_file_path, .max = PATH_MAX}},\r\n    {NULL, NULL, NULL},\r\n};\r\n\r\nint manager_reload_conf(void) {\r\n  manager_log(MANAGER_LOG_INFO, \"reload configuration file %s\", g_conf_file);\r\n\r\n  conf_apps_num = 0;\r\n  memset(conf_apps, 0, sizeof(conf_apps));\r\n\r\n  return load_conf(conf_parse_map, g_conf_file);\r\n}\r\n\r\nint manager_load_conf(const char *conf_file) {\r\n  if (conf_file == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"conf file is null\");\r\n    return -1;\r\n  }\r\n\r\n  strncpy(g_conf_file, get_modelbox_full_path(conf_file),\r\n          sizeof(g_conf_file) - 1);\r\n\r\n  return load_conf(conf_parse_map, g_conf_file);\r\n}\r\n"
  },
  {
    "path": "src/modelbox/manager/src/manager_conf.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MODELBOX_MANAGER_CONF_H\r\n#define MODELBOX_MANAGER_CONF_H\r\n\r\n#include \"conf.h\"\r\n#include \"manager_common.h\"\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#define DEFAULT_WATCHDOG_TIMEOUT 90\r\n#define DEFAULT_HEARTBEAT_INTERVAL 30\r\n#define DEFAULT_FORCE_KILL_TIMEOUT 6\r\n#define CONF_MAX_APPS 256\r\n\r\n#define CONF_FILE_NAME \"manager.conf\"\r\n#define CONF_FILE_PATH \"/etc\"\r\n\r\n#define CONF_WATCHDOG_TIMEOUT \"watchdog-timeout\"\r\n#define CONF_FORCE_KILLTIME \"force-kill-time\"\r\n#define CONF_KEY_FILE \"key-file\"\r\n#define CONF_LOCK_PAGE \"lock-page\"\r\n\r\n#define CONF_APP \"app\"\r\n\r\nextern char pid_file_path[PATH_MAX];\r\nextern char key_file_path[PATH_MAX];\r\n\r\nextern int conf_watchdog_timeout;\r\n\r\nextern int conf_force_kill_time;\r\n\r\nextern int conf_lockpage;\r\n\r\nstruct conf_app {\r\n  char name[APP_NAME_LEN];\r\n  char pidfile[PATH_MAX];\r\n  char cmd[PATH_MAX];\r\n  char killcmd[PATH_MAX];\r\n  int check_alive;\r\n  int check_alive_time;\r\n  int heartbeat_interval;\r\n};\r\n\r\nextern struct conf_app conf_apps[CONF_MAX_APPS];\r\n\r\nextern int conf_apps_num;\r\n\r\nextern int manager_reload_conf(void);\r\n\r\nextern int manager_load_conf(const char *conf_file);\r\n\r\n#ifdef 
__cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif  // !MODELBOX_MANAGER_CONF_H"
  },
  {
    "path": "src/modelbox/manager/src/manager_monitor.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"manager_monitor.h\"\r\n\r\n#include <errno.h>\r\n#include <sched.h>\r\n#include <string.h>\r\n#include <sys/ipc.h>\r\n#include <sys/msg.h>\r\n#include <sys/types.h>\r\n#include <sys/wait.h>\r\n\r\n#include \"common.h\"\r\n#include \"hashtable.h\"\r\n#include \"list.h\"\r\n#include \"manager_conf.h\"\r\n#include \"modelbox/manager/manager_monitor_client.h\"\r\n#include \"util.h\"\r\n\r\n#define MANAGER_MON_MAX_APP_NUM 256\r\n#define SHM_MODE 0600\r\n#define APP_MAX_ARGS 128\r\n\r\nstatic key_t g_msgkey = 0;\r\nstatic int g_msgid = 0;\r\nstatic int g_manager_restarting = 0;\r\n\r\n#define APP_MAP_BITS 9\r\n\r\ntypedef enum APP_STATUS {\r\n  APP_STOP,\r\n  APP_NOT_RUNNING,\r\n  APP_PENDING,\r\n  APP_RUNNING,\r\n  APP_DEAD,\r\n  APP_EXITING,\r\n  APP_FORCE_KILL,\r\n} APP_STATUS;\r\n\r\n/* app control block */\r\nstruct app_monitor {\r\n  char name[APP_NAME_LEN];\r\n  char cmdline[PATH_MAX];\r\n  char killcmd[PATH_MAX];\r\n  char pid_file[PATH_MAX];\r\n  int start_limit_interval;\r\n  int start_limit_burst;\r\n\r\n  struct hlist_node map;\r\n  pid_t pid;\r\n  pid_t kill_pid;\r\n  time_t last_alive;\r\n  time_t last_start;\r\n  time_t dead_time;\r\n  int check_alive;\r\n  int check_alive_time;\r\n  int heartbeat_interval;\r\n  int start_limit_count;\r\n  int 
start_limit_log;\r\n  APP_STATUS state;\r\n};\r\n\r\nstruct app_mon {\r\n  MANAGER_DECLARE_HASHTABLE(app_map, APP_MAP_BITS);\r\n  MANAGER_DECLARE_HASHTABLE(app_map_pid, APP_MAP_BITS);\r\n};\r\n\r\nstatic struct app_mon app_mon;\r\n\r\n#ifdef BUILD_TEST\r\nstruct Test_App test_app;\r\n#endif\r\n\r\n/* find app control block by name*/\r\nstruct app_monitor *_app_find_app_byid(const char *name) {\r\n  struct app_monitor *app;\r\n\r\n  unsigned int key = hash_string(name);\r\n  hash_for_each_possible(app_mon.app_map, app, map, key) {\r\n    if (strncmp(app->name, name, APP_NAME_LEN) == 0) {\r\n      return app;\r\n    }\r\n  }\r\n\r\n  return NULL;\r\n}\r\n\r\n/* find app control block by pid*/\r\nstruct app_monitor *_app_find_app_bypid(pid_t pid) {\r\n  struct app_monitor *app;\r\n\r\n  if (pid <= 0) {\r\n    return NULL;\r\n  }\r\n\r\n  unsigned int key = hash_long(pid, APP_MAP_BITS);\r\n  hash_for_each_possible(app_mon.app_map_pid, app, map, key) {\r\n    if (app->pid == pid) {\r\n      return app;\r\n    }\r\n  }\r\n\r\n  return NULL;\r\n}\r\n\r\n/* whether pid exits */\r\nint _app_pid_exists(pid_t pid) {\r\n  if (pid <= 0) {\r\n    return -1;\r\n  }\r\n\r\n  if (kill(pid, 0) != 0) {\r\n    return -1;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\n/* whether app exits */\r\nint _app_exists(struct app_monitor *app) {\r\n  if (_app_pid_exists(app->pid) != 0) {\r\n    return -1;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nvoid _app_do_execve(char *cmdline) {\r\n  char *argv[APP_MAX_ARGS];\r\n  int argc = 0;\r\n  int i = 0;\r\n\r\n  argv[argc] = cmdline;\r\n  for (i = 0; i < PATH_MAX - 2 && argc < APP_MAX_ARGS - 1; i++) {\r\n    if (cmdline[i] != '\\0') {\r\n      continue;\r\n    }\r\n\r\n    argc++;\r\n    argv[argc] = cmdline + i + 1;\r\n    if (cmdline[i + 1] == '\\0') {\r\n      break;\r\n    }\r\n  }\r\n\r\n  argv[argc] = 0;\r\n\r\n  setpgrp();\r\n\r\n  execvp(argv[0], argv);\r\n  manager_log(MANAGER_LOG_ERR, \"execvp %s failed: %s\\n\", argv[0],\r\n              
strerror(errno));\r\n}\r\n\r\n/* run shell command */\r\nvoid _app_start_exec(struct app_monitor *app) {\r\n  if (app->check_alive) {\r\n    char keyfileenv[PATH_MAX];\r\n    char appnameenv[PATH_MAX];\r\n    char keepalivetime[64];\r\n    char intervaltime[64];\r\n\r\n    snprintf_s(keyfileenv, sizeof(keyfileenv), sizeof(keyfileenv),\r\n               \"MANAGER_MONITOR_KEYFILE=%s\", key_file_path);\r\n    snprintf_s(appnameenv, sizeof(appnameenv), sizeof(appnameenv),\r\n               \"MANAGER_MONITOR_NAME=%s\", app->name);\r\n    snprintf_s(keepalivetime, sizeof(keepalivetime), sizeof(keepalivetime),\r\n               \"MANAGER_MONITOR_KEEPALIVE_TIME=%d\", app->check_alive_time);\r\n    snprintf_s(intervaltime, sizeof(intervaltime), sizeof(intervaltime),\r\n               \"MANAGER_MONITOR_HEARTBEAT_INTERVAL=%d\",\r\n               app->heartbeat_interval);\r\n\r\n    putenv(keyfileenv);\r\n    putenv(appnameenv);\r\n    putenv(keepalivetime);\r\n    putenv(intervaltime);\r\n\r\n    if (get_modelbox_root_path()[0] != '\\0') {\r\n      char modelbox_root[PATH_MAX];\r\n      snprintf_s(modelbox_root, sizeof(modelbox_root), sizeof(modelbox_root),\r\n                 \"MODELBOX_ROOT=%s\", get_modelbox_root_path());\r\n      putenv(modelbox_root);\r\n    }\r\n  }\r\n\r\n  _app_do_execve(app->cmdline);\r\n}\r\n\r\nint app_getpid_from_pidfile(struct app_monitor *app) {\r\n  pid_t pid = 0;\r\n  int islocked = 0;\r\n  if (app->pid_file[0] == 0) {\r\n    return -1;\r\n  }\r\n\r\n  pid = get_pid_from_pidfile(app->pid_file, &islocked);\r\n\r\n  if (pid <= 0 || islocked == 0) {\r\n    return -1;\r\n  }\r\n\r\n  if (app->pid <= 0) {\r\n    manager_log(MANAGER_LOG_INFO, \"app %s attach monitoring, pid %d\", app->name,\r\n                pid);\r\n  } else {\r\n    manager_log(MANAGER_LOG_INFO,\r\n                \"app %s attach monitoring new pid %d, old pid %d\", app->name,\r\n                pid, app->pid);\r\n  }\r\n\r\n  app->state = APP_RUNNING;\r\n  app->pid = pid;\r\n  
time(&app->last_alive);\r\n\r\n  return 0;\r\n}\r\n\r\nint app_getpid_by_ps(struct app_monitor *app) {\r\n  FILE *fp = NULL;\r\n  char cmdline[PATH_MAX * 2];\r\n  char pid_buff[PATH_MAX];\r\n  int pid = -1;\r\n\r\n  /* if use pid file, skip*/\r\n  if (app->pid_file[0] != 0) {\r\n    return -1;\r\n  }\r\n\r\n  pid_buff[0] = 0;\r\n  snprintf(cmdline, PATH_MAX * 2 - 1, \"pgrep -x -f \\\"%s\\\"\",\r\n           strcmds(app->cmdline, PATH_MAX));\r\n  fp = popen(cmdline, \"re\");\r\n  if (fp == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"run command %s failed\",\r\n                strcmds(app->cmdline, PATH_MAX));\r\n    goto errout;\r\n  }\r\n\r\n  if (fread(pid_buff, 1, sizeof(pid_buff), fp) > 0) {\r\n    pid = atoi(pid_buff);\r\n    if (kill(pid, 0) != 0) {\r\n      pid = -1;\r\n    }\r\n  }\r\n\r\n  if (pclose(fp)) {\r\n    fp = NULL;\r\n    goto errout;\r\n  }\r\n\r\n  if (pid <= 0) {\r\n    goto errout;\r\n  }\r\n\r\n  if (app->pid <= 0) {\r\n    manager_log(MANAGER_LOG_INFO,\r\n                \"app %s attach monitoring by finding process, pid %d\",\r\n                app->name, pid);\r\n  }\r\n\r\n  app->state = APP_RUNNING;\r\n  app->pid = pid;\r\n\r\n  return 0;\r\n\r\nerrout:\r\n  if (fp) {\r\n    pclose(fp);\r\n    fp = NULL;\r\n  }\r\n  return -1;\r\n}\r\n\r\nint _app_start(struct app_monitor *app) {\r\n  int pid = -1;\r\n  int unused __attribute__((unused));\r\n  time_t now;\r\n\r\n  if (app->pid > 0) {\r\n    return 0;\r\n  }\r\n\r\n  time(&now);\r\n  if (now - app->last_start < app->start_limit_interval) {\r\n    app->start_limit_count++;\r\n    if (app->start_limit_count > app->start_limit_burst) {\r\n      if (app->start_limit_log == 0) {\r\n        manager_log(MANAGER_LOG_ERR,\r\n                    \"app %s start limit burst %d, interval %d, skip start\",\r\n                    app->name, app->start_limit_burst,\r\n                    app->start_limit_interval);\r\n        app->start_limit_log = 1;\r\n      }\r\n      return 0;\r\n    }\r\n  } else 
{\r\n    app->start_limit_count = 0;\r\n    app->start_limit_log = 0;\r\n  }\r\n\r\n#ifdef BUILD_TEST\r\n  /* for test */\r\n  pid = fork();\r\n  if (pid < 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"start process failed, %s\", strerror(errno));\r\n    return -1;\r\n  } else if (pid == 0) {\r\n    char keyfileenv[PATH_MAX];\r\n    char appnameenv[PATH_MAX];\r\n    char keepalivetime[64];\r\n    char intervaltime[64];\r\n\r\n    snprintf_s(keyfileenv, sizeof(keyfileenv), sizeof(keyfileenv),\r\n               \"MANAGER_MONITOR_KEYFILE=%s\", key_file_path);\r\n    snprintf_s(appnameenv, sizeof(appnameenv), sizeof(appnameenv),\r\n               \"MANAGER_MONITOR_NAME=%s\", app->name);\r\n    snprintf_s(keepalivetime, sizeof(keepalivetime), sizeof(keepalivetime),\r\n               \"MANAGER_MONITOR_KEEPALIVE_TIME=%d\", app->check_alive_time);\r\n    snprintf_s(intervaltime, sizeof(intervaltime), sizeof(intervaltime),\r\n               \"MANAGER_MONITOR_HEARTBEAT_INTERVAL=%d\",\r\n               app->heartbeat_interval);\r\n\r\n    putenv(keyfileenv);\r\n    putenv(appnameenv);\r\n    putenv(keepalivetime);\r\n    putenv(intervaltime);\r\n\r\n    if (get_modelbox_root_path()[0] != '\\0') {\r\n      char modelbox_root[PATH_MAX];\r\n      snprintf_s(modelbox_root, sizeof(modelbox_root), sizeof(modelbox_root),\r\n                 \"MODELBOX_ROOT=%s\", get_modelbox_root_path());\r\n      putenv(modelbox_root);\r\n    }\r\n\r\n    close_all_fd();\r\n    app_test(app);\r\n    _exit(1);\r\n  }\r\n#else\r\n  pid = vfork();\r\n  if (pid < 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"app %s start failed, %s\", app->name,\r\n                strerror(errno));\r\n    return -1;\r\n  } else if (pid == 0) {\r\n    /* close all fd */\r\n    close_all_fd();\r\n    /* start process */\r\n    _app_start_exec(app);\r\n    _exit(1);\r\n  }\r\n#endif\r\n\r\n  if (app->pid_file[0]) {\r\n    app->state = APP_PENDING;\r\n  } else {\r\n    /* update state */\r\n    app->state = APP_RUNNING;\r\n  
}\r\n\r\n  app->pid = pid;\r\n  app->dead_time = 0;\r\n  time(&app->last_alive);\r\n  time(&app->last_start);\r\n\r\n  manager_log(MANAGER_LOG_INFO, \"app %s start success, pid %d \", app->name,\r\n              app->pid);\r\n\r\n  return 0;\r\n}\r\n\r\nint _app_run_killcmd(struct app_monitor *app) {\r\n  manager_log(MANAGER_LOG_INFO, \"run kill-cmd %s\",\r\n              strcmds(app->killcmd, sizeof(app->killcmd)));\r\n  pid_t pid = vfork();\r\n  if (pid < 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"app %s kill-cmd failed, %s\", app->name,\r\n                strerror(errno));\r\n    return -1;\r\n  } else if (pid == 0) {\r\n    char childpid[PATH_MAX];\r\n    if (get_modelbox_root_path()[0] != '\\0') {\r\n      char modelbox_root[PATH_MAX];\r\n      snprintf_s(modelbox_root, sizeof(modelbox_root), sizeof(modelbox_root),\r\n                 \"MODELBOX_ROOT=%s\", get_modelbox_root_path());\r\n      putenv(modelbox_root);\r\n    }\r\n\r\n    snprintf_s(childpid, sizeof(childpid), sizeof(childpid), \"APP_PID=%d\",\r\n               app->pid);\r\n    putenv(childpid);\r\n\r\n    /* close all fd */\r\n    close_all_fd();\r\n    /* start process */\r\n    _app_do_execve(app->killcmd);\r\n    _exit(1);\r\n  }\r\n\r\n  app->kill_pid = pid;\r\n\r\n  return 0;\r\n}\r\n\r\nint _app_stop(struct app_monitor *app, int gracefull) {\r\n  int status;\r\n  int kill_mode = SIGKILL;\r\n\r\n  /* process is not running when pid < 0 */\r\n  if (app->pid <= 0) {\r\n    manager_log(MANAGER_LOG_DBG, \"app %s not started.\", app->name);\r\n    return 0;\r\n  }\r\n\r\n  /* check whether process is running */\r\n  if (kill(app->pid, 0) != 0) {\r\n    if (errno == ESRCH) {\r\n      manager_log(MANAGER_LOG_INFO, \"app %s not exist, pid %d\", app->name,\r\n                  app->pid);\r\n      goto clearout;\r\n    }\r\n  }\r\n\r\n  if (gracefull) {\r\n    kill_mode = SIGTERM;\r\n  } else {\r\n    kill_mode = SIGKILL;\r\n  }\r\n\r\n  /* send SEGV to process before kill\r\n   * after 
MANAGER_APP_DEAD_EXIT_TIME second, force kill process。\r\n   */\r\n  if (app->state == APP_DEAD) {\r\n    app->state = APP_EXITING;\r\n    if (app->killcmd[0] != 0 && app->kill_pid <= 0) {\r\n      _app_run_killcmd(app);\r\n      goto out;\r\n    }\r\n    kill_mode = SIGSEGV;\r\n    time(&app->dead_time);\r\n  } else if (app->state == APP_EXITING) {\r\n    time_t now;\r\n    time(&now);\r\n    if (now < (app->dead_time + conf_force_kill_time)) {\r\n      goto out;\r\n    }\r\n    app->state = APP_FORCE_KILL;\r\n  }\r\n\r\n  /* force kill process */\r\n  manager_log(MANAGER_LOG_ERR, \"app %s send signal %d, pid %d\", app->name,\r\n              kill_mode, app->pid);\r\n  if (killpg(app->pid, kill_mode) != 0) {\r\n    if (errno == ESRCH) {\r\n      goto clearout;\r\n    }\r\n    manager_log(MANAGER_LOG_WARN, \"app %s kill(%d) failed, pid %d, %s\",\r\n                app->name, kill_mode, app->pid, strerror(errno));\r\n    return 0;\r\n  }\r\n\r\n  usleep(1000);\r\nout:\r\n  /* wait pid */\r\n  if (waitpid(app->pid, &status, WNOHANG) > 0) {\r\n    /* clear app control block */\r\n    manager_log(MANAGER_LOG_INFO, \"app %s stop success, pid %d \", app->name,\r\n                app->pid);\r\n  clearout:\r\n    app->pid = -1;\r\n    app->last_alive = 0;\r\n    app->state = APP_NOT_RUNNING;\r\n    if (app->kill_pid > 0) {\r\n      killpg(app->kill_pid, SIGKILL);\r\n      manager_log(MANAGER_LOG_INFO, \"app %s stop kill command: %s, pid %d.\",\r\n                  app->name, app->killcmd, app->kill_pid);\r\n      app->kill_pid = -1;\r\n    }\r\n    return 0;\r\n  }\r\n\r\n  if (app->state == APP_EXITING) {\r\n    return 0;\r\n  }\r\n\r\n  return -1;\r\n}\r\n\r\nint app_start(struct app_start_info *start_info) {\r\n  struct app_monitor *app = NULL;\r\n  unsigned int key;\r\n\r\n  if (start_info == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"parameter is invalid\");\r\n    return -1;\r\n  }\r\n\r\n  if (start_info->name == NULL || start_info->name[0] == '\\0') {\r\n    
manager_log(MANAGER_LOG_ERR, \"app name is invalid\");\r\n    return -1;\r\n  }\r\n\r\n  if (start_info->cmdline == NULL || start_info->cmdline[0] == '\\0' ||\r\n      start_info->cmd_max_len > MAX_CMDLINE_LEN - 2 ||\r\n      start_info->cmd_max_len <= 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"app cmdline is invalid\");\r\n    return -1;\r\n  }\r\n\r\n  app = _app_find_app_byid(start_info->name);\r\n  if (app) {\r\n    manager_log(MANAGER_LOG_WARN, \"app %s exists\", start_info->name);\r\n    return -1;\r\n  }\r\n\r\n  app = malloc(sizeof(struct app_monitor));\r\n  if (app == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"malloc for app_monitor failed.\");\r\n    goto errout;\r\n  }\r\n  memset_s(app, sizeof(struct app_monitor), 0, sizeof(struct app_monitor));\r\n\r\n  strncpy(app->name, start_info->name, APP_NAME_LEN - 1);\r\n  copycmds(app->cmdline, sizeof(app->cmdline), start_info->cmdline,\r\n           start_info->cmd_max_len);\r\n  if (start_info->killcmd && start_info->killcmd_max_len > 0 &&\r\n      start_info->killcmd_max_len <= MAX_CMDLINE_LEN - 2) {\r\n    copycmds(app->killcmd, sizeof(app->killcmd), start_info->killcmd,\r\n             start_info->killcmd_max_len);\r\n  } else {\r\n    app->killcmd[0] = '\\0';\r\n  }\r\n\r\n  if (start_info->pidfile) {\r\n    strncpy(app->pid_file, start_info->pidfile, PATH_MAX - 1);\r\n  } else {\r\n    app->pid_file[0] = 0;\r\n  }\r\n\r\n  app->pid = -1;\r\n  app->state = APP_NOT_RUNNING;\r\n  app->last_alive = 0;\r\n  app->dead_time = 0;\r\n  app->start_limit_interval = start_info->keepalive_time * 2;\r\n  app->start_limit_burst = 3;\r\n  app->check_alive = (start_info->check_alive) ? 
1 : 0;\r\n  app->check_alive_time = start_info->keepalive_time;\r\n  app->heartbeat_interval = start_info->heartbeat_interval;\r\n\r\n  if (g_manager_restarting) {\r\n    /* if process exists, attach process*/\r\n    if (app_getpid_from_pidfile(app) != 0 && app->pid_file[0] != 0) {\r\n      time(&app->last_alive);\r\n      app->last_alive -= app->check_alive_time + 5;\r\n      app->state = APP_PENDING;\r\n      manager_log(MANAGER_LOG_INFO, \"start app %s, pending \", start_info->name);\r\n    } else if (app_getpid_by_ps(app) != 0 && app->pid_file[0] == 0) {\r\n      manager_log(MANAGER_LOG_INFO,\r\n                  \"try start app %s, may cause duplicate process \",\r\n                  start_info->name);\r\n    }\r\n  } else {\r\n    manager_log(MANAGER_LOG_INFO, \"start app %s\", start_info->name);\r\n  }\r\n\r\n  key = hash_string(start_info->name);\r\n  hash_add(app_mon.app_map, &app->map, key);\r\n\r\n  return 0;\r\n\r\nerrout:\r\n  if (app) {\r\n    free(app);\r\n  }\r\n  return -1;\r\n}\r\n\r\nint app_stop(const char *name, int gracefull) {\r\n  /* find process and stop */\r\n  struct app_monitor *app = _app_find_app_byid(name);\r\n  if (app == NULL) {\r\n    manager_log(MANAGER_LOG_ERR, \"app %s is not found.\", name);\r\n    return -1;\r\n  }\r\n\r\n  manager_log(MANAGER_LOG_INFO, \"stop app %s, pid %d\", app->name, app->pid);\r\n\r\n  /* stop process*/\r\n  if (gracefull == 0) {\r\n    app->state = APP_FORCE_KILL;\r\n  }\r\n\r\n  if (_app_stop(app, gracefull) != 0) {\r\n    manager_log(MANAGER_LOG_WARN, \"stop app failed.\");\r\n  }\r\n\r\n  hash_del(&app->map);\r\n  free(app);\r\n\r\n  return 0;\r\n}\r\n\r\n/* is process alive? */\r\nint app_alive(const char *name) {\r\n  struct app_monitor *app = _app_find_app_byid(name);\r\n  if (app == NULL) {\r\n    return -1;\r\n  }\r\n\r\n  if (app->pid > 0) {\r\n    return 0;\r\n  }\r\n\r\n  /* process is running? 
*/\r\n  if (kill(app->pid, 0) == 0) {\r\n    return 0;\r\n  }\r\n\r\n  return -1;\r\n}\r\n\r\n/* 获取进程pid */\r\nint app_getpid(const char *name) {\r\n  struct app_monitor *app = _app_find_app_byid(name);\r\n  if (app == NULL) {\r\n    return -1;\r\n  }\r\n\r\n  return app->pid;\r\n}\r\n\r\nint _app_stopall(void) {\r\n  struct app_monitor *app;\r\n  struct hlist_node *tmp;\r\n  int bucket;\r\n\r\n  /* 循环停止所有进程 */\r\n  manager_log(MANAGER_LOG_INFO, \"stop all apps.\");\r\n  hash_for_each_safe(app_mon.app_map, bucket, tmp, app, map) {\r\n    app_stop(app->name, 1);\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\n/* 重启进程 */\r\nint _app_restart(struct app_monitor *app) {\r\n  /* 强制停止进程 */\r\n  if (_app_stop(app, 0) != 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"app stop failed.\");\r\n  }\r\n\r\n  /* 重启进程 */\r\n  if (_app_start(app) != 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"app start failed.\");\r\n    return -1;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint _app_waitchild(void) {\r\n  int status;\r\n  int ret = 0;\r\n  do {\r\n    ret = waitpid(-1, &status, WNOHANG);\r\n  } while (ret > 0);\r\n\r\n  return 0;\r\n}\r\n\r\n/* process heartbeat message*/\r\nint _app_heartbeat_process(struct heartbeat_msg *msg) {\r\n  struct app_monitor *app = NULL;\r\n\r\n  /* find app control block by name */\r\n  app = _app_find_app_byid(msg->name);\r\n  if (app == NULL) {\r\n    manager_log(MANAGER_LOG_WARN, \"app not found, name %s\", msg->name);\r\n    return 0;\r\n  }\r\n\r\n  if (app->state == APP_PENDING) {\r\n    if (app->pid <= 0) {\r\n      /* 如果master初始化过程，则更新pid信息 */\r\n      app->pid = msg->pid;\r\n      app->last_alive = msg->time;\r\n      app->state = APP_RUNNING;\r\n      manager_log(MANAGER_LOG_INFO, \"re-monitoring app %s, pid %d\", app->name,\r\n                  app->pid);\r\n      return 0;\r\n    } else {\r\n      /* process running as daemon */\r\n      if (app_getpid_from_pidfile(app) == 0) {\r\n        manager_log(MANAGER_LOG_INFO, \"app %s, run as child deamon, new pid 
%d\",\r\n                    app->name, app->pid);\r\n        return 0;\r\n      }\r\n    }\r\n  }\r\n\r\n  /* if pid not match, output err message*/\r\n  if (app->pid != msg->pid) {\r\n    if (_app_pid_exists(msg->pid) != 0) {\r\n      return 0;\r\n    }\r\n\r\n    if (_app_exists(app) == 0) {\r\n      manager_log(MANAGER_LOG_ERR, \"app %s, pid is not match %d:%d\", app->name,\r\n                  app->pid, msg->pid);\r\n      int sig = SIGKILL;\r\n      manager_log(MANAGER_LOG_ERR, \"force kill unknown app %s, pid: %d\",\r\n                  msg->name, msg->pid);\r\n      killpg(msg->pid, sig);\r\n      return -1;\r\n    }\r\n    /* if pid not exists, update infomation*/\r\n    manager_log(MANAGER_LOG_ERR,\r\n                \"app %s pid is not exists, and not match %d:%d\", app->name,\r\n                app->pid, msg->pid);\r\n    app->pid = msg->pid;\r\n  }\r\n\r\n  /* if heart message stalls, skip */\r\n  if (app->last_alive > msg->time) {\r\n    manager_log(MANAGER_LOG_DBG, \"msg is stall.\");\r\n    return -1;\r\n  }\r\n\r\n  /* update alive time */\r\n  app->last_alive = msg->time;\r\n\r\n  return 0;\r\n}\r\n\r\n/* create message queue */\r\nint _app_mon_create_msg(void) {\r\n  unsigned int ipc_mode = 0600;\r\n  const char *key_file;\r\n\r\n#ifdef BUILD_TEST\r\n  key_file = \"/proc/self/exe\";\r\n#else\r\n  key_file = key_file_path;\r\n#endif\r\n  g_msgkey = ftok(key_file, 1);\r\n  if (g_msgkey < 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"get key failed.\");\r\n    goto errout;\r\n  }\r\n\r\n  g_msgid = msgget(g_msgkey, ipc_mode | IPC_EXCL | IPC_CREAT);\r\n  if (g_msgid < 0) {\r\n    if (errno != EEXIST) {\r\n      manager_log(MANAGER_LOG_ERR, \"create msg key failed, %s\",\r\n                  strerror(errno));\r\n      goto errout;\r\n    }\r\n\r\n    g_msgid = msgget(g_msgkey, ipc_mode);\r\n    if (g_msgid < 0) {\r\n      manager_log(MANAGER_LOG_ERR, \"attatch msg key failed, %s\",\r\n                  strerror(errno));\r\n      goto errout;\r\n    
}\r\n\r\n    g_manager_restarting = 1;\r\n  }\r\n\r\n  manager_log(MANAGER_LOG_INFO, \"key file %s, key is 0x%x, id is %u\", key_file,\r\n              g_msgkey, g_msgid);\r\n\r\n  return 0;\r\nerrout:\r\n  if (g_msgid > 0) {\r\n    msgctl(g_msgid, IPC_RMID, 0);\r\n    g_msgid = -1;\r\n  }\r\n  return -1;\r\n}\r\n\r\nint _recv_heartbeat(void) {\r\n  struct heartbeat_msg msg;\r\n  int ret;\r\n\r\n  for (;;) {\r\n    /* recv message  */\r\n    ret = msgrcv(g_msgid, &msg, sizeof(msg), HEARTBEAT_MSG, IPC_NOWAIT);\r\n    if (ret != sizeof(msg)) {\r\n      if (errno == ENOMSG) {\r\n        break;\r\n      }\r\n\r\n      /* if queue not exists, recreate message queue */\r\n      if (errno == EIDRM || errno == EINVAL) {\r\n        manager_log(MANAGER_LOG_ERR, \"key %u not exists, recreate.\", g_msgid);\r\n        _app_mon_create_msg();\r\n      }\r\n\r\n      manager_log(MANAGER_LOG_INFO, \"recv msg failed, len = %d:%ld, %s\", ret,\r\n                  sizeof(msg), strerror(errno));\r\n      break;\r\n    }\r\n\r\n    manager_log(MANAGER_LOG_DBG,\r\n                \"heartbeat msg, name = %s, time = %lu, pid = %d\", msg.name,\r\n                msg.time, msg.pid);\r\n\r\n    /* process */\r\n    ret = _app_heartbeat_process(&msg);\r\n    if (ret != 0) {\r\n      manager_log(MANAGER_LOG_DBG, \"process heart beat message failed\");\r\n    }\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint _app_alive(struct app_monitor *app) {\r\n  time_t now;\r\n  int time_out = app->check_alive_time;\r\n\r\n  if (app->pid > 0) {\r\n    if (kill(app->pid, 0) != 0 && app->state != APP_PENDING) {\r\n      manager_log(MANAGER_LOG_ERR, \"app %s exited, pid %d\", app->name,\r\n                  app->pid);\r\n      app->state = APP_NOT_RUNNING;\r\n      return -1;\r\n    }\r\n  } else {\r\n    return -1;\r\n  }\r\n\r\n  time(&now);\r\n\r\n  if (app->state == APP_PENDING) {\r\n    time_out = 10;\r\n  }\r\n\r\n  /* if no check, return */\r\n  if (app->check_alive == 0 && app->state == APP_RUNNING) {\r\n    
return 0;\r\n  }\r\n\r\n  if (app->last_alive > (now - time_out)) {\r\n    return 0;\r\n  }\r\n\r\n  char buffer[64];\r\n  struct tm tm_last;\r\n  localtime_r(&app->last_alive, &tm_last);\r\n  strftime(buffer, sizeof(buffer), \"%x %X\", &tm_last);\r\n  app->state = APP_DEAD;\r\n  app->dead_time = now;\r\n  manager_log(MANAGER_LOG_ERR, \"app %s dead, last %s(%lu:%lu) pid %d.\",\r\n              app->name, buffer, app->last_alive, now, app->pid);\r\n  return -1;\r\n}\r\n\r\n/* check process running status */\r\nint _app_state_check(struct app_monitor *app) {\r\n  if (app->state != APP_RUNNING && app->state != APP_PENDING) {\r\n    return -1;\r\n  }\r\n\r\n  /* if pending, skip */\r\n  if (app->state == APP_PENDING) {\r\n    if (app_getpid_from_pidfile(app) == 0) {\r\n      return 0;\r\n    }\r\n  }\r\n\r\n  if (_app_alive(app) == 0) {\r\n    return 0;\r\n  }\r\n\r\n  return -1;\r\n}\r\n\r\nint _app_timeout_check(void) {\r\n  struct app_monitor *app;\r\n  int bucket = 0;\r\n\r\n  hash_for_each(app_mon.app_map, bucket, app, map) {\r\n    if (_app_state_check(app) == 0) {\r\n      continue;\r\n    }\r\n\r\n    if (_app_restart(app) != 0) {\r\n      manager_log(MANAGER_LOG_ERR, \"restart app %s failed\", app->name);\r\n    }\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint app_monitor(void) {\r\n  /* recv heartbeat message */\r\n  _recv_heartbeat();\r\n\r\n  /* check timeout */\r\n  _app_timeout_check();\r\n\r\n  /* wait child process */\r\n  _app_waitchild();\r\n\r\n  if (unlikely(g_manager_restarting)) {\r\n    g_manager_restarting = 0;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\n/* delete message queue */\r\nint _app_mon_destroy_msg(void) {\r\n  if (g_msgid > 0) {\r\n    msgctl(g_msgid, IPC_RMID, 0);\r\n    g_msgid = -1;\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint manager_monitor_init(void) {\r\n  hash_init(app_mon.app_map);\r\n  hash_init(app_mon.app_map_pid);\r\n  g_manager_restarting = 0;\r\n\r\n  if (_app_mon_create_msg() != 0) {\r\n    manager_log(MANAGER_LOG_ERR, \"create 
monitor msg failed.\");\r\n    return -1;\r\n  }\r\n\r\n  if (g_manager_restarting) {\r\n    manager_log(MANAGER_LOG_ERR, \"master restart, try to recive all messages.\");\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint manager_monitor_exit(void) {\r\n  _app_stopall();\r\n\r\n  _app_mon_destroy_msg();\r\n\r\n  return 0;\r\n}\r\n\r\n#ifdef BUILD_TEST\r\n/* test child process */\r\nvoid app_test(struct app_monitor *app) {\r\n  int count = 0;\r\n  int pidfile = -1;\r\n\r\n  signal(SIGINT, SIG_DFL);\r\n  signal(SIGTERM, SIG_DFL);\r\n\r\n  if (app->pid_file[0] != 0) {\r\n    pidfile = create_pid(app->pid_file);\r\n  }\r\n\r\n  if (app_monitor_init(app->name, NULL) != 0) {\r\n    goto out;\r\n  }\r\n\r\n  while (true) {\r\n    /* report heart beat */\r\n    if (app_monitor_heartbeat() != 0) {\r\n      printf(\"send heartbeat message faild.\");\r\n      break;\r\n    }\r\n\r\n    /* callback function */\r\n    if (test_app.run) {\r\n      if (test_app.run(&test_app, count, app->name) != 0) {\r\n        break;\r\n      }\r\n    } else if (count > 10) {\r\n      break;\r\n    }\r\n\r\n    count++;\r\n    sleep(1);\r\n  }\r\n\r\nout:\r\n  if (pidfile > 0) {\r\n    close(pidfile);\r\n    pidfile = 0;\r\n  }\r\n  printf(\"app %s exit, pid %d\", app->name, getpid());\r\n  return;\r\n}\r\n\r\nvoid app_free_memory(void) {\r\n  struct app_monitor *app;\r\n  struct hlist_node *tmp;\r\n  int bucket;\r\n\r\n  /* stop all process */\r\n  manager_log(MANAGER_LOG_INFO, \"force free all apps.\");\r\n  hash_for_each_safe(app_mon.app_map, bucket, tmp, app, map) {\r\n    hash_del(&app->map);\r\n    free(app);\r\n  }\r\n\r\n  return;\r\n}\r\n\r\n#endif"
  },
  {
    "path": "src/modelbox/manager/src/manager_monitor.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#ifndef MODELBOX_MANAGER_MONITOR_H\r\n#define MODELBOX_MANAGER_MONITOR_H\r\n\r\n#include <linux/limits.h>\r\n\r\n#include \"manager_common.h\"\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n#define MAX_CMDLINE_LEN (4096 * 2)\r\n\r\nint manager_monitor_init(void);\r\n\r\n/**\r\n * @brief start app\r\n *\r\n * @param name app name\r\n * @param cmdline command list, format arg1\\0arg2\\0arg3\\0\\0\r\n * @param cmd_max_len max length of cmdline string.\r\n * @param pidfile pid file path\r\n * @param check_alive whether check alive\r\n * @param keepalive_time keep alive time\r\n * @param heartbeat_interval heart beat interval\r\n * @return int\r\n */\r\n\r\nstruct app_start_info {\r\n  const char *name;\r\n  const char *cmdline; /* arg1\\0arg2\\0arg3\\0\\0 */\r\n  int cmd_max_len;\r\n  const char *killcmd; /* arg1\\0arg2\\0arg3\\0\\0 */\r\n  int killcmd_max_len;\r\n  const char *pidfile;\r\n  int check_alive;\r\n  int keepalive_time;\r\n  int heartbeat_interval;\r\n};\r\n\r\nint app_start(struct app_start_info *start_info);\r\n\r\n/**\r\n * @brief stop app\r\n *\r\n * @param name app name\r\n * @param gracefull gracefull stop\r\n * @return int\r\n */\r\nint app_stop(const char *name, int gracefull);\r\n\r\nint app_alive(const char *name);\r\n\r\nint 
app_getpid(const char *name);\r\n\r\nint app_monitor(void);\r\n\r\nint manager_monitor_exit(void);\r\n\r\n#ifdef BUILD_TEST\r\n\r\nstruct app_monitor;\r\nstruct Test_App;\r\n\r\nvoid app_free_memory(void);\r\n\r\nvoid app_test(struct app_monitor *app);\r\ntypedef int (*app_child_func)(struct Test_App *child, int count,\r\n                              const char *name);\r\nstruct Test_App {\r\n  app_child_func run;\r\n  void *arg1;\r\n  void *arg2;\r\n  void *arg3;\r\n};\r\nextern struct Test_App test_app;\r\n#endif\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif  // !MODELBOX_MANAGER_MONITOR_H"
  },
  {
    "path": "src/modelbox/manager/src/manager_monitor_client.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"modelbox/manager/manager_monitor_client.h\"\r\n\r\n#include <stdlib.h>\r\n\r\n#include \"manager_common.h\"\r\n#include \"manager_conf.h\"\r\n#include \"manager_monitor.h\"\r\n\r\nchar g_key_file[PATH_MAX];\r\n\r\nstruct app_struct {\r\n  char name[APP_NAME_LEN];\r\n  pid_t pid;\r\n  time_t time;\r\n};\r\n\r\nstruct app_struct app_info = {.name[0] = 0};\r\nstatic key_t g_msgkey = -1;\r\nstatic int g_msgid = -1;\r\nstatic int g_keepalive_time = DEFAULT_WATCHDOG_TIMEOUT;\r\nstatic int g_heartbeat_interval = DEFAULT_HEARTBEAT_INTERVAL;\r\n\r\nextern void app_log_reg(manager_log_callback callback);\r\n\r\nstatic void _app_monitor_reset_msgkey(void) {\r\n  g_msgkey = -1;\r\n  g_msgid = -1;\r\n}\r\n\r\nint app_monitor_init(const char *name, const char *keyfile) {\r\n  if (name == NULL) {\r\n    name = getenv(\"MANAGER_MONITOR_NAME\");\r\n    if (name == NULL) {\r\n      return -1;\r\n    }\r\n  }\r\n\r\n  if (keyfile == NULL) {\r\n    keyfile = getenv(\"MANAGER_MONITOR_KEYFILE\");\r\n    if (keyfile == NULL) {\r\n      return -1;\r\n    }\r\n  }\r\n\r\n  const char *keepalive_time = getenv(\"MANAGER_MONITOR_KEEPALIVE_TIME\");\r\n  if (keepalive_time) {\r\n    int alive_time = atol(keepalive_time);\r\n    if (alive_time > 0) {\r\n      g_keepalive_time = alive_time;\r\n    
}\r\n  }\r\n\r\n  const char *heart_interval = getenv(\"MANAGER_MONITOR_HEARTBEAT_INTERVAL\");\r\n  if (heart_interval) {\r\n    int interval_time = atol(heart_interval);\r\n    if (interval_time > 0) {\r\n      g_heartbeat_interval = interval_time;\r\n    }\r\n  }\r\n\r\n  strncpy(app_info.name, name, APP_NAME_LEN - 1);\r\n  app_info.pid = getpid();\r\n  _app_monitor_reset_msgkey();\r\n  if (strlen(g_key_file) <= 0) {\r\n    snprintf(g_key_file, PATH_MAX, \"%s/%s.key\", MANAGER_PID_PATH, MANAGER_NAME);\r\n  }\r\n\r\n  if (keyfile) {\r\n    snprintf(g_key_file, PATH_MAX, \"%s\", keyfile);\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint app_monitor_keepalive_time(void) { return g_keepalive_time; }\r\n\r\nint app_monitor_heartbeat_interval(void) { return g_heartbeat_interval; }\r\n\r\nint app_monitor_keyfile(char *file) {\r\n  if (file == NULL) {\r\n    return -1;\r\n  }\r\n\r\n  strncpy(g_key_file, file, PATH_MAX - 1);\r\n  _app_monitor_reset_msgkey();\r\n\r\n  return 0;\r\n}\r\n\r\nint app_attach_msg_queue(void) {\r\n  const char *key_file = NULL;\r\n#ifdef BUILD_TEST\r\n  key_file = \"/proc/self/exe\";\r\n#else\r\n  key_file = g_key_file;\r\n#endif\r\n\r\n  if (g_msgkey <= 0) {\r\n    if (key_file[0] == 0) {\r\n      return -1;\r\n    }\r\n\r\n    g_msgkey = ftok(key_file, 1);\r\n    if (g_msgkey < 0) {\r\n      if (errno == ENOENT) {\r\n        return 0;\r\n      }\r\n      return 0;\r\n    }\r\n  }\r\n\r\n  if (g_msgid < 0) {\r\n    g_msgid = msgget(g_msgkey, 0600);\r\n    if (g_msgid < 0) {\r\n      return -1;\r\n    }\r\n  }\r\n\r\n  return 0;\r\n}\r\n\r\nint app_monitor_heartbeat(void) {\r\n  struct heartbeat_msg msgs[2];\r\n  struct heartbeat_msg *msg = &msgs[0];\r\n  int ret;\r\n  time_t now;\r\n\r\n  if (app_info.name[0] == 0) {\r\n    return 0;\r\n  }\r\n\r\n  time(&now);\r\n  if (now == app_info.time) {\r\n    return 0;\r\n  }\r\n\r\n  app_info.time = now;\r\n\r\n  if (app_attach_msg_queue() != 0) {\r\n    return -1;\r\n  }\r\n\r\n  if (g_msgid < 0) {\r\n    
return -1;\r\n  }\r\n\r\n  memset(&msgs, 0, sizeof(msgs));\r\n  msg->pid = app_info.pid;\r\n  msg->mtype = HEARTBEAT_MSG;\r\n  strncpy(msg->name, app_info.name, APP_NAME_LEN);\r\n  msg->time = app_info.time;\r\n\r\n  ret = msgsnd(g_msgid, msg, sizeof(*msg), IPC_NOWAIT);\r\n  if (ret != 0) {\r\n    if (errno == EIDRM) {\r\n      _app_monitor_reset_msgkey();\r\n    }\r\n  }\r\n\r\n  return ret;\r\n}"
  },
  {
    "path": "src/modelbox/manager/src/util.c",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n#include \"util.h\"\r\n\r\n#include <dirent.h>\r\n#include <libgen.h>\r\n#include <sys/time.h>\r\n#include <time.h>\r\n\r\n#include \"common.h\"\r\n\r\nunsigned long get_tick_count(void) {\r\n  struct timespec ts;\r\n\r\n  clock_gettime(CLOCK_MONOTONIC, &ts);\r\n\r\n  return (ts.tv_sec * 1000 + ts.tv_nsec / 1000000);\r\n}\r\n\r\nvoid close_all_fd(void) {\r\n  char path_name[PATH_MAX];\r\n  DIR *dir = NULL;\r\n  struct dirent *ent;\r\n  int dir_fd = -1;\r\n\r\n  snprintf(path_name, sizeof(path_name), \"/proc/self/fd/\");\r\n  dir = opendir(path_name);\r\n  if (dir == NULL) {\r\n    fprintf(stderr, \"open directory failed, %s\\n\", strerror(errno));\r\n    goto errout;\r\n  }\r\n\r\n  dir_fd = dirfd(dir);\r\n\r\n  while ((ent = readdir(dir)) != NULL) {\r\n    int fd = atoi(ent->d_name);\r\n    if (fd < 0 || dir_fd == fd) {\r\n      continue;\r\n    }\r\n    switch (fd) {\r\n      case STDIN_FILENO:\r\n      case STDOUT_FILENO:\r\n      case STDERR_FILENO:\r\n        continue;\r\n        break;\r\n      default:\r\n        break;\r\n    }\r\n\r\n    close(fd);\r\n  }\r\n\r\n  closedir(dir);\r\n\r\n  return;\r\nerrout:\r\n  if (dir) {\r\n    closedir(dir);\r\n  }\r\n  return;\r\n}\r\n\r\nint get_prog_path(char *path, int max_len) {\r\n  /* 读取进程的二进制路径 */\r\n  int len = 
readlink(\"/proc/self/exe\", path, max_len - 1);\r\n  if (len < 0) {\r\n    return -1;\r\n  }\r\n\r\n  path[len] = 0;\r\n\r\n  dirname(path);\r\n\r\n  return 0;\r\n}\r\n\r\nconst char *get_modelbox_root_path(void) {\r\n  static char root_path[PATH_MAX] = {0};\r\n  static int is_init = false;\r\n\r\n  if (is_init) {\r\n    return root_path;\r\n  }\r\n\r\n  is_init = 1;\r\n  char prog_path[PATH_MAX] = {0};\r\n  char path_tmp[PATH_MAX];\r\n\r\n  if (get_prog_path(prog_path, PATH_MAX - 1) != 0) {\r\n    return root_path;\r\n  }\r\n\r\n  snprintf_s(path_tmp, PATH_MAX - 1, PATH_MAX - 1, \"%s/../../../\", prog_path);\r\n  if (realpath(path_tmp, root_path) == NULL) {\r\n    return root_path;\r\n  }\r\n\r\n  if (root_path[0] == '/' && root_path[1] == '\\0') {\r\n    root_path[0] = '\\0';\r\n  }\r\n\r\n  return root_path;\r\n}\r\n\r\nstatic char *string_replace(const char *in, char *out, int out_max,\r\n                            const char *from, const char *to) {\r\n  char *needle;\r\n  size_t from_len = strnlen(from, PATH_MAX);\r\n  size_t to_len = strnlen(to, PATH_MAX);\r\n  size_t resoffset = 0;\r\n  int ret = 0;\r\n\r\n  while ((needle = strstr(in, from)) && out_max - resoffset > 0) {\r\n    ret = memcpy_s(out + resoffset, out_max - resoffset, in, needle - in);\r\n    if (ret != 0) {\r\n      return NULL;\r\n    }\r\n    resoffset += needle - in;\r\n\r\n    in = needle + from_len;\r\n    ret = strncpy_s(out + resoffset, out_max - resoffset, to, to_len);\r\n    if (ret != 0) {\r\n      return NULL;\r\n    }\r\n\r\n    resoffset += to_len;\r\n  }\r\n\r\n  if (out_max - resoffset <= 0) {\r\n    return NULL;\r\n  }\r\n\r\n  ret = strncpy_s(out + resoffset, out_max - resoffset, in, out_max);\r\n  if (ret != 0) {\r\n    return NULL;\r\n  }\r\n\r\n  return out;\r\n}\r\n\r\nconst char *get_modelbox_full_path(const char *path) {\r\n  const char *root_path = get_modelbox_root_path();\r\n  static char full_path[PATH_MAX * 4] = {0};\r\n  full_path[0] = '\\0';\r\n  
memset(full_path, 0, sizeof(full_path));\r\n  if (string_replace(path, full_path, PATH_MAX * 4, \"${MODELBOX_ROOT}\",\r\n                     root_path) == NULL) {\r\n    return NULL;\r\n  }\r\n\r\n  return full_path;\r\n}\r\n\r\nconst char *strcmds(const char *cmd, int cmd_max_len) {\r\n  static char full_cmd[PATH_MAX * 4] = {0};\r\n  full_cmd[0] = 0;\r\n  for (int i = 0; i < PATH_MAX && i < cmd_max_len; i++) {\r\n    if (i > 0) {\r\n      if (cmd[i] == '\\0' && cmd[i - 1] == '\\0') {\r\n        full_cmd[i] = '\\0';\r\n        break;\r\n      }\r\n    }\r\n\r\n    if (cmd[i] == '\\0') {\r\n      full_cmd[i] = ' ';\r\n      continue;\r\n    }\r\n\r\n    full_cmd[i] = cmd[i];\r\n  }\r\n\r\n  return full_cmd;\r\n}\r\n\r\nvoid copycmds(char *dest, int dest_size, const char *src, int src_size)\r\n{\r\n  int i = 0;\r\n  for (i = 0; i < dest_size && i < src_size; i++) {\r\n    if (i > 0) {\r\n      if (src[i] == '\\0' && src[i - 1] == '\\0') {\r\n        dest[i] = '\\0';\r\n        break;\r\n      }\r\n    }\r\n\r\n    dest[i] = src[i];\r\n  }\r\n\r\n  if (i <= 0) {\r\n    i = 1;\r\n  }\r\n  dest[i - 1] = '\\0';\r\n  dest[i] = '\\0';\r\n}\r\n"
  },
  {
    "path": "src/modelbox/manager/src/util.h",
    "content": "/*\r\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n */\r\n\r\n\r\n#ifndef MODELBOX_MANAGER_UTIL_H\r\n#define MODELBOX_MANAGER_UTIL_H\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif /*__cplusplus */\r\n\r\n/* 获取系统tick值, 1ms */\r\nunsigned long get_tick_count(void);\r\n\r\n/* 关闭所有文件句柄，除标准输入，输出，错误 */\r\nvoid close_all_fd(void);\r\n\r\n/* 获取当前进程的路径 */\r\nint get_prog_path(char *path, int max_len);\r\n\r\nconst char *get_modelbox_root_path(void);\r\n\r\n/* 获取完整的路径，非线程安全 */\r\nconst char *get_modelbox_full_path(const char *path);\r\n\r\n/* 合并命令行为字符串,格式arg1\\0arg2\\0\\0，非线程安全*/\r\nconst char *strcmds(const char *cmd, int cmd_max_len);\r\n\r\n/* 复制cmd命令，格式为arg1\\0arg2\\0\\0*/\r\nvoid copycmds(char *dest, int dest_size, const char *src, int src_size);\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif /*__cplusplus */\r\n\r\n#endif\r\n"
  },
  {
    "path": "src/modelbox/server/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(DUKTAPE_FOUND)\n    add_definitions(-DENABLE_JS_PLUGIN)\nendif()\n\nfile(GLOB_RECURSE MODELBOX_SOURCES *.cpp *.cc *.c)\nexclude_files_from_dir_in_list(MODELBOX_SOURCES \"${MODELBOX_SOURCES}\" \"${CMAKE_CURRENT_LIST_DIR}/plugin/\")\nexclude_files_from_dir_in_list(MODELBOX_SOURCES \"${MODELBOX_SOURCES}\" \"${CMAKE_CURRENT_LIST_DIR}/iva_plugin/\")\nset(MODELBOX_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\nfile(GLOB_RECURSE SERVER_BIN_FILES ${CMAKE_CURRENT_LIST_DIR}/bin/*)\n\nlist(APPEND MODELBOX_SERVER_INCLUDES ${MODELBOX_INCLUDE})\nlist(APPEND MODELBOX_SERVER_INCLUDES ${MODELBOX_COMMON_INCLUDE})\nlist(APPEND MODELBOX_SERVER_INCLUDES ${CPP_HTTPLIB_INCLUDE})\nlist(APPEND SERVER_INCLUDES ${LIBMODELBOX_INCLUDE})\nlist(APPEND SERVER_INCLUDES ${LIBMODELBOX_BASE_INCLUDE})\nlist(APPEND SERVER_INCLUDES ${MODELBOX_INCLUDE})\nlist(APPEND SERVER_INCLUDES ${OPENSSL_INCLUDE_DIR})\nlist(APPEND SERVER_INCLUDES ${MODELBOX_SERVER_INCLUDES})\nlist(APPEND SERVER_INCLUDES ${MODELBOX_MANAGER_INCLUDE})\nlist(REMOVE_DUPLICATES SERVER_INCLUDES)\n\ninclude_directories(${SERVER_INCLUDES})\ninclude_directories(${TOML_INCLUDE_DIR})\ninclude_directories(${DUKTAPE_INCLUDE_DIR})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(HEADER \n    ${MODELBOX_INCLUDE}/modelbox\n    
${MODELBOX_COMMON_INCLUDE}/modelbox\n    )\n\nadd_subdirectory(plugin)\n\nadd_executable(modelbox-server ${MODELBOX_SOURCES})\nset_target_properties(modelbox-server PROPERTIES ENABLE_EXPORTS 1)\ntarget_link_libraries(modelbox-server ${LIBMODELBOX_SHARED})\ntarget_link_libraries(modelbox-server ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(modelbox-server pthread)\ntarget_link_libraries(modelbox-server rt)\ntarget_link_libraries(modelbox-server ${DUKTAPE_LIBRARIES})\ntarget_link_libraries(modelbox-server manager-client)\ntarget_link_libraries(modelbox-server ${OPENSSL_LIBRARIES})\ntarget_link_libraries(modelbox-server ${CPP_HTTPLIB_STATIC_LIBRARIES})\n\nif(NOT CMAKE_INSTALL_RUNSTATEDIR)\n    set(CMAKE_INSTALL_RUNSTATEDIR \"var/run\" CACHE INTERNAL \"\")\nendif()\n\nif (NOT MODELBOX_CONFIG_FILE_NAME)\n    set(MODELBOX_CONFIG_FILE_NAME \"modelbox.conf\")\nendif()\n\nif (STANDALONE)\n    set_target_properties(modelbox-server PROPERTIES INSTALL_RPATH \"$ORIGIN/../lib\")\nendif()\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox-opts ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-opts @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/init.d/modelbox.in ${CMAKE_CURRENT_BINARY_DIR}/etc/init.d/modelbox @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox.conf.in ${CMAKE_CURRENT_BINARY_DIR}/etc/${MODELBOX_CONFIG_FILE_NAME} @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox-dev.conf.in ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-dev.conf.template @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox-template.conf.in ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-template.conf @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/etc/modelbox.service.in ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox.service @ONLY)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/misc/modelbox-server-cmd.json.in ${CMAKE_CURRENT_BINARY_DIR}/misc/modelbox-server-cmd.json @ONLY)\n\nadd_dependencies(modelbox-server ${LIBMODELBOX_SHARED})\n\nset_target_properties(modelbox-server PROPERTIES 
OUTPUT_NAME \"modelbox\")\n\n\ninstall(TARGETS modelbox-server \n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\ninstall(DIRECTORY \n    ${HEADER} \n    DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}\n    COMPONENT server-devel\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/init.d/modelbox\n    DESTINATION /etc/init.d\n    PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE \n    COMPONENT server\n    )\n\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/${MODELBOX_CONFIG_FILE_NAME}\n    ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-dev.conf.template\n    ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-opts\n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/ \n    PERMISSIONS OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ\n    COMPONENT server\n    )\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox-template.conf\n    DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/modelbox/misc\n    PERMISSIONS OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/graph\n    DIRECTORY_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/init-script/modelbox\n    DIRECTORY_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION ${CMAKE_INSTALL_FULL_SYSCONFDIR}/modelbox/conf.d\n    DIRECTORY_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ\n    COMPONENT server\n    )\n\ninstall(DIRECTORY \n    DESTINATION /${CMAKE_INSTALL_RUNSTATEDIR}/modelbox/\n    COMPONENT server\n)\n\ninstall(DIRECTORY \n    DESTINATION /var/log/modelbox\n    COMPONENT server\n)\n\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/misc/modelbox-server-cmd.json\n    DESTINATION 
${MODELBOX_TOOLS_PATH}\n    COMPONENT server\n    )\ninstall(PROGRAMS ${SERVER_BIN_FILES}\n    DESTINATION ${MODELBOX_TOOLS_PATH}\n    COMPONENT server)\n\nif (SYSTEMDSYSTEMUNITDIR)\ninstall(FILES \n    ${CMAKE_CURRENT_BINARY_DIR}/etc/modelbox.service \n    DESTINATION ${SYSTEMDSYSTEMUNITDIR} \n    COMPONENT server\n    )\nelse()\nmessage(\"Skip install systemd unit\")\nendif()\n\nset(MODELBOX_SERVER_INCLUDE \n    ${MODELBOX_SERVER_INCLUDES} \n    CACHE INTERNAL \"\")\n    \nset(MODELBOX_SERVER_SOURCES ${MODELBOX_SOURCES} CACHE INTERNAL \"\")\nset(MODELBOX_SERVER_LINK_LIBRARIES \n    ${HUAWEI_SECURE_C_LIBRARIES}\n    ${BBOX_LIBRARIES}\n    ${LINK_LIBRARIES}\n    manager-client\n    ${CPP_HTTPLIB_STATIC_LIBRARIES}\n    ${MODELBOX_COMMON_LIBRARY}\n    CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/modelbox/server/bin/develop",
    "content": "#!/bin/bash\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nMODELBOX_DEV_FILE=\"${MODELBOX_ROOT}/usr/local/share/modelbox/misc/modelbox-template.conf\"\nMODELBOX_USER_HOME=\"$HOME/modelbox-service\"\n\nMODELBOX_SYS_FLOW_DIR=\"${MODELBOX_ROOT}/usr/local/etc/modelbox/graph/\"\nMODELBOX_DEFAULT_PORT=\"1104\"\nMODELBOX_BIN=\"${MODELBOX_ROOT}/usr/local/bin/modelbox\"\nMODELBOX_TOOL_BIN=\"${MODELBOX_ROOT}/usr/local/bin/modelbox-tool\"\n\nsetup_env() {\n    MODELBOX_FLOW_DIR=\"$MODELBOX_USER_HOME/graph\"\n    MODELBOX_CONF_DIR=\"$MODELBOX_USER_HOME/conf\"\n    MODELBOX_USER_OPT=\"$MODELBOX_CONF_DIR/modelbox-opt\"\n    MODELBOX_USER_CONF_FILE=\"$MODELBOX_CONF_DIR/modelbox.conf\"\n    MODELBOX_LOG_DIR=\"$MODELBOX_USER_HOME/log\"\n    MODELBOX_LOG_FILE=\"$MODELBOX_LOG_DIR/modelbox.log\"\n    MODELBOX_PID_DIR=\"$MODELBOX_USER_HOME/run\"\n    MODELBOX_PID_FILE=\"$MODELBOX_PID_DIR/modelbox.pid\"\n    MODELBOX_SOCK_FILE=\"$MODELBOX_PID_DIR/modelbox.sock\"\n    MODELBOX_RUN_SCRIPT=\"$MODELBOX_USER_HOME/modelbox\"\n\n    MODELBOX_MANAGER_USER_OPT=\"$MODELBOX_CONF_DIR/manager-opt\"\n    MODELBOX_MANAGER_PID_FILE=\"$MODELBOX_PID_DIR/manager.pid\"\n    MODELBOX_MANAGER_KEY_FILE=\"$MODELBOX_PID_DIR/manager.key\"\n    MODELBOX_MANAGER_LOG_FILE=\"$MODELBOX_LOG_DIR/manager.log\"\n    MODELBOX_MANAGER_USER_CONF_FILE=\"$MODELBOX_CONF_DIR/manager.conf\"\n    
MODELBOX_MANAGER_RUN_SCRIPT=\"$MODELBOX_USER_HOME/modelbox-manager\"\n\n    MODELBOX_USER_SERVER_IP=\"0.0.0.0\"\n    MODELBOX_USER_SERVER_PORT=\"$MODELBOX_DEFAULT_PORT\"\n}\n\ndebug_mode_info() {\n    echo \"Debug ModelBox Info:\"\n    [ ! -z \"${MODELBOX_ROOT}\" ] && echo \"  Modelbox Root:   ${MODELBOX_ROOT}\"\n    echo \"  Home:            $MODELBOX_USER_HOME\"\n    echo \"  Config:          $MODELBOX_USER_CONF_FILE\"\n    echo \"  Log:             $MODELBOX_LOG_DIR\"\n    echo \"  Service command: $MODELBOX_RUN_SCRIPT {start|stop|restart|status}\"\n    echo \"  Manager command: $MODELBOX_MANAGER_RUN_SCRIPT {start|stop|restart|status}\"\n    echo \"  Tool Command:    modelbox-tool server -conf $MODELBOX_USER_CONF_FILE\" \n    echo \"  UI URL:          http://$MODELBOX_USER_SERVER_IP:${MODELBOX_USER_SERVER_PORT}/editor/\"  \n    return 0;\n}\n\ncheckportbind() {\n    IP=$1\n    PORT=$2\n    ${MODELBOX_TOOL_BIN} server --check-port $IP:$PORT\n    return $?\n}\n\nsetup_init_script() {\n    if [ ! -f \"${MODELBOX_ROOT}/etc/init.d/modelbox\" ]; then\n        echo \"${MODELBOX_ROOT}/etc/init.d/modelbox not exists, please reinstall modelbox server\"\n        return 1\n    fi\n    \n    cp ${MODELBOX_ROOT}/etc/init.d/modelbox $MODELBOX_RUN_SCRIPT\n    if [ $? -ne 0 ]; then\n        return 1\n    fi\n\n    sed -i \"s@CUSTOM_ENV_FILE=\\\"\\\"@CUSTOM_ENV_FILE=\\\"$MODELBOX_USER_OPT\\\"@\" $MODELBOX_RUN_SCRIPT\n    if [ $? -ne 0 ]; then\n        echo \"Modify script failed, please check permission\"\n        return 1\n    fi\n\n    cp ${MODELBOX_ROOT}/etc/init.d/modelbox-manager $MODELBOX_MANAGER_RUN_SCRIPT\n    if [ $? -ne 0 ]; then\n        return 1\n    fi\n\n    sed -i \"s@CUSTOM_ENV_FILE=\\\"\\\"@CUSTOM_ENV_FILE=\\\"$MODELBOX_MANAGER_USER_OPT\\\"@\" $MODELBOX_MANAGER_RUN_SCRIPT\n    if [ $? 
-ne 0 ]; then\n        echo \"Modify script failed, please check permission\"\n        return 1\n    fi\n\n    return 0;\n}\n\nsetup_debug_env() {\n    echo \"MODELBOX_ROOT=${MODELBOX_ROOT}\" >> $MODELBOX_USER_OPT\n    echo \"PIDDIR=$MODELBOX_PID_DIR\" >> $MODELBOX_USER_OPT\n    echo \"PIDFILE=$MODELBOX_PID_FILE\" >> $MODELBOX_USER_OPT\n    echo \"MODELBOX_OPTS=\\\"-c $MODELBOX_USER_CONF_FILE\\\"\" >> $MODELBOX_USER_OPT\n\n    echo \"MODELBOX_ROOT=${MODELBOX_ROOT}\" >> $MODELBOX_MANAGER_USER_OPT\n    echo \"PIDDIR=$MODELBOX_PID_DIR\" >> $MODELBOX_MANAGER_USER_OPT\n    echo \"PIDFILE=$MODELBOX_MANAGER_PID_FILE\" >> $MODELBOX_MANAGER_USER_OPT\n    echo \"KEYFILE=$MODELBOX_MANAGER_KEY_FILE\" >> $MODELBOX_MANAGER_USER_OPT\n    echo \"MODELBOX_MANAGER_OPTS=\\\"-c $MODELBOX_MANAGER_USER_CONF_FILE\\\"\" >> $MODELBOX_MANAGER_USER_OPT\n    return 0\n}\n\nsetup_config_file() {\n    cp -a ${MODELBOX_DEV_FILE} ${MODELBOX_USER_CONF_FILE}\n    if [ $? -ne 0 ]; then\n        echo \"Setup develop environment failed\"\n        restore_origin\n        return 1\n    fi\n\n    sed -i \"s@#LOG_FILE_PATH@$MODELBOX_LOG_FILE@g\" ${MODELBOX_USER_CONF_FILE}\n    if [ $? -ne 0 ]; then\n        echo \"setup conf for log failed.\"\n        return 1\n    fi\n\n    sed -i \"s@#LISTEN_PATH@$MODELBOX_SOCK_FILE@g\" ${MODELBOX_USER_CONF_FILE}\n    if [ $? -ne 0 ]; then\n        echo \"Setup conf for sock failed.\"\n        return 1\n    fi\n\n    sed -i \"s@#SERVER_IP@$MODELBOX_USER_SERVER_IP@g\" ${MODELBOX_USER_CONF_FILE}\n    if [ $? -ne 0 ]; then\n        echo \"Setup conf for server ip failed.\"\n        return 1\n    fi\n\n    sed -i \"s@#SERVER_PORT@$MODELBOX_USER_SERVER_PORT@g\" ${MODELBOX_USER_CONF_FILE}\n    if [ $? -ne 0 ]; then\n        echo \"Setup conf for listen port failed.\"\n        return 1\n    fi\n\n    sed -i \"s@#FLOW_PATH@$MODELBOX_FLOW_DIR@g\" ${MODELBOX_USER_CONF_FILE}\n    if [ $?
-ne 0 ]; then\n        echo \"Setup conf for graph dir failed.\"\n        return 1\n    fi\n\n    CLIENTIP=`env | grep SSH_CLIENT | awk -F'=' '{print $2}' | awk '{print $1}'`\n    if [ ! -z \"$CLIENTIP\" ]; then\n        sed -i \"s@#ACL_IPS@\\\"${CLIENTIP}\\\"@g\" ${MODELBOX_USER_CONF_FILE}\n        if [ $? -ne 0 ]; then\n            echo \"Setup develop mode failed\"\n            restore_origin\n            return 1\n        fi\n    fi\n\n    echo \"Warning: This command will make modelbox listen 0.0.0.0, which means that other people in the same network may access and attack the service.\n  For network security, we have added an access control list to allow only legitimate IPs to access the service.\n  The default whitelist only includes your local IP.\nFor more details on security, please refer to the manual.\n\"\n\n    echo \"logfile $MODELBOX_MANAGER_LOG_FILE\" > ${MODELBOX_MANAGER_USER_CONF_FILE}\n    echo \"loglevel INFO\" >> ${MODELBOX_MANAGER_USER_CONF_FILE}\n    appcmd=\"app -name \\\"modelbox\\\"\"\n    appcmd=\"$appcmd -pidfile ${MODELBOX_PID_FILE}\"\n    appcmd=\"$appcmd --\"\n    appcmd=\"$appcmd ${MODELBOX_RUN_SCRIPT} start\"\n    echo \"$appcmd\" >> ${MODELBOX_MANAGER_USER_CONF_FILE}\n\n\n    debug_mode_info\n    echo \"\"\n    echo \"Starting modelbox at $MODELBOX_USER_HOME for `id -un`:\" \n    return 0\n}\n\ninit_debug_home() {\n    if [ -d \"$MODELBOX_USER_HOME\" ]; then\n        if [ -x \"$MODELBOX_RUN_SCRIPT\" ]; then\n            echo \"Modelbox already exists.\"\n            $MODELBOX_RUN_SCRIPT status\n        else\n            echo \"Directory $MODELBOX_USER_HOME already exists, please choose another one with --home argument.\"\n        fi\n        return 1\n    fi\n\n    checkportbind \"$MODELBOX_USER_SERVER_IP\" \"$MODELBOX_USER_SERVER_PORT\"\n    if [ $? 
-ne 0 ]; then\n        echo \"Port $MODELBOX_USER_SERVER_PORT already used by other service or you have no permission.\"\n        echo \"Please choose another port with --port [port] option.\"\n        echo \"Or stop related service.\"\n        return 1\n    fi\n\n    mkdir -p $MODELBOX_USER_HOME\n    mkdir -p $MODELBOX_FLOW_DIR\n    mkdir -p $MODELBOX_LOG_DIR\n    mkdir -p $MODELBOX_CONF_DIR\n\n    setup_init_script\n    if [ $? -ne 0 ]; then\n        echo \"Init debug home failed, failed to setup init script\"\n        return 1\n    fi\n\n    setup_debug_env\n    if [ $? -ne 0 ]; then\n        echo \"Init debug home failed, failed to setup debug env\"\n        return 1\n    fi\n\n    setup_config_file\n    if [ $? -ne 0 ]; then\n        echo \"Init debug home failed, failed to setup config file\"\n        return 1\n    fi\n\n    return 0\n}\n\nchange_user() {\n    SYSTEMFILE=`systemctl show -p FragmentPath modelbox | awk -F= '{print $2}' 2>&1`\n    if [ -z \"$SYSTEMFILE\" ]; then\n        return\n    fi\n\n    CONF_DIR=\"${SYSTEMFILE}.d\"\n    mkdir -p \"$CONF_DIR\"\n}\n\nrestart_modelbox() {\n    $MODELBOX_RUN_SCRIPT restart 2>&1 > /dev/null\n}\n\nstop_modelbox() {\n    $MODELBOX_RUN_SCRIPT stop\n}\n\ndevelop_status() {\n    if [ ! -d \"$MODELBOX_USER_HOME\" ] || [ ! -x \"$MODELBOX_RUN_SCRIPT\" ] || [ !
-f \"$MODELBOX_USER_CONF_FILE\" ]; then\n        echo \"Debug modelbox not found, please run -s to setup, or sepcify home with --home run again.\"\n        return 1\n    fi\n\n    MODELBOX_USER_SERVER_IP=\"`${MODELBOX_TOOL_BIN} server -conf $MODELBOX_USER_CONF_FILE --get-conf-value server.ip`\"\n    MODELBOX_USER_SERVER_PORT=\"`${MODELBOX_TOOL_BIN} server -conf $MODELBOX_USER_CONF_FILE --get-conf-value server.port`\"\n\n    debug_mode_info\n    MSG=`$MODELBOX_RUN_SCRIPT status 2>/dev/null`\n    echo \"  Service Status:  $MSG\"\n    MSG=`$MODELBOX_MANAGER_RUN_SCRIPT status 2>/dev/null`\n    echo \"  Manager Status:  $MSG\"\n\n    return 0\n}\n\nsetup_develop_mode() {\n    init_debug_home\n    if [ $? -ne 0 ]; then\n        return 1\n    fi\n\n    stop_modelbox 2>&1 > /dev/null\n\n    $MODELBOX_RUN_SCRIPT restart  2>&1 > /dev/null\n    $MODELBOX_MANAGER_RUN_SCRIPT restart  2>&1 > /dev/null\n    $MODELBOX_RUN_SCRIPT status\n    $MODELBOX_MANAGER_RUN_SCRIPT status\n}\n\nshowhelp() {\n    echo \" -s            setup modelbox develop enviroment.\"\n    echo \"  --port       setup develop modelbox port, default is 1104.\"\n    echo \" -q            develop modelbox status.\"\n    echo \" -h            show this help message.\"\n    echo \"\"\n    echo \"addition options\"\n    echo \" --home        home directory, default is \\$HOME/modelbox-service\"\n}\n\nmain() {\n    OPTS=`getopt -o siqh --long home:,port: \\\n        -n  \"\" -- \"$@\"`\n\n    if [ $# -lt 1 ]; then showhelp; exit 1; fi\n    if [ $? 
!= 0 ] ; then echo \"Terminating...\" >&2 ; exit 1 ; fi\n\n    # Note the quotes around `$TEMP': they are essential!\n    eval set -- \"$OPTS\"\n    action=\"NONE\"\n\n    while true; do\n        case \"$1\" in\n        -s | --on)\n            action=\"setup\"\n            shift ;;\n        --home)\n            MODELBOX_USER_HOME=\"$2\"\n            shift 2;;\n        --port)\n            MODELBOX_DEFAULT_PORT=\"$2\"\n            shift 2;;\n        -q)\n            action=\"query\"\n            shift ;;\n        -h)\n            showhelp\n            exit 0\n            shift ;;\n        -- ) shift; break ;;\n        * ) break ;;\n          esac\n    done\n\n    setup_env\n\n    if [ \"$action\" = \"setup\" ]; then\n        setup_develop_mode\n    elif [ \"$action\" = \"query\" ]; then\n        develop_status\n    else \n        showhelp\n    fi\n\n    return $?\n}\n\nmain $@\n"
  },
  {
    "path": "src/modelbox/server/config.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/common/config.h\"\n\n#include <modelbox/base/configuration.h>\n#include <unistd.h>\n\n#include \"config.h\"\n\nnamespace modelbox {\n\nstd::shared_ptr<modelbox::Configuration> kConfig;\n\nstd::string kConfigPath = \"/etc/modelbox/modelbox.conf\";\n\nbool LoadConfig(const std::string &file) {\n  modelbox::ConfigurationBuilder config_builder;\n\n  kConfig = LoadSubConfig(file);\n  if (kConfig == nullptr) {\n    return false;\n  }\n\n  return true;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/config.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_CONF_H_\n#define MODELBOX_SERVER_CONF_H_\n\n#include <modelbox/base/configuration.h>\n\n#include <memory>\n\nnamespace modelbox {\n\n/**\n * @brief Load configuration from file\n */\nbool LoadConfig(const std::string &file);\n\n/**\n * @brief Global configuration object.\n */\nextern std::shared_ptr<modelbox::Configuration> kConfig;\n\n/**\n * @brief Global configuration file path.\n */\nextern std::string kConfigPath;\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_CONF_H_"
  },
  {
    "path": "src/modelbox/server/control-command.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"control-command.h\"\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/memory_pool.h>\n#include <modelbox/base/utils.h>\n#include <modelbox/common/log.h>\n\n#include \"modelbox/statistics.h\"\n\nnamespace modelbox {\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandLog)\n\nenum MODELBOX_SERVER_COMMAND_LOG {\n  MODELBOX_SERVER_COMMAND_LOG_GET,\n  MODELBOX_SERVER_COMMAND_LOG_SET,\n};\n\nstatic struct option server_log_options[] = {\n    {\"getlevel\", no_argument, nullptr, MODELBOX_SERVER_COMMAND_LOG_GET},\n    {\"setlevel\", required_argument, nullptr, MODELBOX_SERVER_COMMAND_LOG_SET},\n    {nullptr, 0, nullptr, 0},\n};\n\nToolCommandLog::ToolCommandLog() = default;\nToolCommandLog::~ToolCommandLog() = default;\n\nstd::string ToolCommandLog::GetHelp() {\n  char help[] =\n      \"option:\\n\"\n      \"  --getlevel          get current log level\\n\"\n      \"  --setlevel [level]  set server log level\\n\"\n      \"\\n\";\n  return help;\n}\n\nint ToolCommandLog::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n\n  if (argc <= 1) {\n    TOOL_COUT << GetHelp();\n    return 0;\n  }\n\n  auto logger = ModelBoxLogger.GetLogger();\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, server_log_options)\n  switch (cmdtype) {\n    case MODELBOX_SERVER_COMMAND_LOG_GET:\n      TOOL_COUT << \"Log Level : \"\n              
  << modelbox::LogLevelToString(logger->GetLogLevel())\n                << std::endl;\n      return 0;\n    case MODELBOX_SERVER_COMMAND_LOG_SET: {\n      auto level = modelbox::LogLevelStrToLevel(optarg);\n      if (modelbox::StatusError != modelbox::STATUS_OK) {\n        TOOL_CERR << \"Log level is invalid.\";\n        return 1;\n      }\n      TOOL_COUT << \"Set Log Level : \" << modelbox::LogLevelToString(level)\n                << std::endl;\n      logger->SetLogLevel(level);\n      return 0;\n    } break;\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return 0;\n}\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandSlab)\n\nenum MODELBOX_SERVER_COMMAND_SLAB {\n  MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_GET,\n};\n\nstatic struct option server_slab_options[] = {\n    {\"device\", no_argument, nullptr,\n     MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_GET},\n    {nullptr, 0, nullptr, 0},\n};\n\nenum AFILOG_SERVER_COMMAND_SLAB_DEVICE {\n  MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_TYPE,\n  MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_ID,\n};\n\nstatic struct option server_slab_device_options[] = {\n    {\"type\", required_argument, nullptr,\n     MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_TYPE},\n    {\"id\", required_argument, nullptr,\n     MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_ID},\n    {nullptr, 0, nullptr, 0},\n};\n\nconstexpr const char *CPU_MEMPOOL_TYPE = \"cpu\";\nconstexpr const char *CUDA_MEMPOOL_TYPE = \"cuda\";\nconstexpr const char *ASCEND_MEMPOOL_TYPE = \"ascend\";\n\nToolCommandSlab::ToolCommandSlab() = default;\nToolCommandSlab::~ToolCommandSlab() = default;\n\nstd::string ToolCommandSlab::GetHelp() {\n  char help[] =\n      \"option:\\n\"\n      \"  --device                               get all device slab info\\n\"\n      \"       --type [cpu|cuda]                 specified type. e.g. --device \"\n      \"--type cpu\\n\"\n      \"       --id [0|1|..]                     specified id. e.g. 
--device \"\n      \"--type cuda --id 0\\n\"\n      \"\\n\";\n  return help;\n}\n\nbool ToolCommandSlab::GetMemPools(\n    std::vector<std::shared_ptr<modelbox::MemoryPoolBase>> &mempools,\n    const std::string &type, const std::string &id) {\n  std::shared_ptr<modelbox::MemoryPoolBase> mempool;\n\n  std::string name = type;\n  if (id.length() > 0) {\n    name += \"-\" + id;\n  }\n\n  for (auto &p : modelbox::MemoryPoolBase::GetAllPools()) {\n    if (p->GetName().find(name) == std::string::npos) {\n      continue;\n    }\n\n    mempools.emplace_back(p);\n  }\n\n  if (mempools.size() == 0) {\n    return false;\n  }\n\n  return true;\n}\n\nvoid ToolCommandSlab::DisplaySlabInfo(\n    std::shared_ptr<modelbox::MemoryPoolBase> &mem_pool,\n    const std::string &type, const std::string &id) {\n  if (mem_pool == nullptr) {\n    return;\n  }\n\n  auto slabcaches = mem_pool->GetSlabCaches();\n  uint64_t total_memory = 0;\n  for (size_t i = 0; i < slabcaches.size(); ++i) {\n    if (i == 0) {\n      TOOL_COUT << \"object size\\t\\tactive_objs\\t\\tnum_objects\\n\";\n    }\n    TOOL_COUT << modelbox::GetBytesReadable(slabcaches[i]->ObjectSize())\n              << \"\\t\\t\\t\" << slabcaches[i]->GetActiveObjNumber() << \"\\t\\t\\t\"\n              << slabcaches[i]->GetObjNumber() << \"\\n\";\n    total_memory += slabcaches[i]->ObjectSize() * slabcaches[i]->GetObjNumber();\n  }\n  TOOL_COUT << \"name: \" << mem_pool->GetName()\n            << \"    total_active_objects: \" << mem_pool->GetAllActiveObjectNum()\n            << \"    total_objects: \" << mem_pool->GetAllObjectNum()\n            << \"    total_memory: \" << modelbox::GetBytesReadable(total_memory)\n            << \"\\n\\n\";\n}\n\nbool ToolCommandSlab::DisplayMemPools(const std::string &type) {\n  std::vector<std::shared_ptr<modelbox::MemoryPoolBase>> mem_pools;\n  auto mem_pool_flag = GetMemPools(mem_pools, type);\n  if (mem_pool_flag) {\n    for (size_t i = 0; i < mem_pools.size(); ++i) {\n      
DisplaySlabInfo(mem_pools[i], type, std::to_string(i));\n    }\n  }\n  return mem_pool_flag;\n}\n\nint ToolCommandSlab::DisplaySlabsInfo(const std::string &type) {\n  bool mem_pool_flag = false;\n\n  std::vector<std::string> types;\n  if (type.empty()) {\n    types = {CPU_MEMPOOL_TYPE, CUDA_MEMPOOL_TYPE, ASCEND_MEMPOOL_TYPE};\n  } else {\n    types.emplace_back(type);\n  }\n\n  for (const auto &item : types) {\n    mem_pool_flag |= DisplayMemPools(item);\n  }\n\n  if (!mem_pool_flag) {\n    if (type.empty()) {\n      TOOL_CERR << \"There is no memory pools here.\\n\";\n    } else {\n      TOOL_CERR << \"There is no \" << type << \" memory pools here.\\n\";\n    }\n    return 1;\n  }\n\n  return 0;\n}\n\nint ToolCommandSlab::DeviceSlabInfo(const std::string &type,\n                                    const std::string &id) {\n  if (id.empty()) {\n    return DisplaySlabsInfo(type);\n  }\n\n  if (type.empty()) {\n    TOOL_CERR << \"Your format is wrong , not allow only --id, please try \"\n                 \"modelbox-tool server slab \"\n                 \"--device --type [cpu|cuda] --id [0|1|...]\\n\";\n    return 1;\n  }\n\n  std::vector<std::shared_ptr<modelbox::MemoryPoolBase>> mem_pools;\n  auto res = GetMemPools(mem_pools, type, id);\n  if (!res) {\n    TOOL_CERR << type << \" \" << id << \" memory pool does not exist.\\n\";\n    return 1;\n  }\n  DisplaySlabInfo(mem_pools[0], type, id);\n\n  return 0;\n}\n\nint ToolCommandSlab::RunDeviceOption(int argc, char *argv[]) {\n  int cmdtype = 0;\n  std::string type;\n  std::string id;\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, server_slab_device_options)\n  switch (cmdtype) {\n    case MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_TYPE: {\n      type = optarg;\n      break;\n    }\n    case MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_ID: {\n      id = optarg;\n      break;\n    }\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return DeviceSlabInfo(type, id);\n}\n\nint ToolCommandSlab::Run(int argc, char 
*argv[]) {\n  int cmdtype = 0;\n\n  if (argc <= 1) {\n    TOOL_COUT << GetHelp();\n    return 1;\n  }\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, server_slab_options)\n  switch (cmdtype) {\n    case MODELBOX_SERVER_COMMAND_SLAB_INFO_DEVICE_GET: {\n      MODELBOX_COMMAND_SUB_UNLOCK();\n      return RunDeviceOption(MODELBOX_COMMAND_SUB_ARGC,\n                             MODELBOX_COMMAND_SUB_ARGV);\n    } break;\n    default:\n      return 1;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return 0;\n}\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandStatistics)\n\nenum MODELBOX_SERVER_COMMAND_STATISTICS {\n  MODELBOX_SERVER_COMMAND_STAT_All,\n  MODELBOX_SERVER_COMMAND_STAT_NODE,\n};\n\nstatic struct option server_statistics_options[] = {\n    {\"all\", no_argument, nullptr, MODELBOX_SERVER_COMMAND_STAT_All},\n    {\"node\", required_argument, nullptr, MODELBOX_SERVER_COMMAND_STAT_NODE},\n    {nullptr, 0, nullptr, 0},\n};\n\nToolCommandStatistics::ToolCommandStatistics() = default;\nToolCommandStatistics::~ToolCommandStatistics() = default;\n\nstd::string ToolCommandStatistics::GetHelp() {\n  char help[] =\n      \"option:\\n\"\n      \"  --all               get all info\\n\"\n      \"  --node [name]       get specific node info\\n\"\n      \"\\n\";\n  return help;\n}\n\nint ToolCommandStatistics::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n  auto root = modelbox::Statistics::GetGlobalItem();\n\n  if (argc <= 1) {\n    TOOL_COUT << GetHelp();\n    return 0;\n  }\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, server_statistics_options)\n\n  if (root == nullptr) {\n    TOOL_CERR << \"Root of Statistics have not been created\";\n    return 1;\n  }\n\n  switch (cmdtype) {\n    case MODELBOX_SERVER_COMMAND_STAT_All: {\n      TOOL_COUT << \"Display All Info: \" << std::endl;\n      root->ForEach(\n          [&](const std::shared_ptr<modelbox::StatisticsItem> &item,\n              const std::string &relative_path) {\n            auto value = item->GetValue();\n            TOOL_COUT 
<< item->GetPath()\n                      << (value ? \" = \" + value->ToString() : \"\") << std::endl;\n            return modelbox::STATUS_OK;\n          },\n          true);\n    } break;\n\n    case MODELBOX_SERVER_COMMAND_STAT_NODE: {\n      auto *name = optarg;\n      bool if_found = false;\n      root->ForEach(\n          [&](const std::shared_ptr<modelbox::StatisticsItem> &item,\n              const std::string &relative_path) {\n            if (item->GetName() == name) {\n              auto value = item->GetValue();\n              TOOL_COUT << item->GetPath()\n                        << (value ? \" = \" + value->ToString() : \"\")\n                        << std::endl;\n              if_found = true;\n            }\n            return modelbox::STATUS_OK;\n          },\n          true);\n      if (!if_found) {\n        TOOL_COUT << name << \" is not found.\" << std::endl;\n      }\n    } break;\n\n    default:\n      TOOL_COUT << GetHelp();\n      return 1;\n  }\n\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return 0;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/control-command.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CONTROL_COMMAND_H_\n#define MODELBOX_CONTROL_COMMAND_H_\n\n#include \"modelbox/base/memory_pool.h\"\n#include \"modelbox/common/command.h\"\n\nnamespace modelbox {\n\nconstexpr const char *LOG_CONTROL_DESC = \"control server log\";\nconstexpr const char *SLAB_CONTROL_DESC = \"control server slab\";\nconstexpr const char *STATISTICS_DESC = \"control server statistics\";\n\nclass ToolCommandLog : public modelbox::ToolCommand {\n public:\n  ToolCommandLog();\n  ~ToolCommandLog() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"log\"; };\n  std::string GetCommandDesc() override { return LOG_CONTROL_DESC; };\n};\n\nclass ToolCommandSlab : public modelbox::ToolCommand {\n public:\n  ToolCommandSlab();\n  ~ToolCommandSlab() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"slab\"; };\n  std::string GetCommandDesc() override { return SLAB_CONTROL_DESC; };\n\n private:\n  int RunDeviceOption(int argc, char *argv[]);\n  int DisplaySlabsInfo(const std::string &type = \"\");\n  void DisplaySlabInfo(std::shared_ptr<modelbox::MemoryPoolBase> &mem_pool,\n                       const std::string &type, const std::string 
&id);\n  bool GetMemPools(\n      std::vector<std::shared_ptr<modelbox::MemoryPoolBase>> &mempools,\n      const std::string &type, const std::string &id = \"\");\n  int DeviceSlabInfo(const std::string &type, const std::string &id);\n  bool DisplayMemPools(const std::string &type);\n};\n\nclass ToolCommandStatistics : public modelbox::ToolCommand {\n public:\n  ToolCommandStatistics();\n  ~ToolCommandStatistics() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n  std::string GetCommandName() override { return \"stat\"; };\n  std::string GetCommandDesc() override { return STATISTICS_DESC; };\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CONTROL_COMMAND_H_\n"
  },
  {
    "path": "src/modelbox/server/control.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"control.h\"\n\n#include <netdb.h>\n#include <poll.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/un.h>\n#include <functional>\n#include <iomanip>\n#include <utility>\n\n#include \"config.h\"\n#include \"modelbox/base/configuration.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/common/command.h\"\n#include \"modelbox/common/control_msg.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nControl::Control() = default;\n\nControl::~Control() { Stop(); }\n\nmodelbox::Status Control::Init(\n    const std::shared_ptr<modelbox::Configuration> &config) {\n  config_ = config;\n  auto ret = modelbox::STATUS_OK;\n  DeferCond { return ret != modelbox::STATUS_OK; };\n\n  if (config_ == nullptr) {\n    return modelbox::STATUS_BADCONF;\n  }\n\n  if (config_->GetBool(\"control.enable\", false) == false) {\n    return modelbox::STATUS_OK;\n  }\n\n  listen_path_ = modelbox_full_path(\n      config->GetString(\"control.listen\", CONTROL_UNIX_PATH));\n\n  struct sockaddr_un server_sockaddr;\n  int fd = socket(AF_UNIX, SOCK_DGRAM, 0);\n  if (fd <= 0) {\n    std::string errmsg = \"create socket: \";\n    errmsg += modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    ret = {modelbox::STATUS_FAULT, errmsg};\n    
return ret;\n  }\n  DeferCondAdd { close(fd); };\n\n  server_sockaddr.sun_family = AF_UNIX;\n  strncpy_s(server_sockaddr.sun_path, sizeof(server_sockaddr.sun_path),\n            listen_path_.c_str(), listen_path_.length());\n  unlink(server_sockaddr.sun_path);\n  int rc =\n      bind(fd, (struct sockaddr *)&server_sockaddr, sizeof(server_sockaddr));\n  if (rc != 0) {\n    std::string errmsg =\n        \"bind socket: \" + std::string(server_sockaddr.sun_path) + \" failed, \";\n    errmsg += modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    ret = {modelbox::STATUS_FAULT, errmsg};\n    return ret;\n  }\n  chmod(server_sockaddr.sun_path, 0660);\n\n  pool_ = std::make_shared<modelbox::ThreadPool>(0, 8);\n  pool_->SetName(\"Control-Sender\");\n\n  server_fd_ = fd;\n  return modelbox::STATUS_OK;\n}\n\nint Control::ProcessHelp(const std::shared_ptr<ControlMsgHelp> &msg,\n                         MsgSendFunc reply_func) {\n  auto cmds = modelbox::ToolCommandList::Instance()->GetAllCommands();\n  if (cmds.size() == 0) {\n    return 0;\n  }\n\n  auto out_msg = std::make_shared<ControlOutStream>();\n  out_msg->SetReplyFunc(std::move(reply_func));\n\n  *out_msg->Stream() << \"Server command lists:\\n\";\n  for (auto &cmd : cmds) {\n    *out_msg->Stream() << \"  \" << std::left << std::setw(23)\n                       << cmd->GetCommandName() << cmd->GetCommandDesc()\n                       << \"\\n\";\n  }\n\n  return 0;\n}\n\nint Control::ProcessCmd(const std::shared_ptr<ControlMsgCmd> &msg,\n                        const MsgSendFunc &reply_func) {\n  auto args = msg->GetArgv();\n  int argc = args.size();\n  char *argv[argc];\n\n  for (int i = 0; i < argc; i++) {\n    argv[i] = (char *)args[i].c_str();\n  }\n\n  if (argc <= 0) {\n    return -1;\n  }\n\n  const char *action = argv[0];\n  auto cmd = modelbox::ToolCommandList::Instance()->GetCommand(action);\n  if (cmd == nullptr) {\n    MBLOG_DEBUG << \"command \" << action << \" not exists\\n\";\n    return -1;\n  
}\n\n  auto out_msg = std::make_shared<ControlOutStream>();\n  auto err_msg = std::make_shared<ControlErrStream>();\n\n  out_msg->SetReplyFunc(reply_func);\n  err_msg->SetReplyFunc(reply_func);\n  cmd->SetUp(out_msg, err_msg);\n\n  return cmd->Run(argc, argv);\n}\n\nvoid Control::ProcessMsg(const std::shared_ptr<ControlMsg> &msg,\n                         MsgSendFunc reply_func) {\n  int process_ret = 0;\n  modelbox::Status ret = modelbox::STATUS_OK;\n  Defer {\n    if (!ret) {\n      ReplyMsgError(ret.Code(), ret.WrapErrormsgs(), reply_func);\n    }\n  };\n\n  switch (msg->GetMsgType()) {\n    case SERVER_CONTROL_MSG_TYPE_HELP: {\n      auto help_msg = std::dynamic_pointer_cast<ControlMsgHelp>(msg);\n      if (help_msg == nullptr) {\n        const auto *errmsg = \"message is invalid\";\n        ret = {modelbox::STATUS_FAULT, errmsg};\n        return;\n      }\n      process_ret = ProcessHelp(help_msg, reply_func);\n    } break;\n    case SERVER_CONTROL_MSG_TYPE_CMD: {\n      auto cmd_msg = std::dynamic_pointer_cast<ControlMsgCmd>(msg);\n      if (cmd_msg == nullptr) {\n        const auto *errmsg = \"message is invalid\";\n        ret = {modelbox::STATUS_FAULT, errmsg};\n        return;\n      }\n      process_ret = ProcessCmd(cmd_msg, reply_func);\n    } break;\n    default:\n      const auto *errmsg = \"command not found\";\n      ret = {modelbox::STATUS_NOTFOUND, errmsg};\n      return;\n      break;\n  }\n\n  ControlMsgResult ret_msg;\n  ret_msg.SetResult(process_ret);\n  ret = ret_msg.Serialize();\n  if (!ret) {\n    MBLOG_ERROR << \"Serialize result msg failed.\" << ret;\n    return;\n  }\n\n  int len = reply_func(ret_msg.GetData(), ret_msg.GetDataLen());\n  if (len < 0) {\n    const auto *errmsg = \"send to client failed.\";\n    ret = {modelbox::STATUS_FAULT, errmsg};\n    return;\n  }\n}\n\nvoid Control::ReplyMsgError(int err_code, const std::string &err_msg,\n                            const MsgSendFunc &reply_func) {\n  ControlMsgError msg_err;\n  
msg_err.SetError(err_code, err_msg);\n  if (msg_err.Serialize() != modelbox::STATUS_OK) {\n    return;\n  }\n  reply_func(msg_err.GetData(), msg_err.GetDataLen());\n}\n\nmodelbox::Status Control::RecvMsg(const std::shared_ptr<ControlMsg> &recv_msg) {\n  struct sockaddr_un client;\n  socklen_t client_len = sizeof(client);\n\n  size_t len = sizeof(struct sockaddr);\n  len = recvfrom(\n      server_fd_, recv_msg->GetDataTail(), recv_msg->GetRemainSpace(), 0,\n      (struct sockaddr *)((void *)&client), (socklen_t *)(&client_len));\n  if (len < 0) {\n    std::string errmsg = \"recv from client failed.\";\n    errmsg += modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    return {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  modelbox::Status ret = modelbox::STATUS_OK;\n\n  int fd = server_fd_;\n  auto reply_func = [fd, client, client_len](void *data, int len) -> int {\n    int rc = sendto(fd, data, len, 0, (struct sockaddr *)&client, client_len);\n    if (rc < 0) {\n      MBLOG_ERROR << \"send failed, \" << client.sun_path\n                  << \", error: \" << modelbox::StrError(errno);\n    }\n\n    return rc;\n  };\n\n  Defer {\n    if (!ret) {\n      ReplyMsgError(ret.Code(), ret.WrapErrormsgs(), reply_func);\n    }\n  };\n\n  ret = recv_msg->AppendDataLen(len);\n  if (!ret) {\n    MBLOG_ERROR << \"update message len failed, \" << ret;\n    return ret;\n  }\n\n  ret = recv_msg->Unserialize();\n  if (!ret) {\n    return ret;\n  }\n\n  auto process_msg = ControlMsgBuilder::Build(recv_msg);\n  if (process_msg == nullptr) {\n    MBLOG_ERROR << \"Invalid control message\";\n    ret = modelbox::STATUS_INVALID;\n    return ret;\n  }\n\n  pool_->Submit(&Control::ProcessMsg, this, process_msg, reply_func);\n  return ret;\n}\n\nvoid Control::ControlDaemon() {\n  struct pollfd fds[1];\n  std::shared_ptr<ControlMsg> msg = std::make_shared<ControlMsg>();\n\n  int nfds = sizeof(fds) / sizeof(struct pollfd);\n\n  memset_s(fds, sizeof(fds), 0, sizeof(fds));\n  fds[0].fd = 
server_fd_;\n  fds[0].events = POLLIN;\n\n  modelbox::os->Thread->SetName(\"Control-Daemon\");\n\n  while (run_) {\n    int rc = poll(fds, nfds, -1);\n    if (rc <= 0) {\n      continue;\n    }\n\n    if (run_ == false) {\n      break;\n    }\n\n    if (fds[0].revents != POLLIN) {\n      continue;\n    }\n\n    auto ret = RecvMsg(msg);\n    if (ret == modelbox::STATUS_AGAIN || ret == modelbox::STATUS_OK) {\n      continue;\n    }\n\n    msg->Reset();\n  }\n}\n\nmodelbox::Status Control::Start() {\n  if (run_ == true) {\n    return modelbox::STATUS_OK;\n  }\n\n  if (server_fd_ > 0) {\n    run_ = true;\n    daemon_thread_ = std::thread(&Control::ControlDaemon, this);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status Control::Stop() {\n  if (run_ == false) {\n    return modelbox::STATUS_OK;\n  }\n\n  run_ = false;\n  if (server_fd_ > 0) {\n    shutdown(server_fd_, SHUT_RDWR);\n  }\n\n  if (daemon_thread_.joinable()) {\n    daemon_thread_.join();\n  }\n\n  pool_ = nullptr;\n\n  if (server_fd_ > 0) {\n    close(server_fd_);\n    if (listen_path_.length() > 0) {\n      unlink(listen_path_.c_str());\n      listen_path_.clear();\n    }\n    server_fd_ = -1;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nControlStream::ControlStream() = default;\nControlStream::~ControlStream() = default;\n\nbool ControlStream::HasError() { return has_error_; }\n\nvoid ControlStream::SetReplyFunc(MsgSendFunc reply_func) {\n  reply_func_ = std::move(reply_func);\n}\n\nControlOutStream::ControlOutStream() = default;\nControlOutStream::~ControlOutStream() = default;\n\nvoid ControlOutStream::ProcessStream(OStream *st) {\n  ControlMsgStdout msg;\n  msg.SetString(st->str());\n  auto ret = msg.Serialize();\n  if (!ret) {\n    has_error_ = true;\n    return;\n  }\n\n  int len = reply_func_(msg.GetData(), msg.GetDataLen());\n  if (len <= 0) {\n    has_error_ = true;\n    return;\n  }\n}\n\nControlErrStream::ControlErrStream() = default;\nControlErrStream::~ControlErrStream() = 
default;\n\nvoid ControlErrStream::ProcessStream(OStream *st) {\n  ControlMsgErrout msg;\n  msg.SetString(st->str());\n  auto ret = msg.Serialize();\n  if (!ret) {\n    has_error_ = true;\n    return;\n  }\n\n  int len = reply_func_(msg.GetData(), msg.GetDataLen());\n  if (len <= 0) {\n    has_error_ = true;\n    return;\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/control.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_CONTROL_H_\n#define MODELBOX_CONTROL_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/thread_pool.h>\n\n#include <iostream>\n#include <memory>\n\n#include \"modelbox/common/control_msg.h\"\n#include \"modelbox/common/utils.h\"\n#include \"server_plugin.h\"\n\nnamespace modelbox {\n\nusing MsgSendFunc = std::function<int(void *, int)>;\n\nclass Control {\n public:\n  Control();\n  virtual ~Control();\n  modelbox::Status Init(const std::shared_ptr<modelbox::Configuration>& config);\n  modelbox::Status Start();\n  modelbox::Status Stop();\n\n private:\n  void ControlDaemon();\n  void ProcessMsg(const std::shared_ptr<ControlMsg>& msg,\n                  MsgSendFunc reply_func);\n  void ReplyMsgError(int err_code, const std::string& err_msg,\n                     const MsgSendFunc& reply_func);\n  modelbox::Status RecvMsg(const std::shared_ptr<ControlMsg>& recv_msg);\n  int ProcessCmd(const std::shared_ptr<ControlMsgCmd>& msg,\n                 const MsgSendFunc& reply_func);\n  int ProcessHelp(const std::shared_ptr<ControlMsgHelp>& msg,\n                  MsgSendFunc reply_func);\n\n  std::shared_ptr<modelbox::Configuration> config_;\n  std::string listen_path_;\n  int server_fd_{-1};\n  bool run_{false};\n  std::thread daemon_thread_;\n  
std::shared_ptr<modelbox::ThreadPool> pool_;\n};\n\nclass ControlStream : public OutStream {\n public:\n  ControlStream();\n  virtual ~ControlStream();\n\n  bool HasError();\n\n  void SetReplyFunc(MsgSendFunc reply_func);\n\n protected:\n  MsgSendFunc reply_func_;\n  bool has_error_{false};\n};\n\nclass ControlOutStream : public ControlStream {\n public:\n  ControlOutStream();\n  ~ControlOutStream() override;\n\n protected:\n  void ProcessStream(OStream *st) override;\n};\n\nclass ControlErrStream : public ControlStream {\n public:\n  ControlErrStream();\n  ~ControlErrStream() override;\n\n protected:\n  void ProcessStream(OStream *st) override;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_CONTROL_H_\n"
  },
  {
    "path": "src/modelbox/server/etc/init.d/modelbox.in",
    "content": "#!/bin/sh\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n### BEGIN INIT INFO\n# Provides:        modelbox\n# Required-Start:  $network \n# Required-Stop:   $network \n# Default-Start:   2 3 4 5\n# Default-Stop:\n# Short-Description: Start modelbox service\n### END INIT INFO\n\nPATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin\n\nMODELBOX_ROOT=$(cd $(dirname $0)/../../ && pwd)\nif [ \"$MODELBOX_ROOT\" = \"/\" ]; then\n\tMODELBOX_ROOT=\"\"\nfi\n\n# this env may changed by script, do not modify\nCUSTOM_ENV_FILE=\"\"\n\nif [ -f \"$CUSTOM_ENV_FILE\" ]; then\n\t. $CUSTOM_ENV_FILE\nelse\n\t. ${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox-opts\n\tPIDDIR=${MODELBOX_ROOT}/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox\n\tPIDFILE=${MODELBOX_ROOT}/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid\n\tMODELBOX_INITSCRIPT_DIR=${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/init-script/modelbox\nfi\n\nMODELBOX=${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_BINDIR@/modelbox\n\nif [ ! -x \"${MODELBOX}\" ]; then\n\techo \"modelbox server not exists: ${MODELBOX}\"\n\texit 5\nfi\n\ncase $1 in\n\texec)\n\t\tif [ ! -d \"$PIDDIR\" ]; then\n\t\t\tmkdir $PIDDIR\n\t\tfi\n\n\t\tif [ -d ${MODELBOX_INITSCRIPT_DIR} ]; then\n\t\t\tfor i in ${MODELBOX_INITSCRIPT_DIR}/*.sh; do\n\t\t\t\tif [ -r $i ]; then\n\t\t\t\t. 
$i\n\t\t\t\tfi\n\t\t\tdone\n\t\t\tunset i\t\n\t\tfi\n\n\t\texec $MODELBOX $MODELBOX_OPTS -V -f -p $PIDFILE\n\t\t;;\n\tstart)\n\t\tif [ ! -d \"$PIDDIR\" ]; then\n\t\t\tmkdir $PIDDIR\n\t\tfi\n\n\t\tif [ -d ${MODELBOX_INITSCRIPT_DIR} ]; then\n\t\t\tfor i in ${MODELBOX_INITSCRIPT_DIR}/*.sh; do\n\t\t\t\tif [ -r $i ]; then\n\t\t\t\t. $i\n\t\t\t\tfi\n\t\t\tdone\n\t\t\tunset i\t\n\t\tfi\n\n\t\t$MODELBOX $MODELBOX_OPTS -p $PIDFILE\n\t\tLOOP=0\n\t\twhile true; do\n\t\t\tif [ -e \"$PIDFILE\" ]; then\n\t\t\t\tbreak;\n\t\t\tfi\n\t\t\tLOOP=$((LOOP+1))\n\n\t\t\tif [ $LOOP -gt 10 ]; then\n\t\t\t\techo \"start modelbox service failed.\"\n\t\t\t\t\"$0\" stop\n\t\t\t\texit 1\n\t\t\tfi\n\t\t\tsleep .5\n\t\tdone\n\n\t\tPID=\"$(cat $PIDFILE 2>/dev/null)\"\n\t\tif [ -z \"$PID\" ]; then\n\t\t\techo \"start modelbox service failed.\"\n\t\t\texit 1\n\t\tfi\n\t\tif [ ! -e \"/proc/$PID\" ]; then\n\t\t\techo \"start modelbox service failed.\"\n\t\t\texit 1\n\t\tfi\n\t\techo \"start modelbox service success.\"\n\t\t;;\n\tstop)\n\t\tif [ ! -f \"$PIDFILE\" ]; then\n\t\t\techo \"modelbox service is stopped.\"\n\t\t\texit 0\n\t\tfi\n\t\tPID=\"$(cat $PIDFILE 2>/dev/null)\"\n\t\tif [ ! -e \"/proc/$PID\" ] || [ -z \"$PID\" ]; then\n\t\t\techo \"modelbox service is stopped\"\n\t\t\texit 0\n\t\tfi\n\n\t\tkill -TERM \"$PID\"\n\t\tif [ $? -ne 0 ]; then\n\t\t\techo \"stop modelbox service failed.\"\n\t\t\texit 1;\n\t\tfi\n\n\t\tLOOP=1\n\t\twhile true; do\n\t\t\tif [ ! -d \"/proc/$PID\" ]; then\n\t\t\t\tbreak;\n\t\t\tfi\n\n\t\t\tif [ $LOOP -gt 10 ]; then\n\t\t\t\tkill -9 \"$PID\"\n\t\t\t\tbreak;\n\t\t\tfi\n\t\t\tLOOP=$((LOOP+1))\n\t\t\tsleep .5\n\t\tdone\n\t\techo \"stop modelbox service success.\"\n\t\t;;\n\trestart)\n\t\t\"$0\" stop && sleep 1 && \"$0\" start\n\t\t;;\n\tstatus)\n\t\tPID=\"$(cat \"$PIDFILE\" 2>/dev/null)\"\n\t\tif [ ! -e \"/proc/$PID\" ] || [ -z \"$PID\" ]; then\n\t\t\techo \"modelbox service is not running.\"\n\t\t\texit 1\n\t\tfi\n\t\techo \"modelbox service is running. 
pid $PID\"\n\t\tstatus=$?\n\t\t;;\n\t*)\n\t\techo \"Usage: $0 {start|exec|stop|restart|status}\"\n\t\texit 2\n\t\t;;\nesac\n\nexit $status\n\n"
  },
  {
    "path": "src/modelbox/server/etc/modelbox-dev.conf.in",
    "content": "[server]\nip = \"127.0.0.1\"\nport = \"1104\"\nflow_path = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/graph\"\n\n[plugin]\nfiles = [\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin.so\",\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin-editor.so\"\n]\n\n[control]\nenable = true\nlisten = \"@MODELBOX_ROOT_VAR@/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.sock\"\n\n[acl]\nallow = [\n    \"127.0.0.1/8\",\n    # ADD CLIENT HOST HERE\n]\n\n[editor]\nenable = true\n# ip = \"127.0.0.1\"\n# port = \"1104\"\nroot = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/www\"\ndemo_root = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/demo\"\n\n[log]\n# log level, DEBUG, INFO, NOTICE, WARN, ERROR, FATAL, OFF\n# level = \"INFO\"\n\n# log archive number\n# num = 32\n\n# log to screen\nscreen = true\n\n# log file path\npath = \"@MODELBOX_ROOT_VAR@/var/log/modelbox/modelbox.log\""
  },
  {
    "path": "src/modelbox/server/etc/modelbox-opts",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nMODELBOX_ROOT=$(cd $(dirname $0)/../../ && pwd)\nif [ \"$MODELBOX_ROOT\" = \"/\" ]; then\n\tMODELBOX_ROOT=\"\"\nfi\n\n# custom env variable here\n\n# modelbox server opts\nMODELBOX_OPTS=\"$MODELBOX_OPTS -c ${MODELBOX_ROOT}@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox.conf\""
  },
  {
    "path": "src/modelbox/server/etc/modelbox-template.conf.in",
    "content": "[server]\nip = \"#SERVER_IP\"\nport = \"#SERVER_PORT\"\nflow_path = \"#FLOW_PATH\"\n\n[plugin]\nfiles = [\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin.so\",\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin-editor.so\"\n]\n\n[control]\nenable = true\nlisten = \"#LISTEN_PATH\"\n\n[acl]\nallow = [\n    \"127.0.0.1/8\",\n    # ADD CLIENT HOST HERE\n    #ACL_IPS\n]\n\n[editor]\nenable = true\n# ip = \"127.0.0.1\"\n# port = \"1104\"\nroot = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/www\"\ndemo_root = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/demo\"\n\n[log]\n# log level, DEBUG, INFO, NOTICE, WARN, ERROR, FATAL, OFF\nlevel = \"INFO\"\n\n# log archive number\nnum = 32\n\n# log file path\npath = \"#LOG_FILE_PATH\"\n"
  },
  {
    "path": "src/modelbox/server/etc/modelbox.conf.in",
    "content": "[server]\nip = \"127.0.0.1\"\nport = \"1104\"\nflow_path = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/graph\"\napplication_root = \"@MODELBOX_ROOT_VAR@/opt/modelbox/application\"\n\n# run as user\n# user = \"modelbox\"\n\n# [acl]\n# allow = [\n#     \"127.0.0.1/8\"\n# ]\n\n[control]\nenable = true\nlisten = \"@MODELBOX_ROOT_VAR@/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.sock\"\n\n[plugin]\nfiles = [\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin.so\",\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_LIBDIR@/modelbox-plugin-editor.so\"\n]\n\n[editor]\nenable = false\n# ip = \"127.0.0.1\"\n# port = \"1104\"\nroot = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/www\"\ndemo_root = \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_DATAROOTDIR@/modelbox/demo\"\n\n[log]\n# log level, DEBUG, INFO, NOTICE, WARN, ERROR, FATAL, OFF\n# level = \"INFO\"\n\n# log archive number\n# num = 32\n\n# log file path\npath = \"@MODELBOX_ROOT_VAR@/var/log/modelbox/modelbox.log\"\n\n# log to screen\nscreen = false\n\n# include config files\n[include]\nfiles = [\n    \"@MODELBOX_ROOT_VAR@@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/conf.d/*.conf\",    \n]"
  },
  {
    "path": "src/modelbox/server/etc/modelbox.service.in",
    "content": "[Unit]\nDescription=modelbox server\nAfter=network.target \nStartLimitBurst=0\nStartLimitIntervalSec=60\n\n[Service]\nUser=modelbox\nGroup=modelbox\nType=forking\nPermissionsStartOnly=True\nRuntimeDirectory=modelbox\nRuntimeDirectoryMode=755\nPIDFile=/@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid\nEnvironmentFile=@CMAKE_INSTALL_FULL_SYSCONFDIR@/modelbox/modelbox-opts\nExecStart=@CMAKE_INSTALL_FULL_BINDIR@/modelbox -p /@CMAKE_INSTALL_RUNSTATEDIR@/modelbox/modelbox.pid $MODELBOX_OPTS \nLimitNOFILE=100000\nRestart=always\nRestartSec=3\n\n[Install]\nWantedBy=multi-user.target\nAlias=modelbox.service\n"
  },
  {
    "path": "src/modelbox/server/http_helper.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/server/http_helper.h\"\n\n#include <modelbox/base/log.h>\n\n#include <regex>\n#include <utility>\n\nnamespace modelbox {\n\nconst HttpStatusCode HttpStatusCodes::CONTINUE = 100;\nconst HttpStatusCode HttpStatusCodes::SWITCHING_PROTOCOLS = 101;\nconst HttpStatusCode HttpStatusCodes::OK = 200;\nconst HttpStatusCode HttpStatusCodes::CREATED = 201;\nconst HttpStatusCode HttpStatusCodes::ACCEPTED = 202;\nconst HttpStatusCode HttpStatusCodes::NON_AUTH_INFO = 203;\nconst HttpStatusCode HttpStatusCodes::NO_CONTENT = 204;\nconst HttpStatusCode HttpStatusCodes::RESET_CONTENT = 205;\nconst HttpStatusCode HttpStatusCodes::PARTIAL_CONTENT = 206;\nconst HttpStatusCode HttpStatusCodes::MULTI_STATUS = 207;\nconst HttpStatusCode HttpStatusCodes::ALREADY_REPORTED = 208;\nconst HttpStatusCode HttpStatusCodes::IM_USED = 226;\nconst HttpStatusCode HttpStatusCodes::MULTIPLE_CHOICES = 300;\nconst HttpStatusCode HttpStatusCodes::MOVED_PERMANENTLY = 301;\nconst HttpStatusCode HttpStatusCodes::FOUND = 302;\nconst HttpStatusCode HttpStatusCodes::SEE_OTHER = 303;\nconst HttpStatusCode HttpStatusCodes::NOT_MODIFIED = 304;\nconst HttpStatusCode HttpStatusCodes::USE_PROXY = 305;\nconst HttpStatusCode HttpStatusCodes::TEMPORARY_REDIRECT = 307;\nconst HttpStatusCode HttpStatusCodes::PERMANENT_REDIRECT = 308;\nconst 
HttpStatusCode HttpStatusCodes::BAD_REQUEST = 400;\nconst HttpStatusCode HttpStatusCodes::UNAUTHORIZED = 401;\nconst HttpStatusCode HttpStatusCodes::PAYMENT_REQUIRED = 402;\nconst HttpStatusCode HttpStatusCodes::FORBIDDEN = 403;\nconst HttpStatusCode HttpStatusCodes::NOT_FOUND = 404;\nconst HttpStatusCode HttpStatusCodes::METHOD_NOT_ALLOWED = 405;\nconst HttpStatusCode HttpStatusCodes::NOT_ACCEPTABLE = 406;\nconst HttpStatusCode HttpStatusCodes::PROXY_AUTH_REQUIRED = 407;\nconst HttpStatusCode HttpStatusCodes::REQUEST_TIMEOUT = 408;\nconst HttpStatusCode HttpStatusCodes::CONFLICT = 409;\nconst HttpStatusCode HttpStatusCodes::GONE = 410;\nconst HttpStatusCode HttpStatusCodes::LENGTH_REQUIRED = 411;\nconst HttpStatusCode HttpStatusCodes::PRECONDITION_FAILED = 412;\nconst HttpStatusCode HttpStatusCodes::REQUEST_ENTITY_TOO_LARGE = 413;\nconst HttpStatusCode HttpStatusCodes::REQUEST_URI_TOO_LARGE = 414;\nconst HttpStatusCode HttpStatusCodes::UNSUPPORTED_MEDIA_TYPE = 415;\nconst HttpStatusCode HttpStatusCodes::RANGE_NOT_SATISFIABLE = 416;\nconst HttpStatusCode HttpStatusCodes::EXPECTATION_FAILED = 417;\nconst HttpStatusCode HttpStatusCodes::MISDIRECTED_REQUEST = 421;\nconst HttpStatusCode HttpStatusCodes::UNPROCESSABLE_ENTITY = 422;\nconst HttpStatusCode HttpStatusCodes::LOCKED = 423;\nconst HttpStatusCode HttpStatusCodes::FAILED_DEPENDENCY = 424;\nconst HttpStatusCode HttpStatusCodes::UPGRADE_REQUIRED = 426;\nconst HttpStatusCode HttpStatusCodes::PRECONDITION_REQUIRED = 428;\nconst HttpStatusCode HttpStatusCodes::TOO_MANY_REQUESTS = 429;\nconst HttpStatusCode HttpStatusCodes::REQUEST_HEADER_FIELDS_TOO_LARGE = 431;\nconst HttpStatusCode HttpStatusCodes::UNAVAILABLE_FOR_LEGAL_REASONS = 451;\nconst HttpStatusCode HttpStatusCodes::INTERNAL_ERROR = 500;\nconst HttpStatusCode HttpStatusCodes::NOT_IMPLEMENTED = 501;\nconst HttpStatusCode HttpStatusCodes::BAD_GATEWAY = 502;\nconst HttpStatusCode HttpStatusCodes::SERVICE_UNAVAILABLE = 503;\nconst HttpStatusCode 
HttpStatusCodes::GATEWAY_TIMEOUT = 504;\nconst HttpStatusCode HttpStatusCodes::HTTP_VERSION_NOT_SUPPORTED = 505;\nconst HttpStatusCode HttpStatusCodes::VARIANT_ALSO_NEGOTIATES = 506;\nconst HttpStatusCode HttpStatusCodes::INSUFFICIENT_STORAGE = 507;\nconst HttpStatusCode HttpStatusCodes::LOOP_DETECTED = 508;\nconst HttpStatusCode HttpStatusCodes::NOT_EXTENDED = 510;\nconst HttpStatusCode HttpStatusCodes::NETWORK_AUTHENTICATION_REQUIRED = 511;\n\nconst HttpMethod HttpMethods::GET = \"GET\";\nconst HttpMethod HttpMethods::POST = \"POST\";\nconst HttpMethod HttpMethods::PUT = \"PUT\";\nconst HttpMethod HttpMethods::DELETE = \"DELETE\";\nconst HttpMethod HttpMethods::HEAD = \"HEAD\";\nconst HttpMethod HttpMethods::OPTIONS = \"OPTIONS\";\nconst HttpMethod HttpMethods::TRACE = \"TRACE\";\nconst HttpMethod HttpMethods::CONNECT = \"CONNECT\";\nconst HttpMethod HttpMethods::MERGE = \"MERGE\";\nconst HttpMethod HttpMethods::PATCH = \"PATCH\";\n\n#define GET_SSL_ERR(ssl_err_code, ssl_err_str)          \\\n  char ssl_err_str[256];                                \\\n  auto ssl_err_code_num = ERR_get_error();              \\\n  /* NOLINTNEXTLINE */                                  \\\n  auto ssl_err_code = std::to_string(ssl_err_code_num); \\\n  ERR_error_string_n(ssl_err_code_num, ssl_err_str, 256);\n\nStatus UseCertificate(SSL_CTX &ctx, const void *cert_buf, int len,\n                      pem_password_cb cb, void *cb_user_data) {\n  auto *cert_bio = BIO_new_mem_buf(cert_buf, len);\n  if (cert_bio == nullptr) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT,\n            \"load cert as bio failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  auto *cert = PEM_read_bio_X509_AUX(cert_bio, nullptr, cb, cb_user_data);\n  BIO_free(cert_bio);\n  if (cert == nullptr) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT,\n            \"read x509 failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  auto ret = SSL_CTX_use_certificate(&ctx, cert);\n  
if (ret != 1) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT, \"use cert failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  return STATUS_OK;\n}\n\nStatus UsePrivateKey(SSL_CTX &ctx, const void *key_buf, int len,\n                     pem_password_cb cb, void *cb_user_data) {\n  auto *key_bio = BIO_new_mem_buf(key_buf, len);\n  if (key_bio == nullptr) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT,\n            \"load key as bio failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  auto *key = PEM_read_bio_PrivateKey(key_bio, nullptr, cb, cb_user_data);\n  BIO_free(key_bio);\n  if (key == nullptr) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT, \"read key failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  auto ret = SSL_CTX_use_PrivateKey(&ctx, key);\n  if (ret != 1) {\n    GET_SSL_ERR(err_code, err_str);\n    return {STATUS_FAULT, \"use key failed, err: \" + err_code + \", \" + err_str};\n  }\n\n  return STATUS_OK;\n}\n\nvoid HttpServerConfig::SetTimeout(const std::chrono::seconds &timeout) {\n  timeout_ = timeout;\n}\n\nvoid HttpServerConfig::SetSSLConfigCallback(const SSLConfigCallback &cb) {\n  ssl_config_cb_ = cb;\n}\n\nstd::chrono::seconds HttpServerConfig::GetTimeout() const { return timeout_; }\n\nSSLConfigCallback HttpServerConfig::GetSSLConfigCallback() const {\n  return ssl_config_cb_;\n}\n\nbool HttpPathMatchNode::IsValid() { return is_valid_; }\n\nbool HttpPathMatchNode::HasChildren() { return !children_.empty(); }\n\nStatus HttpPathMatchNode::AddChild(std::list<std::string> node_path) {\n  if (node_path.empty()) {\n    if (is_valid_) {\n      return STATUS_EXIST;\n    }\n\n    is_valid_ = true;\n    return STATUS_OK;\n  }\n\n  auto child_name = node_path.front();\n  node_path.pop_front();\n  std::shared_ptr<HttpPathMatchNode> child;\n  auto item = children_.find(child_name);\n  if (item == children_.end()) {\n    child = std::make_shared<HttpPathMatchNode>();\n    
children_[child_name] = child;\n  } else {\n    child = item->second;\n  }\n\n  return child->AddChild(std::move(node_path));\n}\n\nStatus HttpPathMatchNode::DelChild(std::list<std::string> node_path) {\n  if (node_path.empty()) {\n    is_valid_ = false;\n    return STATUS_OK;\n  }\n\n  auto child_name = node_path.front();\n  node_path.pop_front();\n  auto item = children_.find(child_name);\n  if (item == children_.end()) {\n    return STATUS_NOTFOUND;\n  }\n\n  auto &child = item->second;\n  auto ret = child->DelChild(std::move(node_path));\n  if (!ret) {\n    return ret;\n  }\n\n  if (!child->IsValid() && !child->HasChildren()) {\n    children_.erase(child_name);\n  }\n\n  return STATUS_OK;\n}\n\nStatus HttpPathMatchNode::Match(std::list<std::string> path,\n                                std::list<std::string> &node_path) {\n  auto ret = STATUS_NOTFOUND;\n  if (is_valid_) {\n    ret = STATUS_OK;\n  }\n\n  if (path.empty()) {\n    return ret;\n  }\n\n  auto child_name = path.front();\n  path.pop_front();\n  auto item = children_.find(child_name);\n  if (item == children_.end()) {\n    return ret;\n  }\n\n  node_path.push_back(child_name);\n  auto &child = item->second;\n  auto child_ret = child->Match(std::move(path), node_path);\n  if (child_ret) {\n    return STATUS_OK;\n  }\n\n  node_path.pop_back();\n  return ret;\n}\n\nStatus HttpPathMatchTree::AddNode(const std::string &node_path) {\n  auto node_name_list = SplitHttpPath(node_path);\n  return root_.AddChild(std::move(node_name_list));\n}\n\nvoid HttpPathMatchTree::DelNode(const std::string &node_path) {\n  auto node_name_list = SplitHttpPath(node_path);\n  root_.DelChild(std::move(node_name_list));\n}\n\nStatus HttpPathMatchTree::Match(const std::string &path,\n                                std::string &node_path) {\n  std::list<std::string> node_path_list;\n  auto node_name_list = SplitHttpPath(path);\n  auto ret = root_.Match(std::move(node_name_list), node_path_list);\n  if (!ret) {\n    return 
STATUS_NOTFOUND;\n  }\n\n  std::stringstream node_path_buffer;\n  if (node_path_list.empty()) {\n    node_path = \"/\";\n    return STATUS_OK;\n  }\n\n  for (auto &path_name : node_path_list) {\n    node_path_buffer << \"/\" << path_name;\n  }\n\n  node_path = node_path_buffer.str();\n  return STATUS_OK;\n}\n\nstd::list<std::string> HttpPathMatchTree::SplitHttpPath(\n    const std::string &http_path) {\n  std::list<std::string> name_list;\n  auto name_start_pos = http_path.find('/') + 1;\n  if (name_start_pos >= http_path.size()) {\n    return name_list;\n  }\n\n  while (true) {\n    auto name_end_pos = http_path.find('/', name_start_pos);\n    if (name_end_pos == std::string::npos) {\n      name_list.push_back(http_path.substr(name_start_pos));\n      break;\n    }\n\n    name_list.push_back(\n        http_path.substr(name_start_pos, name_end_pos - name_start_pos));\n    name_start_pos = name_end_pos + 1;\n    if (name_start_pos >= http_path.size()) {\n      break;\n    }\n  }\n\n  return name_list;\n}\n\nHttpServer::HttpServer(const std::string &endpoint)\n    : HttpServer(endpoint, {}) {}\n\nHttpServer::HttpServer(const std::string &endpoint,\n                       const HttpServerConfig &config) {\n  std::smatch http_match_result;\n  std::regex http_pattern(R\"((https?://)([\\w\\-\\.]+)(:[0-9]+)?)\");\n  std::regex_match(endpoint, http_match_result, http_pattern);\n  const size_t sub_str_count = 4;\n  if (http_match_result.size() != sub_str_count) {\n    status_ = {STATUS_BADCONF, endpoint + \" format is wrong\"};\n    return;\n  }\n\n  auto scheme = http_match_result[1].str();\n  auto ip = http_match_result[2].str();\n  auto port_str = http_match_result[3].str();\n  if (!port_str.empty()) {\n    port_str = port_str.substr(1);  // remove ':'\n  }\n\n  const auto *format_tips = \"http://ip[:port] or https://ip[:port]\";\n  if (scheme.empty()) {\n    status_ = {STATUS_BADCONF,\n               endpoint + \" format is wrong, should be \" + format_tips};\n    
return;\n  }\n\n  if (ip.empty()) {\n    status_ = {STATUS_BADCONF,\n               endpoint + \" format is wrong, should be \" + format_tips};\n    return;\n  }\n\n  ip_ = ip;\n  port_ = scheme == \"http://\" ? 80 : 443;\n  if (!port_str.empty()) {\n    port_ = std::stoi(port_str);\n  }\n\n  if (scheme == \"http://\") {\n    server_impl_ = std::make_shared<httplib::Server>();\n  } else {\n    server_impl_ =\n        std::make_shared<httplib::SSLServer>(config.GetSSLConfigCallback());\n  }\n\n  server_impl_->set_write_timeout(config.GetTimeout());\n  server_impl_->set_read_timeout(config.GetTimeout());\n  server_impl_->set_socket_options([](socket_t sock) {\n    int yes = 1;\n    setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<void *>(&yes),\n               sizeof(yes));\n  });\n  server_impl_->set_keep_alive_max_count(1);\n  if (!server_impl_->is_valid()) {\n    status_ = {STATUS_FAULT, \"server is not valid\"};\n    return;\n  }\n  RegisterHandleFunc();\n}\n\nHttpServer::~HttpServer() {\n  Stop();\n  if (server_thread_ != nullptr) {\n    server_thread_->join();\n  }\n}\n\nStatus HttpServer::Register(const std::string &path, const HttpMethod &method,\n                            const HttpHandleFunc &func) {\n  std::lock_guard<std::mutex> lock(handler_data_lock_);\n  auto &method_map = handler_map_[path];\n  auto func_item = method_map.find(method);\n  if (func_item != method_map.end()) {\n    return STATUS_EXIST;\n  }\n\n  method_map[method] = func;\n  match_tree_.AddNode(path);\n  return STATUS_OK;\n}\n\nvoid HttpServer::Unregister(const std::string &path, const HttpMethod &method) {\n  std::lock_guard<std::mutex> lock(handler_data_lock_);\n  auto method_item = handler_map_.find(path);\n  if (method_item == handler_map_.end()) {\n    return;\n  }\n\n  auto &method_map = method_item->second;\n  auto func_item = method_map.find(method);\n  if (func_item == method_map.end()) {\n    return;\n  }\n\n  method_map.erase(method);\n  if (method_map.empty()) 
{\n    handler_map_.erase(path);\n    match_tree_.DelNode(path);\n  }\n}\n\nStatus HttpServer::Start() {\n  if (status_ != STATUS_OK) {\n    return status_;\n  }\n\n  std::lock_guard<std::mutex> lock(server_running_lock_);\n  if (is_running_) {\n    return STATUS_SUCCESS;\n  }\n\n  auto ret = server_impl_->bind_to_port(ip_, port_);\n  if (!ret) {\n    status_ = {STATUS_ALREADY, \"bind to \" + ip_ + \":\" + std::to_string(port_) +\n                                   \" failed, might be conflict, error \" +\n                                   StrError(errno)};\n    return status_;\n  }\n\n  is_running_ = true;\n  server_thread_ =\n      std::make_shared<std::thread>(std::bind(&HttpServer::Listen, this));\n  return STATUS_SUCCESS;\n}\n\nvoid HttpServer::Listen() {\n  MBLOG_INFO << \"Start listen at \" << ip_ << \":\" << port_;\n  server_impl_->listen_after_bind();\n  MBLOG_INFO << \"End listen at \" << ip_ << \":\" << port_;\n  is_running_ = false;\n}\n\nvoid HttpServer::Stop() {\n  std::lock_guard<std::mutex> lock(server_running_lock_);\n  if (!is_running_) {\n    return;\n  }\n\n  server_impl_->stop();\n  is_running_ = false;\n}\n\nbool HttpServer::IsRunning() { return is_running_; }\n\nStatus HttpServer::GetStatus() { return status_; }\n\nvoid HttpServer::RegisterHandleFunc() {\n  std::string all_path = \"/.*\";\n  httplib::Server::Handler handle_func =\n      std::bind(&HttpServer::HandleFunc, this, std::placeholders::_1,\n                std::placeholders::_2);\n  server_impl_->Get(all_path, handle_func);\n  server_impl_->Post(all_path, handle_func);\n  server_impl_->Put(all_path, handle_func);\n  server_impl_->Delete(all_path, handle_func);\n  server_impl_->Options(all_path, handle_func);\n  server_impl_->Patch(all_path, handle_func);\n}\n\nvoid HttpServer::HandleFunc(const httplib::Request &request,\n                            httplib::Response &response) {\n  auto method = request.method;\n  auto path = request.path;\n  std::string handle_path;\n  
std::lock_guard<std::mutex> lock(handler_data_lock_);\n  auto ret = match_tree_.Match(path, handle_path);\n  if (!ret) {\n    response.status = HttpStatusCodes::NOT_FOUND;\n    return;\n  }\n\n  auto method_item = handler_map_.find(handle_path);\n  if (method_item == handler_map_.end()) {\n    response.status = HttpStatusCodes::NOT_FOUND;\n    return;\n  }\n\n  auto &method_map = method_item->second;\n  auto func_item = method_map.find(method);\n  if (func_item == method_map.end()) {\n    response.status = HttpStatusCodes::NOT_FOUND;\n    return;\n  }\n\n  func_item->second(request, response);\n}\n\nstd::unordered_map<std::string, std::shared_ptr<HttpServer>>\n    HttpListener::shared_server_map_;\n\nstd::mutex HttpListener::shared_server_map_lock_;\n\nHttpListener::HttpListener(const std::string &endpoint) : endpoint_(endpoint) {\n  std::lock_guard<std::mutex> lock(shared_server_map_lock_);\n  auto item = shared_server_map_.find(endpoint);\n  if (item != shared_server_map_.end()) {\n    shared_server_ = item->second;\n    return;\n  }\n\n  shared_server_ = std::make_shared<HttpServer>(endpoint);\n  shared_server_map_[endpoint] = shared_server_;\n}\n\nHttpListener::~HttpListener() {\n  Stop();\n  std::lock_guard<std::mutex> lock(shared_server_map_lock_);\n  if (shared_server_.use_count() <= 2) {\n    shared_server_map_.erase(endpoint_);\n  }\n\n  shared_server_.reset();\n}\n\nStatus HttpListener::Register(const std::string &path, const HttpMethod &method,\n                              const HttpHandleFunc &func) {\n  auto support_func = [=](const httplib::Request &request,\n                          httplib::Response &response) {\n    if (enable_acl_ && !acl_.IsMatch(request.remote_addr)) {\n      response.status = HttpStatusCodes::FORBIDDEN;\n      response.body = \"Access Denied\";\n      AddSafeHeader(response);\n      return;\n    }\n\n    func(request, response);\n  };\n\n  shared_server_->Register(path, method, support_func);\n  
registered_path_method_.emplace_back(path, method);\n  return STATUS_OK;\n}\n\nvoid HttpListener::Start() { shared_server_->Start(); }\n\nvoid HttpListener::Stop() {\n  for (auto &path_method : registered_path_method_) {\n    shared_server_->Unregister(path_method.first, path_method.second);\n  }\n\n  registered_path_method_.clear();\n}\n\nStatus HttpListener::GetStatus() { return shared_server_->GetStatus(); }\n\nbool HttpListener::IsRunning() { return shared_server_->IsRunning(); }\n\nvoid HttpListener::SetAclWhiteList(const std::vector<std::string> &white_list) {\n  for (const auto &white_rule : white_list) {\n    acl_.AddCidr(white_rule);\n    enable_acl_ = true;\n  }\n}\n\nvoid AddSafeHeader(httplib::Response &response) {\n  response.headers.insert(std::pair<std::string, std::string>(\n      \"Referrer-Policy\", \"strict-origin-when-cross-origin\"));\n  response.headers.insert(std::pair<std::string, std::string>(\n      \"Content-Security-Policy\",\n      \"default-src 'self'  data: 'unsafe-inline' 'unsafe-eval' \"\n      \"console-static.huaweicloud.com res.hc-cdn.com;object-src 'none'; \"\n      \"frame-ancestors 'none'\"));\n  response.headers.insert(\n      std::pair<std::string, std::string>(\"X-Frame-Options\", \"DENY\"));\n}\n\nHttpRequest::HttpRequest(std::string method, std::string url)\n    : method_(std::move(method)), url_(std::move(url)) {}\n\nvoid HttpRequest::SetHeaders(const httplib::Headers &headers) {\n  headers_ = headers;\n}\n\nvoid HttpRequest::SetBody(const std::string &body) { body_ = body; }\n\nvoid HttpRequest::SetResponse(const httplib::Response &response) {\n  response_ = response;\n}\n\nstd::string HttpRequest::GetMethod() { return method_; }\n\nstd::string HttpRequest::GetURL() { return url_; }\n\nhttplib::Headers HttpRequest::GetHeaders() { return headers_; }\n\nstd::string HttpRequest::GetRequestBody() { return body_; }\n\nhttplib::Response HttpRequest::GetResponse() { return response_; }\n\nstd::unordered_map<std::string,\n      
             std::function<httplib::Result(\n                       httplib::Client &, const std::string &, HttpRequest &)>>\n    g_httpclient_func_map = {\n        {HttpMethods::GET,\n         [](httplib::Client &client, const std::string &path,\n            HttpRequest &request) {\n           return client.Get(path, request.GetHeaders());\n         }},\n        {HttpMethods::DELETE,\n         [](httplib::Client &client, const std::string &path,\n            HttpRequest &request) {\n           return client.Delete(path, request.GetHeaders());\n         }},\n        {HttpMethods::PUT,\n         [](httplib::Client &client, const std::string &path,\n            HttpRequest &request) {\n           return client.Put(path, request.GetHeaders(),\n                             request.GetRequestBody(), \"\");\n         }},\n        {HttpMethods::POST,\n         [](httplib::Client &client, const std::string &path,\n            HttpRequest &request) {\n           return client.Post(path, request.GetHeaders(),\n                              request.GetRequestBody(), \"\");\n         }},\n};\n\nStatus SendHttpRequest(HttpRequest &request) {\n  auto url = request.GetURL();\n  std::smatch url_match_result;\n  std::regex url_pattern(R\"((https?://[\\w\\-\\.]+(:[0-9]+)?)(/.*)?)\");\n  auto ret = std::regex_match(url, url_match_result, url_pattern);\n  if (!ret) {\n    return {STATUS_BADCONF, \"url \" + url + \" is wrong format\"};\n  }\n\n  if (url_match_result.size() != 4) {\n    return {STATUS_BADCONF, \"url \" + url + \" is wrong format\"};\n  }\n\n  auto scheme_host_port = url_match_result[1].str();\n  auto path = url_match_result[3].str();\n  if (path.empty()) {\n    path = \"/\";\n  }\n\n  httplib::Client client(scheme_host_port);\n  client.enable_server_certificate_verification(false);\n  client.set_write_timeout(std::chrono::seconds(30));\n\n  auto method = request.GetMethod();\n  auto func_item = g_httpclient_func_map.find(method);\n  if (func_item == 
g_httpclient_func_map.end()) {\n    return {STATUS_NOTSUPPORT, \"Not support http method \" + method};\n  }\n\n  auto result = func_item->second(client, path, request);\n  if (result == nullptr) {\n    return {STATUS_FAULT, \"Send request \" + method + \" failed, err \" +\n                              httplib::to_string(result.error())};\n  }\n\n  request.SetResponse(result.value());\n  return STATUS_OK;\n}\n\nvoid SplitPath(const std::string &path, std::string &prefix_path,\n               std::string &last_path) {\n  auto last_split_start_pos = path.rfind('/');\n  if (last_split_start_pos == std::string::npos) {\n    prefix_path = path;\n    last_path.clear();\n    return;\n  }\n\n  prefix_path = path.substr(0, last_split_start_pos);\n  last_path = path.substr(last_split_start_pos + 1);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/http_helper.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_HTTP_HELPER_H_\n#define MODELBOX_HTTP_HELPER_H_\n\n#define CPPHTTPLIB_OPENSSL_SUPPORT\n#include <httplib.h>\n\n#include <thread>\n#include <unordered_map>\n\n#include \"modelbox/base/status.h\"\n#include \"modelbox/server/utils.h\"\n\nnamespace modelbox {\n\nconstexpr const char *TEXT_PLAIN = \"text/plain\";\nconstexpr const char *JSON = \"application/json\";\n\nusing HttpStatusCode = int32_t;\n\nclass HttpStatusCodes {\n public:\n  static const HttpStatusCode CONTINUE;\n  static const HttpStatusCode SWITCHING_PROTOCOLS;\n  static const HttpStatusCode OK;\n  static const HttpStatusCode CREATED;\n  static const HttpStatusCode ACCEPTED;\n  static const HttpStatusCode NON_AUTH_INFO;\n  static const HttpStatusCode NO_CONTENT;\n  static const HttpStatusCode RESET_CONTENT;\n  static const HttpStatusCode PARTIAL_CONTENT;\n  static const HttpStatusCode MULTI_STATUS;\n  static const HttpStatusCode ALREADY_REPORTED;\n  static const HttpStatusCode IM_USED;\n  static const HttpStatusCode MULTIPLE_CHOICES;\n  static const HttpStatusCode MOVED_PERMANENTLY;\n  static const HttpStatusCode FOUND;\n  static const HttpStatusCode SEE_OTHER;\n  static const HttpStatusCode NOT_MODIFIED;\n  static const HttpStatusCode USE_PROXY;\n  static const HttpStatusCode TEMPORARY_REDIRECT;\n  static const HttpStatusCode 
PERMANENT_REDIRECT;\n  static const HttpStatusCode BAD_REQUEST;\n  static const HttpStatusCode UNAUTHORIZED;\n  static const HttpStatusCode PAYMENT_REQUIRED;\n  static const HttpStatusCode FORBIDDEN;\n  static const HttpStatusCode NOT_FOUND;\n  static const HttpStatusCode METHOD_NOT_ALLOWED;\n  static const HttpStatusCode NOT_ACCEPTABLE;\n  static const HttpStatusCode PROXY_AUTH_REQUIRED;\n  static const HttpStatusCode REQUEST_TIMEOUT;\n  static const HttpStatusCode CONFLICT;\n  static const HttpStatusCode GONE;\n  static const HttpStatusCode LENGTH_REQUIRED;\n  static const HttpStatusCode PRECONDITION_FAILED;\n  static const HttpStatusCode REQUEST_ENTITY_TOO_LARGE;\n  static const HttpStatusCode REQUEST_URI_TOO_LARGE;\n  static const HttpStatusCode UNSUPPORTED_MEDIA_TYPE;\n  static const HttpStatusCode RANGE_NOT_SATISFIABLE;\n  static const HttpStatusCode EXPECTATION_FAILED;\n  static const HttpStatusCode MISDIRECTED_REQUEST;\n  static const HttpStatusCode UNPROCESSABLE_ENTITY;\n  static const HttpStatusCode LOCKED;\n  static const HttpStatusCode FAILED_DEPENDENCY;\n  static const HttpStatusCode UPGRADE_REQUIRED;\n  static const HttpStatusCode PRECONDITION_REQUIRED;\n  static const HttpStatusCode TOO_MANY_REQUESTS;\n  static const HttpStatusCode REQUEST_HEADER_FIELDS_TOO_LARGE;\n  static const HttpStatusCode UNAVAILABLE_FOR_LEGAL_REASONS;\n  static const HttpStatusCode INTERNAL_ERROR;\n  static const HttpStatusCode NOT_IMPLEMENTED;\n  static const HttpStatusCode BAD_GATEWAY;\n  static const HttpStatusCode SERVICE_UNAVAILABLE;\n  static const HttpStatusCode GATEWAY_TIMEOUT;\n  static const HttpStatusCode HTTP_VERSION_NOT_SUPPORTED;\n  static const HttpStatusCode VARIANT_ALSO_NEGOTIATES;\n  static const HttpStatusCode INSUFFICIENT_STORAGE;\n  static const HttpStatusCode LOOP_DETECTED;\n  static const HttpStatusCode NOT_EXTENDED;\n  static const HttpStatusCode NETWORK_AUTHENTICATION_REQUIRED;\n};\n\nusing HttpMethod = std::string;\n\n#undef DELETE\n\nclass 
HttpMethods {\n public:\n  static const HttpMethod GET;\n  static const HttpMethod POST;\n  static const HttpMethod PUT;\n  static const HttpMethod DELETE;\n  static const HttpMethod HEAD;\n  static const HttpMethod OPTIONS;\n  static const HttpMethod TRACE;\n  static const HttpMethod CONNECT;\n  static const HttpMethod MERGE;\n  static const HttpMethod PATCH;\n};\n\nusing HttpHandleFunc =\n    std::function<void(const httplib::Request &, httplib::Response &)>;\n\nusing SSLConfigCallback = std::function<bool(SSL_CTX &ctx)>;\n\nStatus UseCertificate(SSL_CTX &ctx, const void *cert_buf, int len,\n                      pem_password_cb cb = nullptr,\n                      void *cb_user_data = nullptr);\n\nStatus UsePrivateKey(SSL_CTX &ctx, const void *key_buf, int len,\n                     pem_password_cb cb = nullptr,\n                     void *cb_user_data = nullptr);\n\nclass HttpServerConfig {\n public:\n  void SetTimeout(const std::chrono::seconds &timeout);\n\n  void SetSSLConfigCallback(const SSLConfigCallback &cb);\n\n  std::chrono::seconds GetTimeout() const;\n\n  SSLConfigCallback GetSSLConfigCallback() const;\n\n private:\n  SSLConfigCallback ssl_config_cb_{[](SSL_CTX &ctx) { return true; }};\n  std::chrono::seconds timeout_{60};\n};\n\nclass HttpPathMatchNode {\n public:\n  bool IsValid();\n\n  bool HasChildren();\n\n  Status AddChild(std::list<std::string> node_path);\n\n  Status DelChild(std::list<std::string> node_path);\n\n  Status Match(std::list<std::string> path, std::list<std::string> &node_path);\n\n private:\n  bool is_valid_{false};\n  std::unordered_map<std::string, std::shared_ptr<HttpPathMatchNode>> children_;\n};\n\nclass HttpPathMatchTree {\n public:\n  Status AddNode(const std::string &node_path);\n\n  void DelNode(const std::string &node_path);\n\n  Status Match(const std::string &path, std::string &node_path);\n\n private:\n  std::list<std::string> SplitHttpPath(const std::string &http_path);\n\n  HttpPathMatchNode root_;\n};\n\nclass 
HttpServer {\n public:\n  HttpServer(const std::string &endpoint);\n\n  HttpServer(const std::string &endpoint, const HttpServerConfig &config);\n\n  virtual ~HttpServer();\n\n  Status Register(const std::string &path, const HttpMethod &method,\n                  const HttpHandleFunc &func);\n\n  void Unregister(const std::string &path, const HttpMethod &method);\n\n  Status Start();\n\n  void Stop();\n\n  bool IsRunning();\n\n  Status GetStatus();\n\n private:\n  void Listen();\n\n  void RegisterHandleFunc();\n\n  void HandleFunc(const httplib::Request &request, httplib::Response &response);\n\n  Status status_{STATUS_OK};\n  std::shared_ptr<httplib::Server> server_impl_;\n  std::string ip_;\n  std::mutex server_running_lock_;\n  std::atomic_bool is_running_{false};\n  int port_{0};\n  std::shared_ptr<std::thread> server_thread_;\n\n  std::mutex handler_data_lock_;\n  std::unordered_map<std::string,\n                     std::unordered_map<std::string, HttpHandleFunc>>\n      handler_map_;\n  HttpPathMatchTree match_tree_;\n};\n\nclass HttpListener {\n public:\n  HttpListener(const std::string &endpoint);\n\n  virtual ~HttpListener();\n\n  Status Register(const std::string &path, const HttpMethod &method,\n                  const HttpHandleFunc &func);\n\n  void Start();\n\n  void Stop();\n\n  Status GetStatus();\n\n  bool IsRunning();\n\n  void SetAclWhiteList(const std::vector<std::string> &white_list);\n\n private:\n  IPACL acl_;\n  bool enable_acl_{false};\n  std::string endpoint_;\n  std::shared_ptr<HttpServer> shared_server_;\n  std::list<std::pair<std::string, std::string>> registered_path_method_;\n\n  static std::unordered_map<std::string, std::shared_ptr<HttpServer>>\n      shared_server_map_;\n  static std::mutex shared_server_map_lock_;\n};\n\nvoid AddSafeHeader(httplib::Response &response);\n\nclass HttpRequest {\n public:\n  HttpRequest(std::string method, std::string url);\n\n  void SetHeaders(const httplib::Headers &headers);\n\n  void 
SetBody(const std::string &body);\n\n  void SetResponse(const httplib::Response &response);\n\n  std::string GetMethod();\n\n  std::string GetURL();\n\n  httplib::Headers GetHeaders();\n\n  std::string GetRequestBody();\n\n  httplib::Response GetResponse();\n\n private:\n  std::string method_;\n  std::string url_;\n  httplib::Headers headers_;\n  std::string body_;\n  httplib::Response response_;\n};\n\nStatus SendHttpRequest(HttpRequest &request);\n\nvoid SplitPath(const std::string &path, std::string &prefix_path,\n               std::string &last_path);\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_HTTP_HELPER_H_"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/job.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JOB_H_\n#define MODELBOX_JOB_H_\n\n#include <modelbox/flow.h>\n#include <modelbox/server/task_manager.h>\n\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\n/**\n * @brief Job status\n */\nenum JobStatus {\n  /// @brief Job is creating\n  JOB_STATUS_CREATING,\n  /// @brief Job is running\n  JOB_STATUS_RUNNING,\n  /// @brief Job ran success.\n  JOB_STATUS_SUCCEEDED,\n  /// @brief Job ran failed.\n  JOB_STATUS_FAILED,\n  /// @brief Job is pending.\n  JOB_STATUS_PENDING,\n  /// @brief Job is deleting.\n  JOB_STATUS_DELETEING,\n  /// @brief Job status is unknown.\n  JOB_STATUS_UNKNOWN,\n  /// @brief Job is not exist.\n  JOB_STATUS_NOTEXIST\n};\n\nconstexpr const char* job_str_status[] = {\"CREATEING\", \"RUNNING\", \"SUCCEEDED\",\n                                          \"FAILED\",    \"PENDING\", \"DELETEING\",\n                                          \"UNKNOWN\",   \"NOTEXIST\"};\n\n/**\n * @brief Job error info\n */\nstruct ErrorInfo {\n  /// @brief Job error code\n  std::string error_code_;\n  /// @brief Job error message\n  std::string error_msg_;\n};\n\nclass Job {\n public:\n  /**\n   * @brief Create job\n   * @param job_name job name\n   * @param graph_path graph file\n   */\n  Job(std::string job_name, std::string graph_path);\n\n  
/**\n   * @brief Create job\n   * @param job_name job name\n   * @param graph_name graph name\n   * @param graph graph in string\n   */\n  Job(std::string job_name, std::string graph_name, std::string graph);\n\n  virtual ~Job();\n\n  /**\n   * @brief Init job\n   * @return init result\n   */\n  Status Init();\n\n  /**\n   * @brief Build graph\n   * @return build result\n   */\n  Status Build();\n\n  /**\n   * @brief Run graph\n   */\n  void Run();\n\n  /**\n   * @brief Stop graph\n   */\n  void Stop();\n\n  /**\n   * @brief Wait graph finish\n   */\n  void Join();\n\n  /**\n   * @brief Get job status\n   * @return job status\n   */\n  JobStatus GetJobStatus();\n\n  /**\n   * @brief Get job status in string\n   * @return job status in string\n   */\n  std::string JobStatusString();\n\n  /**\n   * @brief Convert job status to string\n   * @param status job status\n   * @return job status in string\n   */\n  static std::string JobStatusToString(JobStatus status);\n\n  /**\n   * @brief Get job error info\n   * @return job error info\n   */\n  ErrorInfo GetErrorInfo();\n\n  /**\n   * @brief Get job error info in string\n   * @return job error info in string\n   */\n  std::string GetErrorMsg();\n\n  /**\n   * @brief Clear job error info\n   */\n  void ClearErrorInfo();\n\n  /**\n   * @brief Set error info to job\n   * @param errorInfo error info\n   */\n  void SetErrorInfo(ErrorInfo& errorInfo);\n\n  /**\n   * @brief Set error info to job\n   * @param code error code\n   * @param msg error msg\n   */\n  void SetErrorInfo(const std::string& code, const std::string& msg);\n\n  /**\n   * @brief Set error info to job\n   * @param status status\n   */\n  void SetError(const modelbox::Status& status);\n\n  /**\n   * @brief Set job name\n   * @param job_name job name\n   */\n  void SetJobName(const std::string& job_name);\n\n  /**\n   * @brief Get job name\n   * @return job name\n   */\n  std::string GetJobName();\n\n  /**\n   * @brief Get job flow\n   * @return job flow\n   
*/\n  std::shared_ptr<modelbox::Flow> GetFlow();\n\n  /**\n   * @brief Create task manager\n   * @param limit_task_count task threshold\n   * @return task manager\n   */\n  std::shared_ptr<TaskManager> CreateTaskManger(int limit_task_count);\n\n private:\n  std::string job_name_;\n  std::string graph_path_;\n  std::string graph_name_;\n  std::string graph_;\n  JobStatus status_{JOB_STATUS_UNKNOWN};\n  ErrorInfo error_info_;\n  std::shared_ptr<modelbox::Flow> flow_;\n  std::shared_ptr<modelbox::TimerTask> heart_beat_task_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_JOB_H_\n"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/job_manager.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JOB_MANAGER_H_\n#define MODELBOX_JOB_MANAGER_H_\n\n#include <modelbox/server/job.h>\n\n#include <utility>\n\nnamespace modelbox {\n/**\n * @brief Job management, used to manage multiple flows, provide management api\n * and query api of flowcharts.\n */\nclass JobManager {\n public:\n  /**\n   * @brief Create a new job with flow graph\n   * @param job_name name of the job\n   * @param graph_file_path file path of flow graph in modelbox graph format\n   * @return a new job\n   */\n  std::shared_ptr<modelbox::Job> CreateJob(const std::string& job_name,\n                                           const std::string& graph_file_path);\n\n  /**\n   * @brief Create a new job with inline flow graph\n   * @param job_name name of the job\n   * @param graph_name name of flow graph\n   * @param graph inline string of graph, in modelbox graph format.\n   * @return a new job\n   */\n  std::shared_ptr<modelbox::Job> CreateJob(const std::string& job_name,\n                                           const std::string& graph_name,\n                                           const std::string& graph);\n\n  /**\n   * @brief Delete job\n   * @param job_name name of the job\n   * @return delete result\n   */\n  bool DeleteJob(const std::string& job_name);\n\n  /**\n   * @brief Get job by job name\n   * @param 
job_name name of the job\n   * @return the job object\n   */\n  std::shared_ptr<modelbox::Job> GetJob(const std::string& job_name);\n\n  /**\n   * @brief Get all jobs\n   * @return all jobs\n   */\n  std::vector<std::shared_ptr<modelbox::Job>> GetJobList();\n\n  /**\n   * @brief Get all jobs in map container, key is job name, value is job object\n   * @return all jobs in map container.\n   */\n  std::unordered_map<std::string, std::shared_ptr<modelbox::Job>> GetJobMap();\n\n  /**\n   * @brief Get job status\n   * @param job_name name of the job\n   * @return job status\n   */\n  modelbox::JobStatus QueryJobStatus(const std::string& job_name);\n\n  /**\n   * @brief Get job status in string\n   * @param job_name name of the job\n   * @return job status in string\n   */\n  std::string QueryJobStatusString(const std::string& job_name);\n\n  /**\n   * @brief Get job error message\n   * @param job_name name of the job\n   * @return job error message.\n   */\n  std::string GetJobErrorMsg(const std::string& job_name);\n\n private:\n  std::unordered_map<std::string, std::shared_ptr<modelbox::Job>> jobs_;\n  std::mutex job_lock_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_JOB_MANAGER_H_"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_PLUGIN_H_\n#define MODELBOX_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/thread_pool.h>\n\n#include <atomic>\n#include <mutex>\n\nnamespace modelbox {\n\nclass Plugin {\n public:\n  /**\n   * @brief Server plugin\n   */\n  Plugin();\n\n  virtual ~Plugin();\n\n  /**\n   * @brief plugin init\n   * @param config plugin configuration\n   * @return init result\n   */\n  virtual bool Init(std::shared_ptr<modelbox::Configuration> config) = 0;\n\n  /**\n   * @brief Start plugin\n   * @return start result\n   */\n  virtual bool Start() = 0;\n\n  /**\n   * @brief Stop plugin\n   * @return stop result\n   */\n  virtual bool Stop() = 0;\n\n  /**\n   * @brief Check plugin  status\n   * @return check result\n   */\n  virtual bool Check();\n};\n\nusing PluginRecvMsgFunc = std::function<void(\n    const std::string &msg_name, const std::shared_ptr<const void> &msg_data,\n    size_t msg_len)>;\n\nclass PluginMsgRouter {\n  friend class Server;\n\n public:\n  /**\n   * @brief Register a func to receive msg for the topic\n   * @param topic_name Msg for the topic you want\n   * @param func To process the msg, should not stuck for long time\n   * @return Result of register\n   */\n  modelbox::Status RegisterRecvFunc(const std::string 
&topic_name,\n                                    const PluginRecvMsgFunc &func);\n\n  /**\n   * @brief Route msg to the target async\n   * @param topic_name Route msg to the topic\n   * @param msg_name Identify the msg\n   * @param msg_data Data route to others\n   * @param msg_len Data length\n   * @return Result of submit the msg\n   */\n  modelbox::Status RouteMsg(const std::string &topic_name,\n                            const std::string &msg_name,\n                            const std::shared_ptr<const void> &msg_data,\n                            size_t msg_len);\n\n  /**\n   * @brief Get msg router instance\n   * @return Instance of msg router\n   */\n  static std::shared_ptr<PluginMsgRouter> GetInstance();\n\n private:\n  void Clear() {\n    std::lock_guard<std::mutex> lck(receivers_lock_);\n    receivers_.clear();\n  }\n\n  std::mutex receivers_lock_;\n  std::map<std::string, std::vector<PluginRecvMsgFunc>> receivers_;\n  modelbox::ThreadPool thread_pool_{2, -1, 100};\n};\n\n}  // namespace modelbox\n\nextern \"C\" {\n\n#if defined(__clang__)\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wreturn-type-c-linkage\"\n#endif\n\nMODELBOX_DLL_PUBLIC std::shared_ptr<modelbox::Plugin> CreatePlugin();\n\n#if defined(__clang__)\n#pragma clang diagnostic pop\n#endif\n}\n\n#endif  // MODELBOX_PLUGIN_H_\n"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/statistics.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_STATISTICS_H_\n#define MODELBOX_STATISTICS_H_\n\n#endif  // MODELBOX_STATISTICS_H_"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/task.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_TASK_H_\n#define MODELBOX_SERVER_TASK_H_\n#include <modelbox/base/status.h>\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n\n#include \"modelbox/base/uuid.h\"\n#include \"modelbox/flow.h\"\n#include \"modelbox/session_context.h\"\n#include \"modelbox/virtual_node.h\"\nnamespace modelbox {\n\nenum TaskStatus { UNKNOWN, WAITING, WORKING, STOPPED, ABNORMAL, FINISHED };\nclass TaskManager;\nclass OneShotTask;\n\nusing TaskDataCallback =\n    std::function<void(OneShotTask*, const OutputBufferList&)>;\nusing TaskStatusCallback = std::function<void(OneShotTask*, TaskStatus)>;\n\nclass Task : public std::enable_shared_from_this<Task> {\n public:\n  Task();\n  virtual ~Task();\n\n  /**\n   * @brief Create buffer list\n   * @return buffer list pointer\n   */\n  std::shared_ptr<BufferList> CreateBufferList();\n\n  /**\n   * @brief Set content to session\n   * @param key content key\n   * @param content pointer to content\n   * @return set result\n   */\n  Status SetSessionContent(const std::string& key,\n                           std::shared_ptr<void> content);\n\n  /**\n   * @brief Get session config to set before task run\n   * @return ref to session config\n   */\n  std::shared_ptr<modelbox::Configuration> GetSessionConfig();\n\n  /**\n   * @brief Set data meta to port\n   
* @param port_name port name\n   * @param meta pointer to meta\n   * @return set result\n   */\n  Status SetDataMeta(const std::string& port_name,\n                     std::shared_ptr<DataMeta> meta);\n\n  /**\n   * @brief Get task status\n   * @return task status\n   */\n  TaskStatus GetTaskStatus();\n\n  /**\n   * @brief Get task last error\n   * @return task error\n   */\n  std::shared_ptr<FlowUnitError> GetLastError();\n\n  /**\n   * @brief Get task id\n   * @return task id\n   */\n  std::string GetTaskId();\n\n  /**\n   * @brief Start task\n   * @return task start result\n   */\n  Status Start();\n\n  /**\n   * @brief Stop task\n   * @return task stop result\n   */\n  Status Stop();\n\n  /**\n   * @brief Get task uuid\n   * @return task uuid\n   */\n  std::string GetUUID();\n\n protected:\n  /**\n   * @brief Feed data to task\n   * @return feed result\n   */\n  virtual Status FeedData() = 0;\n\n  /**\n   * @brief Fetch data from task\n   * @param fetch_status task status\n   * @param output_buf task output data\n   */\n  virtual void FetchData(Status fetch_status, OutputBufferList& output_buf) = 0;\n\n  /**\n   * @brief Pointer to external data\n   */\n  std::weak_ptr<ExternalDataMap> external_data_;\n\n private:\n  friend class TaskManager;\n  void SetTaskManager(const std::shared_ptr<TaskManager>& task_manager);\n  void UpdateTaskStatus(TaskStatus task_status);\n  Status SendData();\n  std::shared_ptr<ExternalDataMap> GetExternalData();\n  bool IsReady();\n  bool IsTaskSubmitted();\n\n  std::atomic<TaskStatus> status_{UNKNOWN};\n  std::shared_ptr<Flow> flow_;\n  std::weak_ptr<TaskManager> task_manager_;\n  std::atomic<bool> already_submit_;\n  std::string task_uuid_;\n  std::mutex lock_;\n  std::condition_variable cv_;\n};\n\nclass OneShotTask : public Task {\n public:\n  friend class TaskManager;\n\n  /**\n   * @brief One off data task\n   */\n  OneShotTask();\n  ~OneShotTask() override;\n\n  /**\n   * @brief Fill data to task\n   * @param data data list\n 
  * @return feed result\n   */\n  Status FillData(\n      std::unordered_map<std::string, std::shared_ptr<BufferList>>& data);\n\n  /**\n   * @brief Register data callback function\n   * @param callback data callback function\n   */\n  void RegisterDataCallback(const TaskDataCallback& callback);\n\n  /**\n   * @brief Register task status function\n   * @param callback status callback function\n   */\n  void RegisterStatusCallback(const TaskStatusCallback& callback);\n\n protected:\n  /**\n   * @brief Feed data to task\n   * @return feed result\n   */\n  Status FeedData() override;\n\n  /**\n   * @brief Fetch data from task\n   * @param fetch_status task status\n   * @param output_buf task output data\n   */\n  void FetchData(Status fetch_status, OutputBufferList& output_buf) override;\n\n private:\n  TaskDataCallback GetDataCallback();\n  TaskStatusCallback GetStatusCallback();\n  std::unordered_map<std::string, std::shared_ptr<BufferList>> data_;\n\n  TaskDataCallback data_callback_;\n  TaskStatusCallback status_callback_;\n};\n\n}  // namespace modelbox\n#endif\n"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/task_manager.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_TASK_MANAGER_H_\n#define MODELBOX_SERVER_TASK_MANAGER_H_\n\n#include <modelbox/server/task.h>\n#include <modelbox/virtual_node.h>\n\nnamespace modelbox {\nenum TaskType { TASK_ONESHOT };\nclass TaskManager : public std::enable_shared_from_this<TaskManager> {\n public:\n  /**\n   * @brief Task manager\n   * @param flow pointer to flow\n   * @param task_num_limits task number threshold\n   */\n  TaskManager(std::shared_ptr<Flow> flow, uint32_t task_num_limits);\n  virtual ~TaskManager();\n\n  /**\n   * @brief Start task manager\n   * @return start result\n   */\n  Status Start();\n\n  /**\n   * @brief Stop task manager\n   */\n  void Stop();\n\n  /**\n   * @brief Create task\n   * @return pointer to task\n   */\n  std::shared_ptr<Task> CreateTask(TaskType task_type);\n\n  /**\n   * @brief Delete task by id\n   * @param taskid task id\n   * @return delete result\n   */\n  Status DeleteTaskById(const std::string& taskid);\n\n  /**\n   * @brief Get task by id\n   * @param taskid task id\n   * @return task pointer\n   */\n  std::shared_ptr<Task> GetTaskById(const std::string& taskid);\n\n  /**\n   * @brief Get task number\n   * @return task number\n   */\n  uint32_t GetTaskCount();\n\n  /**\n   * @brief Get all tasks\n   * @return task list\n   */\n  std::vector<std::shared_ptr<Task>> 
GetAllTasks();\n\n  /**\n   * @brief Set task threshold\n   * @param task_limits task threshold\n   */\n  void SetTaskNumLimit(int task_limits);\n\n  /**\n   * @brief Get task threshold\n   */\n  int GetTaskNumLimit();\n\n  /**\n   * @brief Get available task number\n   */\n  int GetAvaiableTaskCount();\n\n  /**\n   * @brief Register new task\n   * @param task task pointer\n   */\n  void RegisterTask(const std::shared_ptr<Task>& task);\n\n private:\n  friend class Task;\n  void ReceiveWork();\n  Status Submit(const std::shared_ptr<Task>& task);\n  void StartWaittingTask();\n  std::shared_ptr<Flow> GetFlow();\n  std::shared_ptr<ExternalDataSelect> GetSelector();\n\n  std::shared_ptr<ThreadPool> thread_pool_;\n  std::mutex new_del_lock_;\n  std::mutex map_lock_;\n  std::shared_ptr<Flow> flow_;\n  std::atomic<uint32_t> task_num_limits_;\n  std::atomic<uint32_t> avaiable_task_counts_;\n  std::shared_ptr<ExternalDataSelect> selector_;\n  std::unordered_map<std::string, std::shared_ptr<Task>> task_maps_;\n  std::map<std::shared_ptr<ExternalDataMap>, std::shared_ptr<Task>>\n      external_task_maps_;\n  std::shared_ptr<std::thread> receive_thread_;\n  std::atomic<bool> thread_run_;\n};\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/timer.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_TIMER_H_\n#define MODELBOX_SERVER_TIMER_H_\n\n#include <modelbox/base/timer.h>\n\nnamespace modelbox {\n\nclass ServerTimer : public modelbox::Timer {\n public:\n  /**\n   * @brief Get server timer instance\n   * @return pointer to server timer\n   */\n  static ServerTimer *GetInstance();\n\n  /**\n   * @brief Server timer start\n   */\n  void Start();\n\n  /**\n   * @brief Server timer run\n   */\n  void Run() override;\n\n  /**\n   * @brief Server timer stop\n   */\n  void Stop();\n\n private:\n  ServerTimer() = default;\n  ~ServerTimer() override = default;\n};\n\n/**\n * @brief Global server timer\n */\nextern ServerTimer *kServerTimer;\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_TIMER_H_\n"
  },
  {
    "path": "src/modelbox/server/include/modelbox/server/utils.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_UTILS_H_\n#define MODELBOX_SERVER_UTILS_H_\n\n#include <netdb.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n\n#include <vector>\n\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\n/**\n * @brief IP access control list\n */\nclass IPACL {\n public:\n  /**\n   * @brief constructor\n   */\n  IPACL();\n\n  /**\n   * @brief destructor\n   */\n  virtual ~IPACL();\n\n  /**\n   * @brief Add acl in cidr format\n   * @param cidr ip in cidr format\n   * @return add result\n   */\n  modelbox::Status AddCidr(const std::string &cidr);\n\n  /**\n   * @brief check whether ip is in acl list\n   * @param ipaddr ip address\n   * @return STATUS_SUCCESS ip is in acl list\n   *         STATUS_NOTFOUND ip is not in acl list\n   */\n  modelbox::Status IsMatch(const std::string &ipaddr);\n\n private:\n  uint32_t GetIPV4Addr(const std::shared_ptr<struct addrinfo> &addrinfo);\n  std::shared_ptr<struct addrinfo> GetAddrInfo(const std::string &host);\n\n  std::vector<std::pair<uint32_t, uint32_t>> ipv4_acl_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_UTILS_H_\n"
  },
  {
    "path": "src/modelbox/server/job.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/flow.h>\n#include <modelbox/server/job.h>\n#include <modelbox/server/timer.h>\n\n#include <utility>\n\nconstexpr uint64_t HEART_BEAT_PERIOD_MS = 60 * 1000;\n\nnamespace modelbox {\n\nJob::Job(std::string job_name, std::string graph_path)\n    : job_name_(std::move(job_name)), graph_path_(std::move(graph_path)) {}\n\nJob::Job(std::string job_name, std::string graph_name, std::string graph)\n    : job_name_(std::move(job_name)),\n      graph_name_(std::move(graph_name)),\n      graph_(std::move(graph)) {}\n\nJob::~Job() {\n  if (flow_ != nullptr) {\n    flow_->Stop();\n    flow_ = nullptr;\n  }\n}\n\nstd::string Job::JobStatusToString(JobStatus status) {\n  if ((int)status >= (int)(JOB_STATUS_NOTEXIST + 1)) {\n    return \"\";\n  }\n\n  return job_str_status[status];\n}\n\nstd::string Job::JobStatusString() { return JobStatusToString(GetJobStatus()); }\n\nStatus Job::Init() {\n  flow_ = std::make_shared<modelbox::Flow>();\n  Status status;\n  status_ = JOB_STATUS_CREATING;\n  if (graph_path_.length() > 0) {\n    status = flow_->Init(graph_path_);\n  }\n  if (graph_.length() > 0) {\n    status = flow_->Init(graph_name_, graph_);\n  }\n\n  if (!status) {\n    MBLOG_ERROR << \"flow init failed: \" << status;\n    SetError(status);\n  }\n\n  return 
status;\n}\n\nStatus Job::Build() {\n  if (flow_ == nullptr) {\n    Status status = {STATUS_SHUTDOWN, \"Job is shutdown\"};\n    SetError(status);\n    return status;\n  }\n\n  auto retval = flow_->Build();\n  if (!retval) {\n    SetError(retval);\n  }\n\n  return retval;\n}\n\nvoid Job::Run() {\n  if (flow_ == nullptr) {\n    return;\n  }\n\n  flow_->RunAsync();\n  status_ = JOB_STATUS_RUNNING;\n\n  heart_beat_task_ = std::make_shared<modelbox::TimerTask>([this]() {\n    auto job_status = this->GetJobStatus();\n\n    if (job_status != JOB_STATUS_RUNNING) {\n      MBLOG_ERROR << \"get job[\" << this->GetJobName()\n                  << \"] status:\" << this->JobStatusToString(job_status);\n    }\n  });\n\n  kServerTimer->Schedule(heart_beat_task_, 0, HEART_BEAT_PERIOD_MS, false);\n}\n\nvoid Job::Stop() {\n  if (flow_ == nullptr) {\n    return;\n  }\n\n  flow_->Stop();\n}\n\nvoid Job::Join() {\n  if (flow_ == nullptr) {\n    return;\n  }\n\n  flow_->Wait();\n}\n\nJobStatus Job::GetJobStatus() {\n  modelbox::Status retval;\n  if (status_ != JOB_STATUS_RUNNING) {\n    return status_;\n  }\n\n  if (flow_ == nullptr) {\n    return JOB_STATUS_PENDING;\n  }\n\n  auto status = flow_->Wait(-1, &retval);\n  switch (status.Code()) {\n    case modelbox::STATUS_SUCCESS:\n      if (retval == modelbox::STATUS_OK || retval == modelbox::STATUS_STOP ||\n          retval == STATUS_SHUTDOWN) {\n        status_ = JOB_STATUS_SUCCEEDED;\n        return status_;\n      }\n      break;\n    case modelbox::STATUS_NORESPONSE:\n    case modelbox::STATUS_BUSY:\n      return JOB_STATUS_RUNNING;\n      break;\n    default:\n      SetError(status);\n      flow_->Stop();\n      return JOB_STATUS_FAILED;\n      break;\n  }\n\n  return JOB_STATUS_UNKNOWN;\n}\n\nErrorInfo Job::GetErrorInfo() { return error_info_; }\n\nstd::string Job::GetErrorMsg() {\n  std::string msg;\n\n  if (error_info_.error_code_.length() > 0) {\n    msg = error_info_.error_code_;\n  }\n\n  if (error_info_.error_msg_.length() > 
0) {\n    if (msg.length() > 0) {\n      msg += \", \";\n    }\n\n    msg += error_info_.error_msg_;\n  }\n\n  return msg;\n}\n\nvoid Job::SetErrorInfo(ErrorInfo& errorInfo) { error_info_ = errorInfo; }\n\nvoid Job::ClearErrorInfo() {\n  error_info_.error_code_ = \"\";\n  error_info_.error_msg_ = \"\";\n}\n\nvoid Job::SetErrorInfo(const std::string& code, const std::string& msg) {\n  error_info_.error_code_ = code;\n  error_info_.error_msg_ = msg;\n}\n\nvoid Job::SetError(const modelbox::Status& status) {\n  error_info_.error_msg_ = status.WrapErrormsgs();\n  status_ = JOB_STATUS_FAILED;\n}\n\nvoid Job::SetJobName(const std::string& job_name) { job_name_ = job_name; }\n\nstd::string Job::GetJobName() { return job_name_; }\n\nstd::shared_ptr<modelbox::Flow> Job::GetFlow() { return flow_; }\n\nstd::shared_ptr<TaskManager> Job::CreateTaskManger(int limit_task_count) {\n  auto task_manager = std::make_shared<TaskManager>(flow_, limit_task_count);\n  return task_manager;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/modelbox/server/job_manager.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/log.h>\n#include <modelbox/server/job_manager.h>\n\nnamespace modelbox {\n\nstd::shared_ptr<modelbox::Job> JobManager::CreateJob(\n    const std::string& job_name, const std::string& graph_file_path) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  if (jobs_.find(job_name) != jobs_.end()) {\n    MBLOG_WARN << \"job \" << job_name << \" is running\";\n    modelbox::StatusError = {modelbox::STATUS_ALREADY, \"job already running\"};\n    return nullptr;\n  }\n  auto job = std::make_shared<modelbox::Job>(job_name, graph_file_path);\n  jobs_.emplace(std::make_pair(job_name, job));\n  return job;\n}\n\nstd::shared_ptr<modelbox::Job> JobManager::CreateJob(\n    const std::string& job_name, const std::string& graph_name,\n    const std::string& graph) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  if (jobs_.find(job_name) != jobs_.end()) {\n    std::string msg = \"job \" + job_name + \" is running\";\n    MBLOG_WARN << msg;\n    modelbox::StatusError = {modelbox::STATUS_INPROGRESS, msg};\n    return nullptr;\n  }\n  auto job = std::make_shared<modelbox::Job>(job_name, graph_name, graph);\n  jobs_.emplace(std::make_pair(job_name, job));\n  return job;\n}\n\nmodelbox::JobStatus JobManager::QueryJobStatus(const std::string& job_name) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  
auto iter = jobs_.find(job_name);\n  if (iter != jobs_.end()) {\n    return iter->second->GetJobStatus();\n  }\n\n  return modelbox::JobStatus::JOB_STATUS_NOTEXIST;\n}\n\nstd::string JobManager::QueryJobStatusString(const std::string& job_name) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  auto iter = jobs_.find(job_name);\n  if (iter != jobs_.end()) {\n    return iter->second->JobStatusString();\n  }\n\n  return modelbox::Job::JobStatusToString(\n      modelbox::JobStatus::JOB_STATUS_NOTEXIST);\n}\n\nstd::string JobManager::GetJobErrorMsg(const std::string& job_name) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  auto iter = jobs_.find(job_name);\n  if (iter != jobs_.end()) {\n    return iter->second->GetErrorMsg();\n  }\n\n  return \"\";\n}\n\nbool JobManager::DeleteJob(const std::string& job_name) {\n  MBLOG_INFO << \"delete job : \" << job_name;\n  std::lock_guard<std::mutex> lock(job_lock_);\n  if (jobs_.find(job_name) != jobs_.end()) {\n    jobs_.erase(job_name);\n  } else {\n    MBLOG_WARN << \"can not delete job : \" << job_name << \", no this job\";\n    return false;\n  }\n\n  return true;\n}\n\nstd::shared_ptr<modelbox::Job> JobManager::GetJob(const std::string& job_name) {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  if (jobs_.find(job_name) == jobs_.end()) {\n    return nullptr;\n  }\n\n  return jobs_[job_name];\n}\n\nstd::vector<std::shared_ptr<modelbox::Job>> JobManager::GetJobList() {\n  std::lock_guard<std::mutex> lock(job_lock_);\n  std::vector<std::shared_ptr<modelbox::Job>> jobs;\n  for (const auto& job : jobs_) {\n    jobs.push_back(job.second);\n  }\n  return jobs;\n}\n\nstd::unordered_map<std::string, std::shared_ptr<modelbox::Job>>\nJobManager::GetJobMap() {\n  return jobs_;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/js_engine.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"js_engine.h\"\n#ifdef ENABLE_JS_PLUGIN\n#include <fstream>\n#include <iostream>\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nvoid fatal_function(void *udata, const char *msg) {\n  MBLOG_FATAL << \"Duktape fatal error, detail:\" << msg << std::endl;\n  abort();  // If not abort, duktape engine will stuck\n}\n\nJSFunctionParam::JSFunctionParam(std::shared_ptr<duk_context> ctx)\n    : ctx_(std::move(ctx)) {}\n\nvoid JSFunctionParam::AddBoolean(bool val) {\n  ++argc_;\n  duk_push_boolean(ctx_.get(), val);\n}\n\nvoid JSFunctionParam::AddInt32(int32_t val) {\n  ++argc_;\n  duk_push_int(ctx_.get(), val);\n}\n\nvoid JSFunctionParam::AddUint32(uint32_t val) {\n  ++argc_;\n  duk_push_uint(ctx_.get(), val);\n}\n\nvoid JSFunctionParam::AddNum(double val) {\n  ++argc_;\n  duk_push_number(ctx_.get(), val);\n}\n\nvoid *JSFunctionParam::AddBuffer(size_t size) {\n  ++argc_;\n  return duk_push_fixed_buffer(ctx_.get(), size);\n}\n\nvoid JSFunctionParam::AddString(const std::string &val) {\n  ++argc_;\n  duk_push_lstring(ctx_.get(), val.c_str(), val.size());\n}\n\nvoid JSFunctionParam::AddPointer(void *val) {\n  ++argc_;\n  duk_push_pointer(ctx_.get(), val);\n}\n\nvoid JSFunctionParam::AddNull() {\n  ++argc_;\n  duk_push_null(ctx_.get());\n}\n\nvoid 
JSFunctionParam::AddHeapPtr(void *ptr) {\n  ++argc_;\n  duk_push_heapptr(ctx_.get(), ptr);\n}\n\nsize_t JSFunctionParam::GetParamSize() { return argc_; }\n\nJSFunctionReturn::JSFunctionReturn(std::shared_ptr<duk_context> ctx)\n    : ctx_(std::move(ctx)) {}\n\nbool JSFunctionReturn::GetBool() { return duk_get_boolean(ctx_.get(), -1); }\n\nint32_t JSFunctionReturn::GetInt32() { return duk_get_int(ctx_.get(), -1); }\n\nuint32_t JSFunctionReturn::GetUint32() { return duk_get_uint(ctx_.get(), -1); }\n\ndouble JSFunctionReturn::GetNum() { return duk_get_number(ctx_.get(), -1); }\n\nstd::string JSFunctionReturn::GetString() {\n  const auto *duk_str = duk_get_string(ctx_.get(), -1);\n  if (duk_str == nullptr) {\n    return \"\";\n  }\n\n  return duk_str;\n}\n\nvoid *JSFunctionReturn::GetPointer() { return duk_get_pointer(ctx_.get(), -1); }\n\nJSCtx::JSCtx() = default;\n\nmodelbox::Status JSCtx::Init() {\n  auto *ctx_ptr =\n      duk_create_heap(nullptr, nullptr, nullptr, nullptr, fatal_function);\n  if (ctx_ptr == nullptr) {\n    std::string err = \"Init js ctx failed\";\n    MBLOG_ERROR << err;\n    return modelbox::STATUS_FAULT;\n  }\n\n  ctx_.reset(ctx_ptr, [](duk_context *ctx) { duk_destroy_heap(ctx); });\n  duk_push_global_object(ctx_.get());\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JSCtx::LoadCode(const std::string &code,\n                                 const std::string &code_name) {\n  auto handle = WaitCtx();\n  auto *buf = (char *)duk_push_fixed_buffer(ctx_.get(), code.size());\n  if (buf == nullptr) {\n    std::string err =\n        \"Create buffer in js ctx failed, size \" + std::to_string(code.size());\n    MBLOG_ERROR << err;\n    return {STATUS_FAULT, err};\n  }\n\n  auto e_ret = memcpy_s(buf, code.size(), code.data(), code.size()); // NOLINT\n  if (e_ret != EOK) {\n    std::string err = \"memcpy failed, size \" + std::to_string(code.size()) +\n                      \", ret \" + std::to_string(e_ret);\n    MBLOG_ERROR << err;\n    return 
{STATUS_FAULT, err};\n  }\n\n  duk_buffer_to_string(ctx_.get(), -1);\n  duk_push_string(ctx_.get(), code_name.c_str());\n  auto ret = CompileCode();\n  if (!ret) {\n    return ret;\n  }\n\n  MBLOG_INFO << \"Load source code success\";\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JSCtx::LoadSource(const std::string &source_path) {\n  std::ifstream file(source_path);\n  if (!file.is_open()) {\n    std::string err = \"Open file \" + source_path + \" failed\";\n    MBLOG_ERROR << err;\n    return {STATUS_FAULT, err};\n  }\n\n  file.seekg(0, std::ifstream::end);\n  size_t file_len = file.tellg();\n  const size_t max_file_size = 1 * 1024 * 1024;\n  if (file_len > max_file_size) {\n    std::string err = \"Size of file \" + source_path + \" is great than 1MB\";\n    MBLOG_ERROR << err;\n    return {STATUS_FAULT, err};\n  }\n\n  auto handle = WaitCtx();\n  auto *buf = (char *)duk_push_fixed_buffer(ctx_.get(), file_len);\n  if (buf == nullptr) {\n    std::string err =\n        \"Create buffer in js ctx failed, size \" + std::to_string(file_len);\n    MBLOG_ERROR << err;\n    return {STATUS_FAULT, err};\n  }\n\n  file.seekg(0, std::ifstream::beg);\n  file.read(buf, file_len);\n  file.close();\n\n  duk_buffer_to_string(ctx_.get(), -1);\n  duk_push_string(ctx_.get(), source_path.c_str());\n  auto ret = CompileCode();\n  if (!ret) {\n    return ret;\n  }\n\n  MBLOG_INFO << \"Load source \" << source_path << \" success\";\n  return STATUS_OK;\n}\n\nmodelbox::Status JSCtx::CompileCode() {\n  auto ret = duk_pcompile(ctx_.get(), DUK_COMPILE_EVAL);\n  if (ret != 0) {\n    std::string err = \"Load source failed\";\n    const auto *duk_errmsg = duk_safe_to_string(ctx_.get(), -1);\n    if (duk_errmsg) {\n      err += \", err:\";\n      err += duk_errmsg;\n    }\n    MBLOG_ERROR << err;\n    duk_pop(ctx_.get());  // duk_pcomile\n    return {STATUS_FAULT, err};\n  }\n\n  duk_push_global_object(ctx_.get());\n  ret = duk_pcall_method(ctx_.get(), 0);\n  if (ret != 0) {\n    
std::string err = \"Load source failed, err:\";\n    const auto *duk_errmsg = duk_safe_to_string(ctx_.get(), -1);\n    if (duk_errmsg) {\n      err += duk_errmsg;\n    }\n\n    MBLOG_ERROR << err;\n    duk_pop(ctx_.get());  // pop duk_pcall_method return\n    return {STATUS_FAULT, err};\n  }\n\n  duk_pop(ctx_.get());  // pop duk_pcall_method return\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JSCtx::RegisterFunc(const std::string &name,\n                                     duk_c_function func, int32_t argc) {\n  auto handle = WaitCtx();\n  duk_push_c_function(ctx_.get(), func, argc);\n  duk_put_global_string(ctx_.get(), name.c_str());\n  return STATUS_OK;\n}\n\nmodelbox::Status JSCtx::CallFunc(const std::string &func_name,\n                                 const FillParam &fill_param,\n                                 const ReadReturn &read_return) {\n  auto handle = WaitCtx();\n  auto b_ret = duk_get_prop_string(ctx_.get(), -1, func_name.c_str());\n  if (!b_ret) {\n    duk_pop(ctx_.get());  // duk_get_prop_string\n    std::string err = \"func \" + func_name + \" not defined\";\n    MBLOG_ERROR << err;\n    return {STATUS_FAULT, err};\n  }\n\n  JSFunctionParam func_param(ctx_);\n  fill_param(func_param);\n\n  auto ret = duk_pcall(ctx_.get(), func_param.GetParamSize());\n  if (ret != 0) {\n    const auto *err = duk_safe_to_string(ctx_.get(), -1);\n    std::string err_msg = \"call \" + func_name + \" failed\";\n    if (err) {\n      err_msg += \", err:\";\n      err_msg += err;\n    }\n    duk_pop(ctx_.get());  // duk_pcall\n    MBLOG_ERROR << err_msg;\n    return {STATUS_FAULT, err_msg};\n  }\n\n  JSFunctionReturn func_ret(ctx_);\n  read_return(func_ret);\n  duk_pop(ctx_.get());  // duk_pcall\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n\n#endif  // ENABLE_JS_PLUGIN"
  },
  {
    "path": "src/modelbox/server/js_engine.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_JAVASCRIPT_ENGINE_H_\n#define MODELBOX_JAVASCRIPT_ENGINE_H_\n\n#ifdef ENABLE_JS_PLUGIN\n#include <duktape.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n\n#include <atomic>\n#include <condition_variable>\n#include <functional>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <vector>\n\nnamespace modelbox {\n\nclass JSFunctionParam {\n public:\n  JSFunctionParam(std::shared_ptr<duk_context> ctx);\n  virtual ~JSFunctionParam() = default;\n\n  void AddBoolean(bool val);\n  void AddInt32(int32_t val);\n  void AddUint32(uint32_t val);\n  void AddNum(double val);\n  void *AddBuffer(size_t size);\n  void AddString(const std::string &val);\n  void AddPointer(void *val);\n  void AddNull();\n  void AddHeapPtr(void *ptr);\n\n  size_t GetParamSize();\n\n private:\n  std::shared_ptr<duk_context> ctx_;\n  size_t argc_{0};\n};\n\nclass JSFunctionReturn {\n public:\n  JSFunctionReturn(std::shared_ptr<duk_context> ctx);\n  virtual ~JSFunctionReturn() = default;\n\n  bool GetBool();\n  int32_t GetInt32();\n  uint32_t GetUint32();\n  double GetNum();\n  std::string GetString();\n  void *GetPointer();\n\n private:\n  std::shared_ptr<duk_context> ctx_;\n};\n\nusing FillParam = std::function<void(JSFunctionParam &param)>;\nusing ReadReturn = 
std::function<void(JSFunctionReturn &ret)>;\n\nclass JSCtx {\n public:\n  JSCtx();\n  virtual ~JSCtx() = default;\n\n  /**\n   * @brief Init js context\n   * @return Result of init\n   */\n  modelbox::Status Init();\n\n  /**\n   * @brief Load js source code to ctx\n   * @param code Js source code\n   * @param code_name To indentify the source code\n   * @return Result of load\n   */\n  modelbox::Status LoadCode(const std::string &code,\n                            const std::string &code_name);\n\n  /**\n   * @brief Load js source file to ctx\n   * @param source_path Indicate js source file to load\n   * @return Result of load\n   */\n  modelbox::Status LoadSource(const std::string &source_path);\n\n  /**\n   * @brief Register target c function to js ctx\n   * @param name Function name which is visiable in js\n   * @param func Target function\n   * @param argc Arg number of target function\n   * @return Result of register\n   */\n  modelbox::Status RegisterFunc(const std::string &name, duk_c_function func,\n                                int32_t argc);\n\n  /**\n   * @brief Call target function in js\n   * @param func_name Target function name\n   * @param fill_param Func to fill param for target function\n   * @param read_return Func to read target function result\n   * @return Result of call func operation\n   */\n  modelbox::Status CallFunc(const std::string &func_name,\n                            const FillParam &fill_param = FillParamDefault,\n                            const ReadReturn &read_return = ReadReturnDefault);\n\n  /**\n   * @brief Get runtime pointer\n   * @return Runtime pointer\n   */\n  void *GetRuntime() const { return ctx_.get(); }\n\n  /**\n   * @brief Defaul fill param func, if target function needs no param\n   * @param param Param for target function\n   */\n  static void FillParamDefault(JSFunctionParam &param){};\n\n  /**\n   * @brief Defaul read return func, if you do not care target function return\n   * @param param Return of 
target function\n   */\n  static void ReadReturnDefault(JSFunctionReturn &ret){};\n\n private:\n  modelbox::Status CompileCode();\n\n  inline std::shared_ptr<modelbox::DeferGuard> WaitCtx() {\n    // need this to ensure excution order\n    // avoid starving caused by direct lock\n    auto my_seq = waiting_seq_.fetch_add(1);\n    std::unique_lock<std::mutex> lck(ctx_running_lock_);\n    ctx_running_cv_.wait(lck,\n                         [this, my_seq]() { return my_seq == execute_seq_; });\n    auto defer_guard = std::make_shared<modelbox::DeferGuard>([this]() {\n      std::unique_lock<std::mutex> lck(ctx_running_lock_);\n      ++execute_seq_;\n      ctx_running_cv_.notify_all();\n    });\n\n    return defer_guard;\n  }\n\n  std::shared_ptr<duk_context> ctx_;\n  std::atomic_uint64_t waiting_seq_{0};\n  std::atomic_uint64_t execute_seq_{0};\n  std::mutex ctx_running_lock_;\n  std::condition_variable ctx_running_cv_;\n};\n\n}  // namespace modelbox\n\n#endif  // ENABLE_JS_PLUGIN\n#endif  // MODELBOX_JAVASCRIPT_ENGINE_H_"
  },
  {
    "path": "src/modelbox/server/main.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <getopt.h>\n#include <netdb.h>\n#include <signal.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <memory>\n#include <thread>\n\n#include \"config.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/common/command.h\"\n#include \"modelbox/common/log.h\"\n#include \"modelbox/common/utils.h\"\n#include \"modelbox/manager/manager_monitor_client.h\"\n#include \"modelbox/server/timer.h\"\n#include \"modelbox/server/utils.h\"\n#include \"securec.h\"\n#include \"server.h\"\n\n#define MODELBOX_SERVER_LOG_PATH \"/var/log/modelbox/modelbox.log\"\n#define MODELBOX_SERVER_PID_FILE \"/var/run/modelbox.pid\"\n#define MODELBOX_MAX_INIT_TIME (60 * 12)\n\nstatic int g_sig_list[] = {\n    SIGIO,   SIGPWR,    SIGSTKFLT, SIGPROF, SIGINT,  SIGTERM,\n    SIGBUS,  SIGVTALRM, SIGTRAP,   SIGXCPU, SIGXFSZ, SIGILL,\n    SIGABRT, SIGFPE,    SIGSEGV,   SIGQUIT, SIGSYS,\n};\n\nstatic int g_sig_num = sizeof(g_sig_list) / sizeof(g_sig_list[0]);\nstatic bool kVerbose = false;\nstatic bool kForground = false;\n\nstatic void showhelp() {\n  /* clang-format off */\n    char help[] = \"\"\n        \"Usage: modelbox [OPTION]...\\n\"\n        \"Start modelbox server.\\n\"\n        \"  -c            configuration file.\\n\"\n        \"  
-f            run forground.\\n\"\n        \"  -p            pid file.\\n\"\n        \"  -u [user]     run as user.\\n\"\n        \"  -V            output log to screen.\\n\"\n        \"  -v            show server version.\\n\"\n        \"  -h            show this help message.\\n\"\n        \"\\n\";\n\n    printf(\"%s\", help);\n  /* clang-format on */\n}\n\nvoid modelbox_stop() { modelbox::kServerTimer->Stop(); }\n\nstatic void modelbox_sig_handler(int volatile sig_no, siginfo_t *sig_info,\n                                 void *volatile ptr) {\n  switch (sig_no) {\n    case SIGINT:\n    case SIGTERM:\n      modelbox_stop();\n      return;\n      break;\n    case SIGQUIT:\n      return;\n      break;\n    case SIGSEGV:\n    case SIGPIPE:\n    case SIGFPE:\n    case SIGABRT:\n    case SIGBUS:\n    case SIGILL: {\n      char buf[4096];\n      MBLOG_ERROR << \"Segment fault\"\n                  << \", Signal: \" << sig_no << \", Addr: \" << sig_info->si_addr\n                  << \", Code: \" << sig_info->si_code << \", Caused by: \";\n      if (modelbox::modelbox_cpu_register_data(buf, sizeof(buf),\n                                               (ucontext_t *)ptr) == 0) {\n        MBLOG_ERROR << \"CPU Register Info:\\n\" << buf;\n      }\n      MBLOG_STACKTRACE(modelbox::LOG_FATAL);\n      sleep(1);\n    } break;\n    default:\n      break;\n  }\n\n  _exit(1);\n}\n\nstatic int modelbox_reg_signal() {\n  if (modelbox::modelbox_sig_register(g_sig_list, g_sig_num,\n                                      modelbox_sig_handler) != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  return 0;\n}\n\nint modelbox_init_log(std::vector<std::string> &path_list) {\n  std::shared_ptr<modelbox::ModelboxServerLogger> logger =\n      std::make_shared<modelbox::ModelboxServerLogger>();\n\n  auto log_size = modelbox::GetBytesFromReadable(\n      modelbox::kConfig->GetString(\"log.size\", \"64MB\"));\n  auto log_num = 
modelbox::kConfig->GetUint32(\"log.num\", 64);\n  auto log_path =\n      modelbox::kConfig->GetString(\"log.path\", MODELBOX_SERVER_LOG_PATH);\n  auto log_screen = modelbox::kConfig->GetBool(\"log.screen\", false);\n  auto log_level = modelbox::kConfig->GetString(\"log.level\", \"INFO\");\n  if (log_screen) {\n    kVerbose = true;\n  }\n\n  log_path = modelbox::modelbox_full_path(log_path);\n\n  if (logger->Init(log_path, log_size, log_num, kVerbose) == false) {\n    fprintf(stderr, \"init logger failed.\\n\");\n    return 1;\n  }\n\n  ModelBoxLogger.SetLogger(logger);\n  logger->SetLogLevel(modelbox::LogLevelStrToLevel(log_level));\n\n  modelbox::CreateDirectory(modelbox::GetDirName(log_path), 0750);\n  path_list.push_back(modelbox::GetDirName(log_path));\n  path_list.push_back(log_path);\n\n  return 0;\n}\n\nint modelbox_init(std::vector<std::string> &path_list) {\n  if (modelbox_reg_signal() != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox_init_log(path_list)) {\n    return 1;\n  }\n\n  /* if in standalone mode */\n  if (modelbox::modelbox_root_dir().length() > 0) {\n    std::string default_scanpath =\n        modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     MODELBOX_DEFAULT_DRIVER_PATH);\n    modelbox::Drivers::SetDefaultScanPath(default_scanpath);\n\n    std::string default_driver_info_path =\n        modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     \"/var/run/modelbox-driver-info\");\n    modelbox::Drivers::SetDefaultInfoPath(default_driver_info_path);\n  }\n\n  return 0;\n}\n\nvoid modelbox_hung_check(const std::shared_ptr<modelbox::Server> &server) {\n  int is_status_ok = 1;\n  auto root = modelbox::Statistics::GetGlobalItem();\n\n  auto flowitem = root->GetItem(\"flow\");\n  if (flowitem == nullptr) {\n    app_monitor_heartbeat();\n    return;\n  }\n\n  auto flownames = 
flowitem->GetItemNames();\n  for (auto const &name : flownames) {\n    auto schedule_item = flowitem->GetItem(name + \".scheduler.status\");\n    if (schedule_item == nullptr) {\n      continue;\n    }\n    std::string schedule_status;\n    schedule_item->GetValue(schedule_status);\n    if (schedule_status != \"blocking\") {\n      continue;\n    }\n    MBLOG_WARN << \"flow \" << name << \" is blocking\";\n\n    is_status_ok = 0;\n  }\n\n  if (server->Check() != modelbox::STATUS_OK) {\n    is_status_ok = 0;\n  }\n\n  if (is_status_ok == 0) {\n    return;\n  }\n\n  app_monitor_heartbeat();\n}\n\nint modelbox_run(const std::shared_ptr<modelbox::Server> &server) {\n  int retval = 0;\n  auto server_init_timer = std::make_shared<modelbox::TimerTask>([]() {\n    MBLOG_INFO << \"server init timeout, you may change the init timeout \"\n                  \"value by setting the init_timeout in modelbox.conf\";\n    modelbox::kServerTimer->Stop();\n    modelbox::Abort(\"server init timeout\");\n  });\n\n  auto future =\n      std::async(std::launch::async, [server, &retval, &server_init_timer]() {\n        modelbox::Status ret;\n        Defer {\n          if (!ret) {\n            modelbox::kServerTimer->Stop();\n            retval = 1;\n          }\n        };\n\n        ret = server->Init();\n        if (!ret) {\n          MBLOG_ERROR << \"server init failed !\";\n          return 1;\n        }\n\n        ret = server->Start();\n        if (!ret) {\n          MBLOG_ERROR << \"server start failed !\";\n          return 1;\n        }\n\n        server_init_timer->Stop();\n        server_init_timer = nullptr;\n\n        return 0;\n      });\n\n  if (app_monitor_init(nullptr, nullptr) == 0) {\n    MBLOG_INFO << \"start manager heartbeat\";\n    std::shared_ptr<modelbox::TimerTask> heart_beattask =\n        std::make_shared<modelbox::TimerTask>();\n    heart_beattask->Callback(modelbox_hung_check, server);\n    modelbox::kServerTimer->Schedule(\n        heart_beattask, 0, 1000 * 
app_monitor_heartbeat_interval(), true);\n  }\n\n  auto init_timeout = modelbox::kConfig->GetUint32(\"server.init_timeout\",\n                                                   MODELBOX_MAX_INIT_TIME);\n  modelbox::kServerTimer->Schedule(server_init_timer, 1000 * init_timeout, 0,\n                                   false);\n\n  // run timer loop.\n  modelbox::kServerTimer->Run();\n\n  future.get();\n  server->Stop();\n  return retval;\n}\n\nstatic void onexit() {}\n\nint modelbox_change_user(const std::string &user_from_cmd,\n                         const std::vector<std::string> &path_list) {\n  /* run as user */\n  auto user = modelbox::kConfig->GetString(\"server.user\", \"\");\n  if (user_from_cmd.length() > 0) {\n    user = user_from_cmd;\n  }\n\n  if (user.length() == 0) {\n    return 0;\n  }\n\n  for (const auto &path : path_list) {\n    modelbox::ChownToUser(user, path);\n  }\n\n  auto ret = modelbox::RunAsUser(user);\n  if (ret == modelbox::STATUS_PERMIT) {\n    MBLOG_WARN << \"change user may fail, \" << user << \", \" << ret.WrapErrormsgs();\n  } else if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"run as user \" << user << \" failed, %s\"\n               << ret.WrapErrormsgs();\n    return 1;\n  }\n\n  MBLOG_INFO << \"run as user: \" << user;\n  return 0;\n}\n\n#ifdef BUILD_TEST\nint modelbox_server_main(int argc, char *argv[])\n#else\nint main(int argc, char *argv[])\n#endif\n{\n  std::string pidfile = MODELBOX_SERVER_PID_FILE;\n  int cmdtype = 0;\n  std::vector<std::string> path_list;\n  std::string user_from_cmd;\n\n  MODELBOX_COMMAND_GETOPT_SHORT_BEGIN(cmdtype, \"hc:Vvfp:n:k:Ku:\", nullptr)\n  switch (cmdtype) {\n    case 'p':\n      pidfile = modelbox::modelbox_full_path(optarg);\n      break;\n    case 'V':\n      kVerbose = true;\n      break;\n    case 'f':\n      kForground = true;\n      break;\n    case 'h':\n      showhelp();\n      return 1;\n    case 'c':\n      modelbox::kConfigPath = modelbox::modelbox_full_path(optarg);\n      
break;\n    case 'v':\n      printf(\"modelbox-server %s\\n\", modelbox::GetModelBoxVersion());\n      return 0;\n    case 'u':\n      user_from_cmd = optarg;\n      break;\n    default:\n      printf(\"Try %s -h for more information.\\n\", argv[0]);\n      return 1;\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  if (modelbox::LoadConfig(modelbox::kConfigPath) == false) {\n    fprintf(stderr, \"can not load configuration : %s \\n\",\n            modelbox::kConfigPath.c_str());\n    return 1;\n  }\n\n  auto log_screen = modelbox::kConfig->GetBool(\"log.screen\", kVerbose);\n  if (kForground == false && kVerbose == false) {\n    if (daemon(0, log_screen) < 0) {\n      fprintf(stderr, \"run daemon process failed, %s\\n\",\n              modelbox::StrError(errno).c_str());\n      return 1;\n    }\n  }\n\n  Defer { onexit(); };\n  /* 忽略SIGPIPE，避免发送缓冲区慢导致的进程退出 */\n  signal(SIGPIPE, SIG_IGN);\n\n  if (pidfile != \"-\") {\n    modelbox::CreateDirectory(modelbox::GetDirName(pidfile), 0755);\n    path_list.push_back(modelbox::GetDirName(pidfile));\n    if (modelbox::modelbox_create_pid(pidfile.c_str()) != 0) {\n      fprintf(stderr, \"create pid file failed.\\n\");\n      return 1;\n    }\n    path_list.push_back(pidfile);\n  }\n\n  if (modelbox_init(path_list) != 0) {\n    fprintf(stderr, \"init failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox_change_user(user_from_cmd, path_list) != 0) {\n    fprintf(stderr, \"change user failed.\\n\");\n    return 1;\n  }\n\n  MBLOG_INFO << \"modelbox config path : \" << modelbox::kConfigPath;\n  auto server = std::make_shared<modelbox::Server>(modelbox::kConfig);\n  modelbox::kServerTimer->Start();\n\n  if (modelbox_run(server) != 0) {\n    return 1;\n  }\n\n  MBLOG_INFO << \"exit modelbox process\";\n  return 0;\n}\n"
  },
  {
    "path": "src/modelbox/server/misc/modelbox-server-cmd.json.in",
    "content": "{\n    \"cmd-list\": [\n        {\n            \"name\": \"develop\",\n            \"exec\": \"@MODELBOX_ROOT_VAR@@MODELBOX_TOOLS_PATH@/develop\",\n            \"desc\": \"Setup develop enviroment\",\n            \"help-cmd\": \"@MODELBOX_ROOT_VAR@@MODELBOX_TOOLS_PATH@/develop --help\"\n        }\n    ]\n}\n"
  },
  {
    "path": "src/modelbox/server/plugin/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nadd_subdirectory(editor)\nadd_subdirectory(tasks)\n"
  },
  {
    "path": "src/modelbox/server/plugin/editor/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"plugin-editor\")\n\nproject(modelbox-${UNIT_NAME})\n\nfile(GLOB_RECURSE MODELBOX_UNIT_SOURCE *.cpp *.cc *.c)\nexclude_files_from_dir_in_list(MODELBOX_UNIT_SOURCE \"${MODELBOX_UNIT_SOURCE}\" \"${CMAKE_BINARY_DIR}/\")\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h @ONLY)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\n\nset(MODELBOX_SERVER_PLUGIN_EDITOR modelbox-plugin-editor)\n\nadd_library(${MODELBOX_SERVER_PLUGIN_EDITOR} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset_target_properties(${MODELBOX_SERVER_PLUGIN_EDITOR} PROPERTIES \n    OUTPUT_NAME \"modelbox-plugin-editor\"\n    PREFIX \"\"\n    SUFFIX \".so\")\n\ninstall(TARGETS ${MODELBOX_SERVER_PLUGIN_EDITOR} \n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\nset(MODELBOX_PLUGIN_EDITOR_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/${MODELBOX_SERVER_PLUGIN_EDITOR}.so)\n\ntarget_link_libraries(${MODELBOX_SERVER_PLUGIN_EDITOR} ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(${MODELBOX_SERVER_PLUGIN_EDITOR} 
pthread)\ntarget_link_libraries(${MODELBOX_SERVER_PLUGIN_EDITOR} rt)\n\nset(MODELBOX_SERVER_PLUGIN_EDITOR ${MODELBOX_SERVER_PLUGIN_EDITOR} CACHE INTERNAL \"\")\nset(MODELBOX_PLUGIN_EDITOR_SO_PATH ${MODELBOX_PLUGIN_EDITOR_SO_PATH} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/modelbox/server/plugin/editor/config.h.in",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_EDITOR_CONFIG_H_\n#define MODELBOX_EDITOR_CONFIG_H_\n\nnamespace modelbox {\n\n// driver default search path\n#define MODELBOX_DEMO_PATH \"@MODELBOX_DEMO_INSTALL_DIR@\"\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_EDITOR_CONFIG_H_"
  },
  {
    "path": "src/modelbox/server/plugin/editor/editor_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"editor_plugin.h\"\n\n#include <modelbox/base/popen.h>\n#include <pwd.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <sys/wait.h>\n\n#include <fstream>\n#include <iostream>\n#include <nlohmann/json.hpp>\n#include <toml.hpp>\n#include <typeinfo>\n#include <vector>\n\n#include \"config.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/common/flowunit_info.h\"\n#include \"modelbox/common/utils.h\"\n#include \"modelbox/server/utils.h\"\n\nconst std::string DEFAULT_WEB_ROOT =\n    \"${MODELBOX_ROOT}/usr/local/share/modelbox/www\";\nconst std::string DEFAULT_PROJECT_TEMPLATE_DIR =\n    \"${MODELBOX_ROOT}/usr/local/share/modelbox/project-template\";\nconst std::string DEFAULT_DEMO_ROOT_DIR = MODELBOX_DEMO_PATH;\nconstexpr const char* DEFAULT_MODELBOX_TEMPLATE_CMD =\n    \"${MODELBOX_ROOT}/usr/local/bin/modelbox-tool template\";\n\nconst std::string UI_url = \"/\";\nconst std::string flowunit_info_url = \"/editor/flow-info\";\nconst std::string basic_info = \"/editor/basic-info\";\nconst std::string demo_url = \"/editor/demo\";\nconst std::string graph_url = \"/editor/graph\";\nconst std::string flowunit_create_url = \"/editor/flowunit/create\";\nconst std::string project_url = \"/editor/project\";\nconst std::string project_template_url = \"/editor/project/template\";\nconst std::string 
// Map a file name to its MIME type based on its file extension.
// Matching is case-insensitive; names without a known extension (including
// names with no '.' at all) fall back to "application/octet-stream".
std::string ModelboxGetMimeType(const std::string& file) {
  // find_last_of returns npos when there is no '.'; npos + 1 == 0, so the
  // whole name becomes the "extension" and falls through to the default.
  std::string ext = file.substr(file.find_last_of('.') + 1);
  // Normalize case so "IMG.PNG" resolves the same as "img.png".
  std::transform(ext.begin(), ext.end(), ext.begin(),
                 [](unsigned char c) { return std::tolower(c); });

  const static std::map<std::string, std::string> mime_map = {
      {"htm", "text/html"},         {"html", "text/html"},
      {"js", "text/javascript"},    {"css", "text/css"},
      {"json", "application/json"}, {"png", "image/png"},
      {"gif", "image/gif"},         {"jpg", "image/jpeg"},
      {"jpeg", "image/jpeg"},       {"svg", "image/svg+xml"},
      {"tar", "application/x-tar"}, {"txt", "text/plain"},
      {"ico", "application/octet-stream"},
      {"xml", "text/xml"},          {"mpeg", "video/mpeg"},
      {"mp3", "audio/mpeg"},
  };

  auto itr = mime_map.find(ext);
  if (itr != mime_map.end()) {
    return itr->second;
  }

  return "application/octet-stream";
}
std::regex_match(path, invalid_str);\n}\n\nbool ModelboxEditorPlugin::Init(\n    std::shared_ptr<modelbox::Configuration> config) {\n  MBLOG_INFO << \"modelbox editor plugin init\";\n\n  bool ret = ParseConfig(config);\n  if (!ret) {\n    MBLOG_ERROR << \"parse config file failed\";\n    return false;\n  }\n\n  if (enable_ == false) {\n    MBLOG_INFO << \"editor is disabled.\";\n    return true;\n  }\n\n  auto endpoint = \"http://\" + server_ip_ + \":\" + server_port_;\n  listener_ = std::make_shared<modelbox::HttpListener>(endpoint);\n  MBLOG_INFO << \"run editor on \" << endpoint;\n  RegistHandlers();\n\n  return true;\n}\n\nstd::shared_ptr<modelbox::Plugin> CreatePlugin() {\n  MBLOG_INFO << \"create modelbox editor plugin\";\n  return std::make_shared<ModelboxEditorPlugin>();\n}\n\nvoid ModelboxEditorPlugin::RegistHandlers() {\n  struct Handler_Map {\n    const std::string path;\n    const modelbox::HttpMethod method;\n    void (ModelboxEditorPlugin::*func)(const httplib::Request& request,\n                                       httplib::Response& response);\n  };\n  Handler_Map handler_list[] = {\n      {UI_url, modelbox::HttpMethods::GET, &ModelboxEditorPlugin::HandlerUIGet},\n      {basic_info, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HanderBasicInfoGet},\n      {flowunit_info_url, modelbox::HttpMethods::PUT,\n       &ModelboxEditorPlugin::HandlerFlowUnitInfoPut},\n      {flowunit_info_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerFlowUnitInfoGet},\n      {demo_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerDemoGet},\n      {project_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerProjectGet},\n      {project_template_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerProjectTemplateListGet},\n      {project_list_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerProjectListGet},\n      {project_create_url, 
modelbox::HttpMethods::PUT,\n       &ModelboxEditorPlugin::HandlerProjectCreate},\n      {flowunit_create_url, modelbox::HttpMethods::PUT,\n       &ModelboxEditorPlugin::HandlerFlowUnitCreate},\n      {graph_url, modelbox::HttpMethods::PUT,\n       &ModelboxEditorPlugin::HandlerSaveGraph},\n      {graph_url, modelbox::HttpMethods::GET,\n       &ModelboxEditorPlugin::HandlerGraphModifyTime},\n      {pass_encode_url, modelbox::HttpMethods::PUT,\n       &ModelboxEditorPlugin::HandlerPassEncode},\n      {postman_url, modelbox::HttpMethods::POST,\n       &ModelboxEditorPlugin::HandlerPostman},\n  };\n\n  for (const auto& hander : handler_list) {\n    listener_->Register(hander.path, hander.method,\n                        std::bind(hander.func, this, std::placeholders::_1,\n                                  std::placeholders::_2));\n  }\n}\n\nstd::string ModelboxEditorPlugin::ResultMsg(const std::string& code,\n                                            const std::string& msg) {\n  nlohmann::json result_json;\n  result_json[\"code\"] = code;\n  result_json[\"msg\"] = msg;\n\n  return result_json.dump();\n}\n\nstd::string ModelboxEditorPlugin::ResultMsg(modelbox::Status& status) {\n  return ResultMsg(status.StrCode(), status.WrapErrormsgs());\n}\n\nvoid ModelboxEditorPlugin::SetUpResponse(httplib::Response& response,\n                                         modelbox::Status& status) {\n  switch (status.Code()) {\n    case modelbox::STATUS_SUCCESS:\n      response.status = modelbox::HttpStatusCodes::OK;\n      break;\n    case modelbox::STATUS_NOTFOUND:\n      response.status = modelbox::HttpStatusCodes::NOT_FOUND;\n      break;\n    case modelbox::STATUS_INVALID:\n    case modelbox::STATUS_BADCONF:\n      response.status = modelbox::HttpStatusCodes::BAD_REQUEST;\n      break;\n    default:\n      response.status = modelbox::HttpStatusCodes::INTERNAL_ERROR;\n      break;\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.set_content(ResultMsg(status), 
modelbox::JSON);\n}\n\nbool ModelboxEditorPlugin::GetHtmlFile(const std::string& in_file,\n                                       std::string* out_file,\n                                       std::string* redirect_file) {\n  auto in_file_canon = modelbox::PathCanonicalize(in_file, web_root_);\n  std::string base_filename = in_file.substr(in_file.find_last_of(\"/\\\\\") + 1);\n  if (base_filename.length() == 0 || base_filename.c_str()[0] == '\\0') {\n    base_filename = \"/\";\n  }\n\n  auto file_name = in_file_canon;\n  if (base_filename.find_first_of('.') == std::string::npos) {\n    if (base_filename != \"/\") {\n      // if not a specify file, then must be a directory\n      *redirect_file = in_file + \"/\";\n      return false;\n    }\n\n    auto default_file_name = file_name + \"/index.htm\";\n    if (access(default_file_name.c_str(), R_OK) != 0) {\n      default_file_name = file_name + \"/index.html\";\n      if (access(default_file_name.c_str(), R_OK) == 0) {\n        file_name = default_file_name;\n      }\n    } else {\n      file_name = default_file_name;\n    }\n  }\n\n  if (file_name.length() == 0) {\n    return false;\n  }\n\n  *out_file = file_name;\n\n  return true;\n}\n\nvoid ModelboxEditorPlugin::HandlerFlowUnitInfoGet(\n    const httplib::Request& request, httplib::Response& response) {\n  modelbox::ConfigurationBuilder config_builder;\n\n  return HandlerFlowUnitInfo(request, response, config_builder.Build());\n}\n\nvoid ModelboxEditorPlugin::HandlerFlowUnitInfoPut(\n    const httplib::Request& request, httplib::Response& response) {\n  modelbox::ConfigurationBuilder config_builder;\n  try {\n    auto body = nlohmann::json::parse(request.body);\n    if (body.find(\"skip-default\") != body.end()) {\n      config_builder.AddProperty(\n          \"driver.\" + std::string(DRIVER_SKIP_DEFAULT),\n          std::to_string(body[\"skip-default\"].get<bool>()));\n    }\n\n    if (body.find(\"dir\") != body.end()) {\n      std::vector<std::string> dirs;\n   
   for (auto& it : body[\"dir\"]) {\n        auto dir = it.get<std::string>();\n        if (!CheckBlackDir(dir)) {\n          dirs.push_back(dir);\n        }\n      }\n      config_builder.AddProperty(\"driver.\" + std::string(DRIVER_DIR), dirs);\n    }\n  } catch (const std::exception& e) {\n    std::string errmsg = \"Get info failed: \";\n    errmsg += e.what();\n    modelbox::Status ret = {modelbox::STATUS_INVALID, errmsg};\n    SetUpResponse(response, ret);\n    return;\n  }\n\n  return HandlerFlowUnitInfo(request, response, config_builder.Build());\n}\n\nmodelbox::Status ModelboxEditorPlugin::RunTemplateCommand(\n    const httplib::Request& request, httplib::Response& response,\n    const std::string& cmd) {\n  modelbox::Status ret = modelbox::STATUS_FAULT;\n\n  try {\n    std::string runcmd;\n    runcmd = template_cmd_ + \" \" + cmd;\n    auto body = nlohmann::json::parse(request.body);\n    ret = GenerateCommandFromJson(body, runcmd);\n    if (ret == modelbox::STATUS_OK) {\n      ret = RunCommand(runcmd);\n    }\n  } catch (const std::exception& e) {\n    modelbox::Status errret(\n        ret, std::string(\"run modelbox-tool failed, \") + e.what());\n    ret = errret;\n  }\n\n  SetUpResponse(response, ret);\n  if (ret != modelbox::STATUS_SUCCESS) {\n    MBLOG_WARN << ret.WrapErrormsgs();\n    return ret;\n  }\n\n  return ret;\n}\n\nvoid ModelboxEditorPlugin::HandlerFlowUnitCreate(\n    const httplib::Request& request, httplib::Response& response) {\n  auto ret = RunTemplateCommand(request, response, \"--flowunit\");\n  if (ret == modelbox::STATUS_OK) {\n    response.status = modelbox::HttpStatusCodes::CREATED;\n  }\n}\n\nvoid ModelboxEditorPlugin::HandlerSaveGraph(const httplib::Request& request,\n                                            httplib::Response& response) {\n  auto ret = SaveGraph(request);\n  SetUpResponse(response, ret);\n  if (ret == modelbox::STATUS_OK) {\n    response.status = modelbox::HttpStatusCodes::CREATED;\n  }\n}\n\nmodelbox::Status 
ModelboxEditorPlugin::SaveGraph(\n    const httplib::Request& request) {\n  try {\n    auto body = nlohmann::json::parse(request.body);\n    std::string toml_data;\n\n    auto graph_data = body[\"graph\"].dump();\n    auto graph_name = body[\"graph_name\"].get<std::string>();\n    auto path = body[\"graph_path\"].get<std::string>();\n    MBLOG_INFO << \"Save graph to : \" << path;\n\n    if (IsModelboxProjectDir(path) == false) {\n      return {modelbox::STATUS_INVALID, \"path is not a modelbox project\"};\n    }\n\n    auto ret = modelbox::JsonToToml(graph_data, &toml_data);\n    if (!ret) {\n      return {ret, \"convert json failed\"};\n    }\n\n    std::string graphfile = path + \"/src/graph/\" + graph_name + \".toml\";\n    std::ofstream out(graphfile, std::ios::trunc);\n    if (out.fail()) {\n      return {modelbox::STATUS_FAULT, std::string(\"save graph file failed, \") +\n                                          modelbox::StrError(errno) +\n                                          \", path: \" + graphfile};\n    }\n\n    chmod(graphfile.c_str(), 0600);\n    Defer { out.close(); };\n\n    out << toml_data;\n    if (out.fail()) {\n      return {modelbox::STATUS_FAULT, std::string(\"save graph file failed, \") +\n                                          modelbox::StrError(errno) +\n                                          \", path: \" + graphfile};\n    }\n\n  } catch (const std::exception& e) {\n    std::string errmsg = \"save graph info failed: \";\n    errmsg += e.what();\n    return {modelbox::STATUS_INVALID, errmsg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxEditorPlugin::ReadProjectName(const std::string& path,\n                                                       std::string& name) {\n  auto ret = RunCommand(template_cmd_ + \" -project -getname \\\"\" + path + \"\\\"\",\n                        nullptr, &name);\n  if (!ret) {\n    return ret;\n  }\n\n  name.erase(std::remove(name.begin(), name.end(), '\\n'), 
name.end());\n  name.erase(std::remove(name.begin(), name.end(), '\\r'), name.end());\n\n  return modelbox::STATUS_OK;\n}\n\nvoid ModelboxEditorPlugin::HandlerProjectGet(const httplib::Request& request,\n                                             httplib::Response& response) {\n  std::string project_name;\n\n  try {\n    if (request.has_param(\"path\") == false) {\n      modelbox::Status ret = {modelbox::STATUS_INVALID,\n                              \"argument path is not set.\"};\n      SetUpResponse(response, ret);\n      return;\n    }\n\n    std::string project_path;\n    project_path = request.params.find(\"path\")->second;\n    MBLOG_INFO << \"loading project: \" << project_path;\n\n    auto ret = ReadProjectName(project_path, project_name);\n    if (!ret) {\n      modelbox::Status rspret = {ret, \"Get project name failed.\"};\n      SetUpResponse(response, rspret);\n      return;\n    }\n\n    nlohmann::json json;\n    std::string result;\n    std::vector<std::string> graphs;\n    auto flowunit_path = project_path + \"/src/flowunit\";\n    auto graph_path = project_path + \"/src/graph\";\n\n    json[\"project_name\"] = project_name;\n    json[\"project_path\"] = project_path;\n    json[\"flowunits\"] = nlohmann::json::array();\n    json[\"graphs\"] = nlohmann::json::array();\n\n    ret = modelbox::ListSubDirectoryFiles(graph_path, \"*.toml\", &graphs);\n    if (!ret) {\n      modelbox::Status ret = {modelbox::STATUS_NOTFOUND,\n                              HTTP_RESP_ERR_CANNOT_READ};\n      SetUpResponse(response, ret);\n      return;\n    }\n\n    std::string json_data;\n    nlohmann::json graph;\n    for (const auto& g : graphs) {\n      ret = GraphFileToJson(g, json_data);\n      if (!ret) {\n        modelbox::Status rspret = {ret, \"graph toml\"};\n        SetUpResponse(response, rspret);\n        return;\n      }\n      graph = nlohmann::json::parse(json_data);\n      graph[\"name\"] = modelbox::GetBaseName(g);\n      
json[\"graphs\"].push_back(graph);\n    }\n\n    std::vector<std::string> flowunits;\n    ret = modelbox::ListSubDirectoryFiles(flowunit_path, \"*.toml\", &flowunits);\n    if (!ret) {\n      modelbox::Status ret = {modelbox::STATUS_NOTFOUND,\n                              HTTP_RESP_ERR_CANNOT_READ};\n      SetUpResponse(response, ret);\n      return;\n    }\n    for (const auto& f : flowunits) {\n      ret = GraphFileToJson(f, json_data);\n      if (!ret) {\n        modelbox::Status rspret = {ret, \"flowunit toml\"};\n        SetUpResponse(response, rspret);\n        return;\n      }\n      json[\"flowunits\"].push_back(nlohmann::json::parse(json_data));\n    }\n    result = json.dump();\n    MBLOG_DEBUG << \"infos: \" << result;\n    response.set_content(result, modelbox::JSON);\n  } catch (const std::exception& e) {\n    std::string errmsg = \"Get info failed: \";\n    errmsg += e.what();\n    MBLOG_ERROR << errmsg;\n    modelbox::Status ret = {modelbox::STATUS_INVALID, errmsg};\n    SetUpResponse(response, ret);\n    return;\n  }\n\n  response.status = modelbox::HttpStatusCodes::OK;\n}\n\nvoid ModelboxEditorPlugin::HandlerGraphModifyTime(\n    const httplib::Request& request, httplib::Response& response) {\n  nlohmann::json response_json;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  try {\n    if (request.has_param(\"graph_path\") == false) {\n      rspret = {modelbox::STATUS_INVALID, \"argument graph path is not set.\"};\n      SetUpResponse(response, rspret);\n      return;\n    }\n\n    std::string graph_path;\n    struct stat result;\n    graph_path = request.params.find(\"graph_path\")->second;\n    MBLOG_DEBUG << \"graph path: \" << graph_path;\n\n    auto root_path = graph_path.substr(0, graph_path.rfind(\"/src/graph\"));\n    MBLOG_DEBUG << \"root path: \" << root_path;\n\n    if (IsModelboxProjectDir(root_path) == false) {\n      rspret = {modelbox::STATUS_INVALID, \"path is not 
a modelbox project\"};\n      SetUpResponse(response, rspret);\n      return;\n    }\n\n    if (stat(graph_path.c_str(), &result) == 0) {\n      response_json[\"modify_time\"] = result.st_mtime;\n      response.status = modelbox::HttpStatusCodes::OK;\n    } else {\n      response.status = modelbox::HttpStatusCodes::NOT_FOUND;\n    }\n  } catch (const std::exception& e) {\n    std::string errmsg = \"internal error when searching path, \";\n    errmsg += e.what();\n    MBLOG_ERROR << errmsg;\n    rspret = {modelbox::STATUS_FAULT, errmsg};\n    return;\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nmodelbox::Status ModelboxEditorPlugin::GenerateCommandFromJson(\n    const nlohmann::json& body, std::string& cmd) {\n  nlohmann::json error_json;\n  for (const auto& element : body.items()) {\n    cmd += \" -\" + element.key();\n    if (element.value().is_null()) {\n      continue;\n    }\n\n    if (element.value().type() != nlohmann::json::value_t::array) {\n      cmd += \"=\" + element.value().dump();\n      continue;\n    }\n\n    int num = 0;\n    for (const auto& port : element.value()) {\n      if (num > 0) {\n        cmd += \" -\" + element.key() + \"=\";\n      } else {\n        cmd += \" \";\n      }\n\n      for (const auto& i : port.items()) {\n        cmd += i.key() + \"=\" + i.value().dump();\n        if (i != port.items().end()) {\n          cmd += \",\";\n        }\n      }\n      cmd = cmd.substr(0, cmd.length() - 1);\n      num += 1;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxEditorPlugin::RunCommand(const std::string& cmd,\n                                                  const std::string* in,\n                                                  std::string* out) {\n  modelbox::Popen p;\n  std::string outmsg;\n  std::string err;\n  std::string mode;\n\n  MBLOG_INFO << \"exec: \" << cmd;\n\n  if (in) {\n    mode = \"wre\";\n  } else {\n    mode = \"re\";\n  
}\n\n  auto retstatus = p.Open(cmd, 3000, mode.c_str(), template_cmd_env_);\n  if (!retstatus) {\n    return retstatus;\n  }\n\n  if (in) {\n    p.WriteString(*in);\n  }\n\n  auto ret = p.ReadAll(&outmsg, &err);\n  ret = p.Close();\n  if (ret == 0) {\n    retstatus = modelbox::STATUS_OK;\n  } else {\n    std::string errmsg = \"Execute command failed, ret: \";\n    errmsg += std::to_string(ret);\n    errmsg += \" error: \" + err;\n    retstatus = {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  if (out) {\n    *out = outmsg;\n  }\n\n  return retstatus;\n}\n\nvoid ModelboxEditorPlugin::HandlerProjectCreate(const httplib::Request& request,\n                                                httplib::Response& response) {\n  auto ret = RunTemplateCommand(request, response, \"--project\");\n  if (ret == modelbox::STATUS_OK) {\n    response.status = modelbox::HttpStatusCodes::CREATED;\n  }\n}\n\nvoid ModelboxEditorPlugin::HandlerFlowUnitInfo(\n    const httplib::Request& request, httplib::Response& response,\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  modelbox::FlowUnitInfo flowunit_info;\n\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  auto status = flowunit_info.Init(config);\n  if (!status) {\n    rspret = {status, HTTP_RESP_ERR_GETINFO_FAILED};\n    return;\n  }\n\n  std::string info;\n  status = flowunit_info.GetInfoInJson(&info);\n  if (!status) {\n    rspret = {status, HTTP_RESP_ERR_GETINFO_FAILED};\n    return;\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content(info, modelbox::JSON);\n}\n\nvoid ModelboxEditorPlugin::HandlerProjectTemplateListGet(\n    const httplib::Request& request, httplib::Response& response) {\n  std::vector<std::string> dirs;\n  std::map<std::string, std::vector<std::string>> graphs;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    
}\n  };\n\n  std::vector<std::string> files;\n  std::string template_dir = template_dir_ + \"/project\";\n  MBLOG_INFO << \"template_dir:\" << template_dir;\n  auto ret = modelbox::ListSubDirectoryFiles(template_dir, \"desc.toml\", &files);\n  if (!ret) {\n    rspret = {ret, HTTP_RESP_ERR_CANNOT_READ};\n    MBLOG_INFO << \"read template dir \" << template_dir << \" failed, \" << ret;\n    return;\n  }\n\n  nlohmann::json response_json;\n  response_json[\"project_template_list\"] = nlohmann::json::array();\n\n  for (const auto& file : files) {\n    std::string dirname = modelbox::GetBaseName(modelbox::GetDirName(file));\n    std::string name = dirname;\n\n    std::string json_data;\n    std::string desc;\n    auto ret = GraphFileToJson(file, json_data);\n    if (ret) {\n      try {\n        auto desc_json = nlohmann::json::parse(json_data);\n        desc_json[\"dirname\"] = dirname;\n        response_json[\"project_template_list\"].push_back(desc_json);\n      } catch (const std::exception& e) {\n        MBLOG_WARN << \"parser json \" << file << \" failed, \" << e.what();\n      }\n    }\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nvoid ModelboxEditorPlugin::HandlerProjectListGet(\n    const httplib::Request& request, httplib::Response& response) {\n  nlohmann::json response_json;\n  nlohmann::json subdir_json;\n  std::vector<std::string> listfiles;\n  subdir_json = nlohmann::json::array();\n  modelbox::Status ret;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  try {\n    if (request.has_param(\"path\") == false) {\n      rspret = {modelbox::STATUS_INVALID, \"argument path is not set.\"};\n      return;\n    }\n\n    std::string list_path;\n    list_path = request.params.find(\"path\")->second;\n    MBLOG_DEBUG << \"list path: \" << list_path;\n    ret = 
modelbox::ListFiles(list_path, \"*\", &listfiles,\n                              modelbox::LIST_FILES_DIR);\n    if (!ret) {\n      rspret = {ret, \"List path failed.\"};\n      return;\n    }\n\n    response_json[\"dirname\"] = list_path;\n    response_json[\"isproject\"] = IsModelboxProjectDir(list_path);\n\n    for (auto f : listfiles) {\n      nlohmann::json dirname;\n      dirname[\"dirname\"] = modelbox::GetBaseName(f);\n      dirname[\"isproject\"] = IsModelboxProjectDir(f);\n      subdir_json.push_back(dirname);\n    }\n\n    response_json[\"subdir\"] = subdir_json;\n  } catch (const std::exception& e) {\n    std::string errmsg = \"internal error when searching path, \";\n    errmsg += e.what();\n    MBLOG_ERROR << errmsg;\n    rspret = {modelbox::STATUS_FAULT, errmsg};\n    return;\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nbool ModelboxEditorPlugin::IsModelboxProjectDir(std::string& path) {\n  struct stat statbuf;\n  std::string checkfile;\n  checkfile = path + \"/CMakeLists.txt\";\n  if (stat(checkfile.c_str(), &statbuf) || !S_ISREG(statbuf.st_mode)) {\n    return false;\n  }\n\n  checkfile = path + \"/src\";\n  if (stat(checkfile.c_str(), &statbuf) || !S_ISDIR(statbuf.st_mode)) {\n    return false;\n  }\n\n  checkfile = path + \"/src/flowunit\";\n  if (stat(checkfile.c_str(), &statbuf) || !S_ISDIR(statbuf.st_mode)) {\n    return false;\n  }\n\n  checkfile = path + \"/src/graph\";\n  if (stat(checkfile.c_str(), &statbuf) || !S_ISDIR(statbuf.st_mode)) {\n    return false;\n  }\n\n  return true;\n}\n\nvoid ModelboxEditorPlugin::HandlerUIGet(const httplib::Request& request,\n                                        httplib::Response& response) {\n  auto path = request.path;\n  auto file_name = path;\n  struct stat path_stat;\n  std::string resolve_file;\n  std::string redirect_file;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    
}\n  };\n\n  MBLOG_DEBUG << \"request file:\" << file_name;\n  if (GetHtmlFile(file_name, &resolve_file, &redirect_file) == false) {\n    if (!redirect_file.empty()) {\n      response.status = modelbox::HttpStatusCodes::FOUND;\n      response.headers.insert(std::make_pair(\"location\", redirect_file));\n      return;\n    }\n\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FOUND};\n    return;\n  }\n\n  file_name = resolve_file;\n  if (file_name.find(web_root_) != 0) {\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FOUND};\n    return;\n  }\n\n  if (stat(file_name.c_str(), &path_stat) != 0) {\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FOUND};\n    return;\n  }\n\n  if (!S_ISREG(path_stat.st_mode)) {\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FILE};\n    return;\n  }\n\n  if (access(file_name.c_str(), R_OK | F_OK) != 0) {\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_CANNOT_READ};\n    return;\n  }\n\n  SendFile(file_name, response);\n}\n\nvoid ModelboxEditorPlugin::SendFile(const std::string& file_name,\n                                    httplib::Response& response) {\n  auto content_type = ModelboxGetMimeType(file_name);\n\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  auto file = std::shared_ptr<std::ifstream>(new std::ifstream(file_name),\n                                             [](std::ifstream* ptr) {\n                                               ptr->close();\n                                               delete ptr;\n                                             });\n  if (!file->is_open()) {\n    rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_CANNOT_READ};\n    return;\n  }\n\n  size_t data_size = 4096;\n  auto data = std::shared_ptr<char>(new (std::nothrow) char[data_size],\n                                    [](const char* ptr) { delete[] ptr; });\n  if (data == nullptr) {\n    
rspret = {modelbox::STATUS_FAULT, HTTP_RESP_ERR_CANNOT_READ};\n    return;\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content_provider(\n      content_type,\n      [file, data, data_size](size_t offset, httplib::DataSink& sink) {\n        if (file->eof()) {\n          sink.done();\n          return true;\n        }\n\n        file->read(data.get(), data_size);\n        auto ret = sink.write(data.get(), file->gcount());\n        if (!ret) {\n          return false;\n        }\n\n        return true;\n      });\n}\n\nmodelbox::Status ModelboxEditorPlugin::GraphFileToJson(const std::string& file,\n                                                       std::string& json_data) {\n  std::ifstream infile(file);\n  if (infile.fail()) {\n    return {modelbox::STATUS_NOTFOUND,\n            \"Get graph failed\" + modelbox::StrError(errno)};\n  }\n  Defer { infile.close(); };\n\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n  if (data.length() <= 0) {\n    return {modelbox::STATUS_BADCONF, \"graph file is invalid.\"};\n  }\n\n  std::string extension = file.substr(file.find_last_of('.') + 1);\n  if (extension == \"json\" || data[0] == '{') {\n    json_data = data;\n  } else {\n    auto ret = modelbox::TomlToJson(data, &json_data);\n    if (!ret) {\n      return {ret, \"graph format error\"};\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid ModelboxEditorPlugin::HanderBasicInfoGet(const httplib::Request& request,\n                                              httplib::Response& response) {\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  nlohmann::json response_json;\n  struct passwd pwd;\n  struct passwd* result;\n  std::vector<char> buff;\n  buff.resize(sysconf(_SC_GETPW_R_SIZE_MAX));\n  getpwuid_r(getuid(), &pwd, buff.data(), buff.size(), &result);\n  if (result 
== nullptr) {\n    rspret = {modelbox::STATUS_FAULT,\n              \"Get pw info failed, \" + modelbox::StrError(errno)};\n    return;\n  }\n\n  response_json[\"user\"] = result->pw_name;\n  response_json[\"home-dir\"] = result->pw_dir;\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nvoid ModelboxEditorPlugin::HandlerDemoGetList(const httplib::Request& request,\n                                              httplib::Response& response) {\n  std::vector<std::string> dirs;\n  std::map<std::string, std::vector<std::string>> graphs;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  auto ret =\n      modelbox::ListFiles(demo_path_, \"*\", &dirs, modelbox::LIST_FILES_DIR);\n  if (!ret) {\n    rspret = {ret, HTTP_RESP_ERR_CANNOT_READ};\n    return;\n  }\n\n  for (auto const& dir : dirs) {\n    std::vector<std::string> files;\n    std::string graphdir = dir + \"/graph\";\n\n    auto ret = modelbox::ListSubDirectoryFiles(graphdir, \"*.toml\", &files);\n    if (!ret) {\n      MBLOG_INFO << \"list directory \" << demo_path_ << \"failed.\";\n    }\n\n    ret = modelbox::ListSubDirectoryFiles(graphdir, \"*.json\", &files);\n    if (!ret) {\n      MBLOG_INFO << \"list directory \" << demo_path_ << \"failed.\";\n    }\n\n    if (files.size() == 0) {\n      continue;\n    }\n\n    graphs[dir] = files;\n  }\n\n  nlohmann::json response_json;\n  response_json[\"demo_list\"] = nlohmann::json::array();\n  for (const auto& it : graphs) {\n    std::string demoname = modelbox::GetBaseName(it.first);\n    for (const auto& file : it.second) {\n      nlohmann::json demo;\n      std::string filename = modelbox::GetBaseName(file);\n      std::string name = filename;\n\n      std::string json_data;\n      std::string desc;\n      auto ret = GraphFileToJson(file, json_data);\n      if (ret) {\n        try {\n       
   auto response = nlohmann::json::parse(json_data);\n          desc = response[\"flow\"][\"desc\"].get<std::string>();\n          name = response[\"flow\"][\"name\"].get<std::string>();\n        } catch (const std::exception& e) {\n          MBLOG_WARN << \"parser json \" << file << \" failed, \" << e.what();\n        }\n      }\n\n      demo[\"demo\"] = demoname;\n      demo[\"name\"] = name;\n      demo[\"desc\"] = desc;\n      demo[\"graphfile\"] = filename;\n      response_json[\"demo_list\"].push_back(demo);\n    }\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nvoid ModelboxEditorPlugin::HandlerDemoGet(const httplib::Request& request,\n                                          httplib::Response& response) {\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  try {\n    std::string relative_path = request.path.substr(demo_url.size());\n    std::string graph_file;\n    std::string demo_file;\n    std::string demo_name;\n    modelbox::SplitPath(relative_path, demo_name, graph_file);\n    if (demo_name.length() == 0 && graph_file.length() == 0) {\n      HandlerDemoGetList(request, response);\n      return;\n    }\n\n    if (graph_file.length() == 0 || demo_name.length() == 0) {\n      rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FOUND};\n      return;\n    }\n\n    demo_file = modelbox::PathCanonicalize(demo_name + \"/graph/\" + graph_file,\n                                           demo_path_);\n    if (demo_file.length() == 0) {\n      rspret = {modelbox::STATUS_NOTFOUND, HTTP_RESP_ERR_PATH_NOT_FOUND};\n      return;\n    }\n\n    std::string json_data;\n    MBLOG_INFO << \"load demo file \" << demo_file;\n    rspret = GraphFileToJson(demo_file, json_data);\n    if (!rspret) {\n      MBLOG_WARN << \"Get graph file failed, \" << rspret.WrapErrormsgs();\n      
std::string msg = \"demo file is invalid.\";\n      rspret = {modelbox::STATUS_BADCONF, msg};\n      return;\n    }\n\n    modelbox::AddSafeHeader(response);\n    response.status = modelbox::HttpStatusCodes::OK;\n    response.set_content(json_data, modelbox::JSON);\n    return;\n  } catch (const std::exception& e) {\n    MBLOG_ERROR << \"demo get failed, \" << e.what();\n    rspret = {modelbox::STATUS_FAULT,\n              std::string(HTTP_RESP_ERR_GETINFO_FAILED) + e.what()};\n    return;\n  }\n}\n\nvoid ModelboxEditorPlugin::HandlerPassEncode(const httplib::Request& request,\n                                             httplib::Response& response) {\n  std::string out;\n  nlohmann::json response_json;\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  try {\n    std::string keypass;\n    std::string plainpass;\n    std::string sysrelated = \" -n\";\n    keypass = \"modelbox-tool key -pass\";\n\n    auto body = nlohmann::json::parse(request.body);\n\n    if (body.find(\"password\") == body.end()) {\n      std::string errmsg = \"password key not found\";\n      rspret = {modelbox::STATUS_NOTFOUND, errmsg};\n      return;\n    }\n\n    plainpass = body[\"password\"].get<std::string>();\n\n    if (body.find(\"sysrelated\") != body.end()) {\n      auto issys = body[\"sysrelated\"].get<bool>();\n      if (issys == false) {\n        sysrelated = \"\";\n      }\n    }\n\n    keypass += sysrelated;\n    plainpass += \"\\n\";\n    rspret = RunCommand(keypass, &plainpass, &out);\n  } catch (const std::exception& e) {\n    std::string errmsg = \"encrypt password failed, \" + std::string(e.what());\n    rspret = {modelbox::STATUS_FAULT, errmsg};\n    return;\n  }\n\n  if (rspret != modelbox::STATUS_SUCCESS) {\n    MBLOG_WARN << rspret.WrapErrormsgs();\n    return;\n  }\n\n  auto lines = modelbox::StringSplit(out, '\\n');\n  if (lines.size() != 2) {\n    rspret = {modelbox::STATUS_FAULT, \"Run key command 
failed.\"};\n    return;\n  }\n\n  for (auto const& line : lines) {\n    auto values = modelbox::StringSplit(line, ':');\n    if (values.size() != 2) {\n      rspret = {modelbox::STATUS_FAULT, \"Get values failed.\"};\n      return;\n    }\n\n    if (values[0].find(\"Key\") != std::string::npos) {\n      response_json[\"key\"] =\n          std::string(values[1].begin() + 1, values[1].end());\n    } else if (values[0].find(\"Encrypted password\") != std::string::npos) {\n      response_json[\"enpass\"] =\n          std::string(values[1].begin() + 1, values[1].end());\n    }\n  }\n\n  modelbox::AddSafeHeader(response);\n  response.status = modelbox::HttpStatusCodes::OK;\n  response.set_content(response_json.dump(), modelbox::JSON);\n}\n\nvoid ModelboxEditorPlugin::HandlerPostman(const httplib::Request& request,\n                                          httplib::Response& response) {\n  modelbox::Status rspret;\n\n  Defer {\n    if (!rspret) {\n      SetUpResponse(response, rspret);\n    }\n  };\n\n  try {\n    std::string method;\n    std::string url;\n    bool hasbody = false;\n    bool hasheader = false;\n    nlohmann::json rheader;\n    nlohmann::json rbody;\n    modelbox::HttpMethod hmethod;\n\n    auto body = nlohmann::json::parse(request.body);\n    if (body.find(\"method\") != body.end()) {\n      method = body[\"method\"].get<std::string>();\n    } else {\n      rspret = {modelbox::STATUS_FAULT, \"Get method failed.\"};\n      return;\n    }\n\n    if (body.find(\"url\") != body.end()) {\n      url = body[\"url\"].get<std::string>();\n    } else {\n      rspret = {modelbox::STATUS_FAULT, \"Get url failed.\"};\n      return;\n    }\n\n    if (body.find(\"header\") != body.end()) {\n      rheader = nlohmann::json::parse(body[\"header\"].get<std::string>());\n      hasheader = true;\n    }\n\n    if (body.find(\"body\") != body.end()) {\n      rbody = nlohmann::json::parse(body[\"body\"].get<std::string>());\n      hasbody = true;\n    }\n\n    if (method == \"POST\") {\n      
hmethod = modelbox::HttpMethods::POST;\n    } else if (method == \"GET\") {\n      hmethod = modelbox::HttpMethods::GET;\n    } else if (method == \"DELETE\") {\n      hmethod = modelbox::HttpMethods::DELETE;\n    } else if (method == \"PUT\") {\n      hmethod = modelbox::HttpMethods::PUT;\n    }\n\n    modelbox::HttpRequest hrequest(hmethod, url);\n\n    if (hasbody) {\n      hrequest.SetBody(rbody);\n    }\n\n    if (hasheader) {\n      hrequest.SetHeaders(rheader);\n    }\n\n    rspret = modelbox::SendHttpRequest(hrequest);\n    if (rspret != modelbox::STATUS_SUCCESS) {\n      return;\n    }\n    auto test_response = hrequest.GetResponse();\n    modelbox::AddSafeHeader(response);\n\n    nlohmann::json response_json;\n    nlohmann::json test_response_json;\n\n    test_response_json[\"status\"] = test_response.status;\n    test_response_json[\"body\"] = test_response.body;\n    test_response_json[\"headers\"] = test_response.headers;\n\n    response_json[\"body\"] = test_response_json;\n\n    response.status = modelbox::HttpStatusCodes::OK;\n    response.set_content(response_json.dump(), modelbox::JSON);\n\n  } catch (const std::exception& e) {\n    std::string errmsg = \"internal error when debugging\";\n    errmsg += e.what();\n    MBLOG_ERROR << errmsg;\n    rspret = {modelbox::STATUS_FAULT, errmsg};\n    return;\n  }\n}\n\nbool ModelboxEditorPlugin::Start() {\n  if (enable_ == false) {\n    return true;\n  }\n\n  listener_->SetAclWhiteList(acl_white_list_);\n\n  listener_->Start();\n\n  auto ret = listener_->GetStatus();\n  if (!ret) {\n    MBLOG_ERROR << \"Start editor failed, err \" << ret;\n    return false;\n  }\n\n  return true;\n}\n\nbool ModelboxEditorPlugin::Stop() {\n  if (enable_ == false) {\n    return true;\n  }\n\n  listener_->Stop();\n\n  return true;\n}\n\nbool ModelboxEditorPlugin::ParseConfig(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  enable_ = config->GetBool(\"editor.enable\", false);\n  web_root_ = 
config->GetString(\"editor.root\", DEFAULT_WEB_ROOT);\n  server_ip_ = config->GetString(\"editor.ip\",\n                                 config->GetString(\"server.ip\", \"127.0.0.1\"));\n  server_port_ = config->GetString(\"editor.port\",\n                                   config->GetString(\"server.port\", \"1104\"));\n  demo_path_ = config->GetString(\"editor.demo_root\", DEFAULT_DEMO_ROOT_DIR);\n\n  template_cmd_ = config->GetString(\"editor.test.template_cmd\",\n                                    DEFAULT_MODELBOX_TEMPLATE_CMD);\n  template_cmd_env_ = config->GetString(\"editor.test.template_cmd_env\", \"\");\n  template_dir_ =\n      config->GetString(\"editor.template_dir\", DEFAULT_PROJECT_TEMPLATE_DIR);\n  acl_white_list_ = config->GetStrings(\"acl.allow\");\n\n  web_root_ = modelbox::modelbox_full_path(web_root_);\n  demo_path_ = modelbox::modelbox_full_path(demo_path_);\n  template_dir_ = modelbox::modelbox_full_path(template_dir_);\n  template_cmd_ = modelbox::modelbox_full_path(template_cmd_);\n\n  return true;\n}"
  },
  {
    "path": "src/modelbox/server/plugin/editor/editor_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MODELBOX_EDITOR_PLUGIN_H_\n#define MODELBOX_MODELBOX_EDITOR_PLUGIN_H_\n\n#include <nlohmann/json.hpp>\n\n#include \"modelbox/server/http_helper.h\"\n#include \"modelbox/server/plugin.h\"\n\nclass ModelboxEditorPlugin : public modelbox::Plugin {\n public:\n  ModelboxEditorPlugin() = default;\n  ~ModelboxEditorPlugin() override = default;\n\n  bool Init(std::shared_ptr<modelbox::Configuration> config) override;\n  bool Start() override;\n  bool Stop() override;\n\n  void RegistHandlers();\n  bool ParseConfig(const std::shared_ptr<modelbox::Configuration> &config);\n\n private:\n  void HandlerPassEncode(const httplib::Request &request,\n                         httplib::Response &response);\n  void HandlerDemoGet(const httplib::Request &request,\n                      httplib::Response &response);\n  void HandlerDemoGetList(const httplib::Request &request,\n                          httplib::Response &response);\n  void HandlerUIGet(const httplib::Request &request,\n                    httplib::Response &response);\n  void HanderBasicInfoGet(const httplib::Request &request,\n                          httplib::Response &response);\n  void SendFile(const std::string &file_name, httplib::Response &response);\n  void HandlerFlowUnitInfoPut(const httplib::Request &request,\n                             
 httplib::Response &response);\n  void HandlerFlowUnitInfoGet(const httplib::Request &request,\n                              httplib::Response &response);\n  void HandlerFlowUnitInfo(\n      const httplib::Request &request, httplib::Response &response,\n      const std::shared_ptr<modelbox::Configuration> &config);\n  void HandlerFlowUnitCreate(const httplib::Request &request,\n                             httplib::Response &response);\n  void HandlerFlowUnitGet(const httplib::Request &request,\n                          httplib::Response &response);\n  void HandlerProjectCreate(const httplib::Request &request,\n                            httplib::Response &response);\n  void HandlerProjectTemplateListGet(const httplib::Request &request,\n                                     httplib::Response &response);\n  void HandlerProjectGet(const httplib::Request &request,\n                         httplib::Response &response);\n  void HandlerProjectListGet(const httplib::Request &request,\n                             httplib::Response &response);\n  void HandlerSaveGraph(const httplib::Request &request,\n                        httplib::Response &response);\n  void HandlerGraphModifyTime(const httplib::Request& request,\n                        httplib::Response& response);\n  void HandlerPostman(const httplib::Request &request,\n                      httplib::Response &response);\n  bool GetHtmlFile(const std::string &in_file, std::string *out_file,\n                   std::string *redirect_file);\n  modelbox::Status GraphFileToJson(const std::string &file,\n                                   std::string &json_data);\n  bool CheckBlackDir(const std::string &dir);\n\n  std::string ResultMsg(const std::string &code, const std::string &msg);\n  std::string ResultMsg(modelbox::Status &status);\n  void SetUpResponse(httplib::Response &response, modelbox::Status &status);\n  modelbox::Status GenerateCommandFromJson(const nlohmann::json &body,\n                                  
         std::string &cmd);\n  modelbox::Status RunCommand(const std::string &cmd,\n                              const std::string *in = nullptr,\n                              std::string *out = nullptr);\n  modelbox::Status RunTemplateCommand(const httplib::Request &request,\n                                      httplib::Response &response,\n                                      const std::string &cmd);\n  modelbox::Status SaveGraph(const httplib::Request &request);\n  modelbox::Status ReadProjectName(const std::string &path, std::string &name);\n  bool IsModelboxProjectDir(std::string &path);\n  std::shared_ptr<modelbox::HttpListener> listener_;\n  std::string web_root_;\n  std::string demo_path_;\n  std::string server_ip_;\n  std::string server_port_;\n  // for test setting\n  std::string template_cmd_;\n  std::string template_cmd_env_;\n  std::string template_dir_;\n  std::vector<std::string> acl_white_list_;\n  bool enable_{false};\n  std::string url_;\n};\n\n#endif  // MODELBOX_MODELBOX_EDITOR_PLUGIN_H_\n"
  },
  {
    "path": "src/modelbox/server/plugin/tasks/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_NAME \"plugin\")\n\nproject(modelbox-${UNIT_NAME})\n\nfile(GLOB_RECURSE MODELBOX_UNIT_SOURCE *.cpp *.cc *.c)\nexclude_files_from_dir_in_list(MODELBOX_UNIT_SOURCE \"${MODELBOX_UNIT_SOURCE}\" \"${CMAKE_BINARY_DIR}/\")\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(MODELBOX_SERVER_PLUGIN modelbox-plugin)\n\nadd_library(${MODELBOX_SERVER_PLUGIN} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset_target_properties(${MODELBOX_SERVER_PLUGIN} PROPERTIES \n    OUTPUT_NAME \"modelbox-plugin\"\n    PREFIX \"\"\n    SUFFIX \".so\")\n\ninstall(TARGETS ${MODELBOX_SERVER_PLUGIN} \n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\nset(MODELBOX_PLUGIN_SO_PATH ${CMAKE_CURRENT_BINARY_DIR}/${MODELBOX_SERVER_PLUGIN}.so)\n\ntarget_link_libraries(${MODELBOX_SERVER_PLUGIN} pthread)\ntarget_link_libraries(${MODELBOX_SERVER_PLUGIN} rt)\n\nset(MODELBOX_SERVER_PLUGIN ${MODELBOX_SERVER_PLUGIN} CACHE INTERNAL \"\")\nset(MODELBOX_PLUGIN_SO_PATH ${MODELBOX_PLUGIN_SO_PATH} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "src/modelbox/server/plugin/tasks/modelbox_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox_plugin.h\"\n\n#include <dirent.h>\n\n#include <nlohmann/json.hpp>\n#include <toml.hpp>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/common/utils.h\"\n\nconst std::string SERVER_PATH = \"/v1/modelbox/job\";\nconstexpr const char* GRAPH_DISABLED_FLAG = \"DISABLED_\";\nconstexpr int MAX_FILES = 1 << 16;\n\nstd::map<std::string, std::string> ERROR_INFO = {\n    {\"MODELBOX_001\", \"server internal error\"},\n    {\"MODELBOX_002\", \"request invalid, no such job\"},\n    {\"MODELBOX_003\", \"request invalid, can not get jobId\"},\n    {\"MODELBOX_004\", \"request invalid, can not get graph\"},\n    {\"MODELBOX_005\", \"request invalid, job already exist\"},\n    {\"MODELBOX_006\", \"request invalid, invalid command\"},\n    {\"MODELBOX_007\",\n     \"job id contain invalid characters or contains more than 64 \"\n     \"characters, please rename job with valid \"\n     \"charaters.\"},\n    {\"MODELBOX_008\", \"request invalid, job config is invalid\"}};\n\nconst std::string ERROR_CODE = \"error_code\";\nconst std::string ERROR_MSG = \"error_msg\";\nconst char* HTTP_GRAPH_FORMAT_JSON = \"json\";\nconst char* HTTP_GRAPH_FORMAT_TOML = \"toml\";\n\nbool ModelboxPlugin::Init(std::shared_ptr<modelbox::Configuration> config) {\n  MBLOG_INFO << \"modelbox 
plugin init\";\n\n  bool ret = ParseConfig(config);\n  if (!ret) {\n    MBLOG_ERROR << \"parse config file failed\";\n    return false;\n  }\n\n  auto endpoint = \"http://\" + ip_ + \":\" + port_;\n  listener_ = std::make_shared<modelbox::HttpListener>(endpoint);\n  MBLOG_INFO << \"run modelbox plugin on \" << endpoint;\n  RegistHandlers();\n\n  return CreateLocalJobs();\n}\n\nstd::shared_ptr<modelbox::Plugin> CreatePlugin() {\n  MBLOG_INFO << \"create modelbox plugin\";\n  return std::make_shared<ModelboxPlugin>();\n}\n\nvoid ModelboxPlugin::RegistHandlers() {\n  MBLOG_INFO << \"modelbox plugin register handlers\";\n  MBLOG_INFO << \"regist url : \" << SERVER_PATH;\n\n  listener_->Register(SERVER_PATH, modelbox::HttpMethods::PUT,\n                      std::bind(&ModelboxPlugin::HandlerPut, this,\n                                std::placeholders::_1, std::placeholders::_2));\n\n  listener_->Register(SERVER_PATH, modelbox::HttpMethods::DELETE,\n                      std::bind(&ModelboxPlugin::HandlerDel, this,\n                                std::placeholders::_1, std::placeholders::_2));\n\n  listener_->Register(SERVER_PATH, modelbox::HttpMethods::GET,\n                      std::bind(&ModelboxPlugin::HandlerGet, this,\n                                std::placeholders::_1, std::placeholders::_2));\n}\n\nbool ModelboxPlugin::Start() {\n  listener_->SetAclWhiteList(acl_white_list_);\n  listener_->Start();\n\n  auto ret = listener_->GetStatus();\n  if (!ret) {\n    MBLOG_ERROR << \"Start modelbox plugin failed, err \" << ret;\n    return false;\n  }\n\n  return true;\n}\n\nbool ModelboxPlugin::Stop() {\n  listener_->Stop();\n\n  return true;\n}\n\nbool ModelboxPlugin::CheckJobIdValid(const std::string& job_id) {\n  constexpr int max_id_length = 64;\n  const std::string valid_char =\n      R\"(^[0-9a-zA-Z\\-\\+\\~\\_][0-9a-zA-Z\\-\\+\\~\\_\\.]+)\";\n  std::regex valid_str(valid_char);\n\n  if (job_id.length() > max_id_length) {\n    return false;\n  }\n\n  return 
std::regex_match(job_id, valid_str);\n}\n\nbool ModelboxPlugin::ParseConfig(\n    const std::shared_ptr<modelbox::Configuration>& config) {\n  ip_ = config->GetString(\"server.ip\");\n  if (ip_.length() <= 0) {\n    MBLOG_ERROR << \"can not find ip from config file\";\n    return false;\n  }\n\n  port_ = config->GetString(\"server.port\");\n  if (port_.length() <= 0) {\n    MBLOG_ERROR << \"can not find port from config file\";\n    return false;\n  }\n\n  default_flow_path_ = config->GetString(\"server.flow_path\");\n  acl_white_list_ = config->GetStrings(\"acl.allow\");\n  default_application_path_ = config->GetString(\"server.application_root\");\n  oneshot_flow_path_ = default_flow_path_ + \"/oneshot\";\n\n  default_flow_path_ = modelbox::modelbox_full_path(default_flow_path_);\n  default_application_path_ =\n      modelbox::modelbox_full_path(default_application_path_);\n  oneshot_flow_path_ = modelbox::modelbox_full_path(oneshot_flow_path_);\n  return true;\n}\n\nmodelbox::Status ModelboxPlugin::CreateLocalJobs() {\n  MBLOG_INFO << \"create local job\";\n  std::vector<std::string> files;\n  if (default_flow_path_.length() > 0) {\n    if (!modelbox::IsDirectory(default_flow_path_)) {\n      files.emplace_back(default_flow_path_);\n    } else {\n      auto ret = modelbox::ListFiles(default_flow_path_, \"*\", &files,\n                                     modelbox::LIST_FILES_FILE);\n      if (!ret) {\n        MBLOG_WARN << \"Load flow path failed. \" << ret;\n      }\n    }\n  }\n\n  if (default_application_path_.length() > 0) {\n    std::vector<std::string> project_dirs;\n    auto ret = modelbox::ListFiles(default_application_path_, \"*\",\n                                   &project_dirs, modelbox::LIST_FILES_DIR);\n    if (!ret) {\n      MBLOG_WARN << \"Load project path failed. 
\" << ret;\n    }\n\n    for (const auto& dir : project_dirs) {\n      std::string graph_dir = dir + \"/graph\";\n      modelbox::ListFiles(graph_dir, \"*.toml\", &files,\n                          modelbox::LIST_FILES_FILE);\n      modelbox::ListFiles(graph_dir, \"*.json\", &files,\n                          modelbox::LIST_FILES_FILE);\n    }\n  }\n\n  for (auto& file : files) {\n    // do not check return\n    auto jobname = modelbox::GetBaseName(file);\n    if (jobname.length() == 0) {\n      continue;\n    }\n\n    const std::string& job_id = jobname;\n    if (job_id.find(GRAPH_DISABLED_FLAG) != std::string::npos) {\n      MBLOG_INFO << \"graph '\" << file << \"' is disabled.\";\n      continue;\n    }\n\n    MBLOG_INFO << \"Create local job \" << file;\n    auto ret = CreateJobByFile(job_id, file);\n    if (!ret) {\n      MBLOG_WARN << \"create job \" << file << \" failed, \" << ret.WrapErrormsgs();\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxPlugin::SaveGraphFile(const std::string& job_id,\n                                               const std::string& toml_graph) {\n  auto ret = modelbox::CreateDirectory(oneshot_flow_path_);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT,\n            std::string(\"create graph directory failed, \") +\n                modelbox::StrError(errno) + \", path: \" + oneshot_flow_path_};\n  }\n\n  std::vector<std::string> list_files;\n  ret = modelbox::ListSubDirectoryFiles(oneshot_flow_path_, \"*\", &list_files);\n  if (!ret) {\n    return {modelbox::STATUS_FAULT,\n            std::string(\"list subdirectoryfiles failed, \") +\n                modelbox::StrError(errno) + \", path: \" + oneshot_flow_path_};\n  }\n\n  while (list_files.size() > MAX_FILES) {\n    size_t earliest_file_index = modelbox::FindTheEarliestFileIndex(list_files);\n    auto& earliest_file_path = list_files[earliest_file_index];\n    MBLOG_WARN << \"the graph file nums is more than \" << MAX_FILES\n               << \", 
remove the earliest access one, path: \"\n               << earliest_file_path;\n    auto ret = remove(earliest_file_path.c_str());\n    if (ret) {\n      return {modelbox::STATUS_FAULT,\n              std::string(\"remove earlier access file failed, \") +\n                  modelbox::StrError(errno)};\n    }\n    list_files.erase(list_files.begin() + earliest_file_index);\n  }\n\n  std::string path = oneshot_flow_path_ + \"/\" + job_id;\n  std::ofstream out(path, std::ios::trunc);\n  if (out.fail()) {\n    return {modelbox::STATUS_FAULT, std::string(\"save graph file failed, \") +\n                                        modelbox::StrError(errno) +\n                                        \", path: \" + path};\n  }\n\n  chmod(path.c_str(), 0600);\n  Defer { out.close(); };\n\n  out << toml_graph;\n  if (out.fail()) {\n    return {modelbox::STATUS_FAULT, std::string(\"save graph file failed, \") +\n                                        modelbox::StrError(errno) +\n                                        \", path: \" + path};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxPlugin::StartJob(\n    const std::shared_ptr<modelbox::Job>& job) {\n  auto ret = job->Init();\n  if (!ret) {\n    MBLOG_ERROR << \"start job init failed:\" << ret;\n    return ret;\n  }\n\n  ret = job->Build();\n  if (!ret) {\n    MBLOG_ERROR << \"start job build failed: \" << ret;\n    return ret;\n  }\n\n  job->Run();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxPlugin::CreateJobByFile(\n    const std::string& job_id, const std::string& graph_file) {\n  auto job = jobmanager_.CreateJob(job_id, graph_file);\n  if (job == nullptr) {\n    MBLOG_ERROR << \"create job \" << job_id << \" from \" << graph_file\n                << \"failed.\";\n    return modelbox::StatusError;\n  }\n\n  auto ret = StartJob(job);\n  if (!ret) {\n    MBLOG_ERROR << \"create job \" << job_id << \" from file failed\";\n    return ret;\n  }\n\n  return 
modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelboxPlugin::CreateJobByString(const std::string& job_id,\n                                                   const std::string& graph,\n                                                   const std::string& format) {\n  std::string toml_data;\n  modelbox::Status ret;\n  if (format == HTTP_GRAPH_FORMAT_TOML || format.length() == 0) {\n    toml_data = graph;\n  } else if (format == HTTP_GRAPH_FORMAT_JSON) {\n    if (modelbox::JsonToToml(graph, &toml_data) == false) {\n      return {modelbox::STATUS_INVALID, \"graph data is invalid.\"};\n    }\n  } else {\n    return {modelbox::STATUS_INVALID, \"graph type:\" + format + \" is invalid.\"};\n  }\n\n  auto job = jobmanager_.CreateJob(job_id, job_id, toml_data);\n  if (job == nullptr) {\n    return modelbox::StatusError;\n  }\n  Defer {\n    if (!ret) {\n      job->SetError(ret);\n    }\n  };\n\n  ret = SaveGraphFile(job_id, toml_data);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = StartJob(job);\n  if (!ret) {\n    MBLOG_ERROR << \"create job \" << job_id << \" from string failed\";\n    return ret;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string BuildErrorResponse(const std::string& error,\n                               const std::string& msg = \"\") {\n  nlohmann::json response;\n  std::string errmsg = ERROR_INFO[error];\n  if (msg.length() > 0) {\n    errmsg += \", \" + msg;\n  }\n\n  response[ERROR_CODE] = error;\n  response[ERROR_MSG] = errmsg;\n\n  return response.dump();\n}\n\nvoid ModelboxPlugin::HandlerPut(const httplib::Request& request,\n                                httplib::Response& response) {\n  std::string graph_format = HTTP_GRAPH_FORMAT_JSON;\n  std::string error_code = \"MODELBOX_001\";\n  std::string error_msg;\n  modelbox::AddSafeHeader(response);\n  bool is_failed = true;\n  Defer {\n    if (is_failed == false) {\n      return;\n    }\n\n    MBLOG_ERROR << \"Create task failed, \" << error_msg;\n    const auto& response_content = 
BuildErrorResponse(error_code, error_msg);\n    response.status = modelbox::HttpStatusCodes::BAD_REQUEST;\n    response.set_content(response_content, modelbox::JSON);\n  };\n\n  try {\n    nlohmann::json body;\n    try {\n      body = nlohmann::json::parse(request.body);\n    } catch (const std::exception& e) {\n      error_code = \"MODELBOX_006\";\n      MBLOG_ERROR << \"process request failed, \" << e.what();\n      error_msg = e.what();\n      return;\n    }\n\n    if (body.find(\"job_id\") == body.end()) {\n      error_code = \"MODELBOX_003\";\n      return;\n    }\n\n    auto jobid = body[\"job_id\"].get<std::string>();\n    if (body.find(\"job_graph\") == body.end()) {\n      error_code = \"MODELBOX_004\";\n      return;\n    }\n\n    if (!CheckJobIdValid(jobid)) {\n      error_code = \"MODELBOX_007\";\n      return;\n    }\n\n    if (body.find(\"job_graph_format\") != body.end()) {\n      graph_format = body[\"job_graph_format\"].get<std::string>();\n    }\n\n    std::string graph_data;\n    if (graph_format == HTTP_GRAPH_FORMAT_JSON) {\n      graph_data = body[\"job_graph\"].dump();\n    } else if (graph_format == HTTP_GRAPH_FORMAT_TOML) {\n      graph_data = body[\"job_graph\"].get<std::string>();\n    } else {\n      error_msg = \"Unsupport graph format: \" + graph_format;\n      error_code = \"MODELBOX_006\";\n      return;\n    }\n\n    if (modelbox::JobStatus::JOB_STATUS_NOTEXIST !=\n        jobmanager_.QueryJobStatus(jobid)) {\n      error_code = \"MODELBOX_005\";\n      return;\n    }\n\n    auto status = CreateJobByString(jobid, graph_data, graph_format);\n    if (!status) {\n      error_code = \"MODELBOX_008\";\n      error_msg = status.WrapErrormsgs();\n      return;\n    }\n\n    is_failed = false;\n  } catch (const std::exception& e) {\n    MBLOG_ERROR << \"process request failed, \" << e.what();\n    error_msg = e.what();\n    return;\n  }\n\n  response.status = modelbox::HttpStatusCodes::CREATED;\n}\n\nvoid ModelboxPlugin::HandlerGet(const 
httplib::Request& request,\n                                httplib::Response& response) {\n  modelbox::AddSafeHeader(response);\n  try {\n    std::string relative_path = request.path.substr(SERVER_PATH.size());\n    std::string pre_path;\n    std::string job_id;\n    modelbox::SplitPath(relative_path, pre_path, job_id);\n    if (pre_path.empty()) {\n      if (modelbox::JobStatus::JOB_STATUS_NOTEXIST ==\n          jobmanager_.QueryJobStatus(job_id)) {\n        const auto& response_content = BuildErrorResponse(\"MODELBOX_002\");\n        response.status = modelbox::HttpStatusCodes::NOT_FOUND;\n        response.set_content(response_content, modelbox::JSON);\n        return;\n      }\n\n      auto job_status = jobmanager_.QueryJobStatusString(job_id);\n      auto job_msg = jobmanager_.GetJobErrorMsg(job_id);\n      nlohmann::json response_json;\n      response_json[\"job_id\"] = job_id;\n      response_json[\"job_status\"] = job_status;\n      response_json[\"job_error_msg\"] = job_msg;\n      response.status = modelbox::HttpStatusCodes::OK;\n      response.set_content(response_json.dump(), modelbox::JSON);\n      return;\n    }\n\n    if (pre_path == \"/list\" && job_id == \"all\") {\n      nlohmann::json response_json;\n      response_json[\"job_list\"] = nlohmann::json::array();\n      auto jobs = jobmanager_.GetJobMap();\n      for (const auto& job : jobs) {\n        nlohmann::json job_state;\n        auto job_id = job.first;\n        auto job_status = jobmanager_.QueryJobStatusString(job_id);\n        auto job_msg = jobmanager_.GetJobErrorMsg(job_id);\n        job_state[\"job_id\"] = job_id;\n        job_state[\"job_status\"] = job_status;\n        job_state[\"job_error_msg\"] = job_msg;\n        response_json[\"job_list\"].push_back(job_state);\n      }\n\n      response.status = modelbox::HttpStatusCodes::OK;\n      response.set_content(response_json.dump(), modelbox::JSON);\n      return;\n    }\n\n    const auto& response_content = 
BuildErrorResponse(\"MODELBOX_006\");\n    response.status = modelbox::HttpStatusCodes::INTERNAL_ERROR;\n    response.set_content(response_content, modelbox::JSON);\n    return;\n  } catch (const std::exception& e) {\n    const auto& response_content = BuildErrorResponse(\"MODELBOX_001\");\n    response.status = modelbox::HttpStatusCodes::INTERNAL_ERROR;\n    response.set_content(response_content, modelbox::JSON);\n    return;\n  }\n}\n\nvoid ModelboxPlugin::HandlerDel(const httplib::Request& request,\n                                httplib::Response& response) {\n  modelbox::AddSafeHeader(response);\n  try {\n    std::string relative_path = request.path.substr(SERVER_PATH.size());\n    std::string pre_path;\n    std::string job_id;\n    modelbox::SplitPath(relative_path, pre_path, job_id);\n    if (modelbox::JobStatus::JOB_STATUS_NOTEXIST ==\n        jobmanager_.QueryJobStatus(job_id)) {\n      const auto& response_content = BuildErrorResponse(\"MODELBOX_002\");\n      response.status = modelbox::HttpStatusCodes::NOT_FOUND;\n      response.set_content(response_content, modelbox::JSON);\n      return;\n    }\n\n    auto job = jobmanager_.GetJob(job_id);\n    job->Stop();\n    bool ret = jobmanager_.DeleteJob(job_id);\n    if (!ret) {\n      const auto& response_content = BuildErrorResponse(\"MODELBOX_002\");\n      response.status = modelbox::HttpStatusCodes::BAD_REQUEST;\n      response.set_content(response_content, modelbox::JSON);\n    }\n  } catch (const std::exception& e) {\n    const auto& response_content = BuildErrorResponse(\"MODELBOX_001\");\n    response.status = modelbox::HttpStatusCodes::INTERNAL_ERROR;\n    response.set_content(response_content, modelbox::JSON);\n    return;\n  }\n\n  response.status = modelbox::HttpStatusCodes::NO_CONTENT;\n}"
  },
  {
    "path": "src/modelbox/server/plugin/tasks/modelbox_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MODELBOX_PLUGIN_H_\n#define MODELBOX_MODELBOX_PLUGIN_H_\n\n#include \"memory\"\n#include \"modelbox/server/http_helper.h\"\n#include \"modelbox/server/job_manager.h\"\n#include \"modelbox/server/plugin.h\"\n\nclass ModelboxPlugin : public modelbox::Plugin {\n public:\n  ModelboxPlugin() = default;\n  ~ModelboxPlugin() override = default;\n\n  bool Init(std::shared_ptr<modelbox::Configuration> config) override;\n  bool Start() override;\n  bool Stop() override;\n\n  void RegistHandlers();\n  bool ParseConfig(const std::shared_ptr<modelbox::Configuration>& config);\n  void RegistCallbacks();\n  bool CheckMethodVaild(std::string method);\n  bool CheckUrlVaild(std::string url);\n\n private:\n  void HandlerPut(const httplib::Request& request, httplib::Response& response);\n  void HandlerGet(const httplib::Request& request, httplib::Response& response);\n  void HandlerDel(const httplib::Request& request, httplib::Response& response);\n\n  modelbox::Status CreateLocalJobs();\n  modelbox::Status CreateJobByFile(const std::string& job_id,\n                                   const std::string& graph_file);\n  modelbox::Status CreateJobByString(const std::string& job_id,\n                                     const std::string& graph,\n                                     const std::string& format);\n\n  
modelbox::Status StartJob(const std::shared_ptr<modelbox::Job>& job);\n\n  modelbox::Status SaveGraphFile(const std::string& job_id,\n                                 const std::string& toml_graph);\n  bool CheckJobIdValid(const std::string& job_id);\n\n  std::string ip_;\n  std::string port_;\n  std::string default_flow_path_;\n  std::string default_application_path_;\n  std::string oneshot_flow_path_;\n\n  std::vector<std::string> acl_white_list_;\n  std::shared_ptr<modelbox::HttpListener> listener_;\n  modelbox::JobManager jobmanager_;\n};\n\n#endif  // MODELBOX_MODELBOX_PLUGIN_H_\n"
  },
  {
    "path": "src/modelbox/server/plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/server/plugin.h>\n\nnamespace modelbox {\n\nPlugin::Plugin() = default;\n\nPlugin::~Plugin() = default;\n\nbool Plugin::Check() { return true; }\n\nmodelbox::Status PluginMsgRouter::RegisterRecvFunc(\n    const std::string &topic_name, const PluginRecvMsgFunc &func) {\n  std::lock_guard<std::mutex> lock(receivers_lock_);\n  auto &func_list = receivers_[topic_name];\n  func_list.push_back(func);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PluginMsgRouter::RouteMsg(\n    const std::string &topic_name, const std::string &msg_name,\n    const std::shared_ptr<const void> &msg_data, size_t msg_len) {\n  std::lock_guard<std::mutex> lock(receivers_lock_);\n  if (receivers_.find(topic_name) == receivers_.end()) {\n    MBLOG_ERROR << \"Topic \" << topic_name << \" is not found, send msg \"\n                << msg_name << \" failed\";\n    return modelbox::STATUS_NOTFOUND;\n  }\n\n  auto &func_list = receivers_[topic_name];\n  auto receive_action = [](const std::vector<PluginRecvMsgFunc> &func_list,\n                           const std::string &msg_name,\n                           const std::shared_ptr<const void> &msg_data,\n                           size_t msg_len) {\n    for (const auto &func : func_list) {\n      func(msg_name, msg_data, msg_len);\n    }\n  };\n\n  
thread_pool_.Submit(receive_action, func_list, msg_name, msg_data, msg_len);\n  return modelbox::STATUS_OK;\n}\n\nstd::shared_ptr<PluginMsgRouter> PluginMsgRouter::GetInstance() {\n  static auto instance = std::make_shared<PluginMsgRouter>();\n  return instance;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/server.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"server.h\"\n\n#include <functional>\n\n#include \"config.h\"\n#include \"modelbox/base/configuration.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n\nnamespace modelbox {\n\nServer::~Server() { plugins_.clear(); }\n\nmodelbox::Status Server::Init() {\n  if (config_ == nullptr) {\n    return modelbox::STATUS_BADCONF;\n  }\n\n  auto ret = control_.Init(config_);\n  if (!ret) {\n    MBLOG_ERROR << \"Init control failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  ret = GetPluginList();\n  if (!ret) {\n    MBLOG_ERROR << \"server parse config failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  for (auto &plugin : plugins_) {\n    auto ret = plugin->Init(config_);\n    if (!ret) {\n      MBLOG_ERROR << \"init plugin \" << plugin->PluginFile() << \" failed\";\n      return ret;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status Server::Start() {\n  MBLOG_INFO << \"app server start\";\n  if (!control_.Start()) {\n    MBLOG_ERROR << \"start control failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::shared_ptr<ServerPlugin> last_plugin;\n  try {\n    for (auto &plugin : plugins_) {\n      auto ret = plugin->Start();\n      if (!ret) {\n        MBLOG_ERROR << \"Plugin, start failed, \" << plugin->PluginFile();\n        return 
modelbox::STATUS_FAULT;\n      }\n      plugin->SetInit(true);\n      last_plugin = plugin;\n    }\n  } catch (const std::exception &e) {\n    if (last_plugin) {\n      MBLOG_ERROR << \"Plugin, start failed, \" << last_plugin->PluginFile()\n                  << \" reason: \" << e.what();\n    } else {\n      MBLOG_ERROR << \"Plugin, start failed, \"\n                  << \" reason: \" << e.what();\n    }\n    return {modelbox::STATUS_FAULT,\n            std::string(\"start plugin failed, \") + e.what()};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status Server::Stop() {\n  MBLOG_INFO << \"app server stop\";\n  for (auto &plugin : plugins_) {\n    if (plugin->IsInit() == false) {\n      continue;\n    }\n    plugin->Stop();\n  }\n\n  auto router = PluginMsgRouter::GetInstance();\n  router->Clear();\n  control_.Stop();\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status Server::Check() {\n  for (auto &plugin : plugins_) {\n    if (plugin->IsInit() == false) {\n      continue;\n    }\n    \n    if (plugin->Check() != modelbox::STATUS_OK) {\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\n\nmodelbox::Status Server::GetPluginList() {\n  auto plugin_path_list = config_->GetStrings(\"plugin.files\");\n  if (plugin_path_list.size() <= 0) {\n    MBLOG_ERROR << \"can not find plugin path from config file\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  MBLOG_INFO << \"plugin list:\";\n  for (auto path : plugin_path_list) {\n    MBLOG_INFO << \" \" << path;\n    path = modelbox_full_path(path);\n    auto plugin = ServerPlugin::MakePlugin(path);\n    plugins_.push_back(plugin);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/server.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_H_\n#define MODELBOX_SERVER_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n\n#include <iostream>\n#include <memory>\n#include <utility>\n\n#include \"control.h\"\n#include \"server_plugin.h\"\n\nnamespace modelbox {\n\nclass Server {\n public:\n  Server(std::shared_ptr<modelbox::Configuration> config)\n      : config_(std::move(config)){};\n  virtual ~Server();\n  modelbox::Status Init();\n  modelbox::Status Start();\n  modelbox::Status Stop();\n  modelbox::Status Check();\n\n private:\n  modelbox::Status GetPluginList();\n\n  std::vector<std::shared_ptr<ServerPlugin>> plugins_;\n  std::shared_ptr<modelbox::Configuration> config_;\n  Control control_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_H_\n"
  },
  {
    "path": "src/modelbox/server/server_plugin.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"server_plugin.h\"\n\n#include <dlfcn.h>\n\n#include <functional>\n#include <map>\n#include <utility>\n\nnamespace modelbox {\n\ntypedef std::shared_ptr<Plugin> (*CreatePluginFunc)();\n\nstd::map<std::string, std::function<std::shared_ptr<ServerPlugin>(\n                          const std::string &plugin_path)>>\n    plugin_construct_map = {{\".so\",\n                             [](const std::string &plugin_path) {\n                               return std::make_shared<DlPlugin>(plugin_path);\n                             }},\n                            {\".js\", [](const std::string &plugin_path) {\n                               return std::make_shared<JsPlugin>(plugin_path);\n                             }}};\n\nServerPlugin::ServerPlugin(std::string plugin_path)\n    : plugin_path_(std::move(plugin_path)) {}\n\nServerPlugin::~ServerPlugin() = default;\n\nstd::shared_ptr<ServerPlugin> ServerPlugin::MakePlugin(\n    const std::string &plugin_path) {\n  auto suffix_pos = plugin_path.find_last_of('.');\n  if (suffix_pos != std::string::npos) {\n    auto suffix = plugin_path.substr(suffix_pos);\n    auto type_item = plugin_construct_map.find(suffix);\n    if (type_item != plugin_construct_map.end()) {\n      return type_item->second(plugin_path);\n    }\n  }\n\n  return 
std::make_shared<DlPlugin>(plugin_path);\n}\n\nmodelbox::Status ServerPlugin::Check() { return modelbox::STATUS_OK; }\n\nstd::string ServerPlugin::PluginFile() { return plugin_path_; }\n\nbool ServerPlugin::IsInit() { return is_init_; }\n\nvoid ServerPlugin::SetInit(bool init) { is_init_ = init; }\n\nDlPlugin::DlPlugin(const std::string &plugin_path)\n    : ServerPlugin(plugin_path) {}\n\nDlPlugin::~DlPlugin() {\n  plugin_.reset();\n  if (plugin_handler_ == nullptr) {\n    return;\n  }\n\n  dlclose(plugin_handler_);\n}\n\nmodelbox::Status DlPlugin::Init(\n    std::shared_ptr<modelbox::Configuration> config) {\n  modelbox::Status ret = modelbox::STATUS_FAULT;\n\n  plugin_handler_ = dlopen(plugin_path_.c_str(), RTLD_NOW);\n  if (plugin_handler_ == nullptr) {\n    std::string errmsg = \"Open library \" + plugin_path_ + \" failed\";\n    auto *dlerr_msg = dlerror();\n    if (dlerr_msg != nullptr) {\n      errmsg += \", \";\n      errmsg += dlerr_msg;\n    }\n    MBLOG_ERROR << errmsg;\n    return {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  Defer {\n    if (!ret && plugin_handler_ != nullptr) {\n      dlclose(plugin_handler_);\n      plugin_handler_ = nullptr;\n    }\n  };\n\n  CreatePluginFunc create_plugin_func;\n  create_plugin_func = (CreatePluginFunc)dlsym(plugin_handler_, \"CreatePlugin\");\n\n  if (create_plugin_func == nullptr) {\n    std::string errmsg = \"Cannot find symbol CreatePlugin\";\n    auto *dlerr_msg = dlerror();\n    if (dlerr_msg != nullptr) {\n      errmsg += \", \";\n      errmsg += dlerr_msg;\n    }\n    MBLOG_ERROR << errmsg;\n    return {modelbox::STATUS_FAULT, errmsg};\n  }\n\n  auto plugin = create_plugin_func();\n  if (plugin == nullptr) {\n    MBLOG_ERROR << \"create plugin failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (!plugin->Init(config)) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  plugin_ = plugin;\n  ret = modelbox::STATUS_OK;\n\n  return ret;\n}\n\nmodelbox::Status DlPlugin::Start() {\n  if (plugin_ == nullptr) {\n  
  MBLOG_ERROR << \"plugin is null\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  return plugin_->Start();\n}\n\nmodelbox::Status DlPlugin::Stop() {\n  if (plugin_ == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  return plugin_->Stop();\n}\n\nmodelbox::Status DlPlugin::Check() {\n  if (plugin_ == nullptr) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (plugin_->Check() == false) {\n    return STATUS_FAULT;\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/server_plugin.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_PLUGIN_H_\n#define MODELBOX_SERVER_PLUGIN_H_\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/statistics.h>\n\n#include <memory>\n#include <string>\n\n#include \"modelbox/server/plugin.h\"\n\nnamespace modelbox {\n\nclass ServerPlugin {\n public:\n  ServerPlugin(std::string plugin_path);\n  virtual ~ServerPlugin();\n\n  virtual modelbox::Status Init(\n      std::shared_ptr<modelbox::Configuration> config) = 0;\n  virtual modelbox::Status Start() = 0;\n  virtual modelbox::Status Stop() = 0;\n  virtual modelbox::Status Check();\n\n  static std::shared_ptr<ServerPlugin> MakePlugin(\n      const std::string &plugin_path);\n\n  std::string PluginFile();\n\n protected:\n  std::string plugin_path_;\n\n private:\n  friend class Server;\n  bool IsInit();\n  void SetInit(bool init);\n  bool is_init_{false};\n};\n\n/**\n * @brief Dynamic library plugin\n */\nclass DlPlugin : public ServerPlugin {\n public:\n  DlPlugin(const std::string &plugin_path);\n  ~DlPlugin() override;\n\n  modelbox::Status Init(\n      std::shared_ptr<modelbox::Configuration> config) override;\n  modelbox::Status Start() override;\n  modelbox::Status Stop() override;\n  modelbox::Status Check() override;\n\n private:\n  std::shared_ptr<Plugin> plugin_;\n  void 
*plugin_handler_{nullptr};\n};\n\nclass JSCtx;\n\n/**\n * @brief Javascript plugin\n */\nclass JsPlugin : public ServerPlugin {\n public:\n  JsPlugin(const std::string &plugin_path);\n  ~JsPlugin() override;\n\n  modelbox::Status Init(\n      std::shared_ptr<modelbox::Configuration> config) override;\n  modelbox::Status Start() override;\n  modelbox::Status Stop() override;\n\n  void RegisterStatsNotify(\n      const std::string &path_pattern,\n      const std::set<modelbox::StatisticsNotifyType> &type_list,\n      const std::string &func_name, void *priv_data, size_t delay = 0,\n      size_t interval = 0);\n\n  static void AddMap(void *runtime, JsPlugin *plugin);\n\n  static void DelMap(void *runtime);\n\n  static JsPlugin *GetPlugin(void *runtime);\n\n private:\n  modelbox::Status RegisterCFunction();\n  modelbox::Status LoadInitCode();\n\n  std::shared_ptr<JSCtx> js_ctx_;\n  std::vector<std::shared_ptr<modelbox::StatisticsNotifyCfg>> notify_cfg_list_;\n\n  static std::mutex runtime_to_plugin_lock;\n  static std::map<void *, JsPlugin *> runtime_to_plugin;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_PLUGIN_H_"
  },
  {
    "path": "src/modelbox/server/server_plugin_js.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <map>\n#include <mutex>\n\n#include \"server_plugin.h\"\n\n#ifdef ENABLE_JS_PLUGIN\n#include \"js_engine.h\"\n#endif\n\nnamespace modelbox {\n\nstd::mutex JsPlugin::runtime_to_plugin_lock;\nstd::map<void *, JsPlugin *> JsPlugin::runtime_to_plugin;\n\n#ifdef ENABLE_JS_PLUGIN\n\nstd::set<modelbox::StatisticsNotifyType> NotifyTypesFromUint(\n    uint32_t type_mask) {\n  std::set<modelbox::StatisticsNotifyType> type_list;\n  if (type_mask & (uint32_t)modelbox::StatisticsNotifyType::CREATE) {\n    type_list.insert(modelbox::StatisticsNotifyType::CREATE);\n  }\n\n  if (type_mask & (uint32_t)modelbox::StatisticsNotifyType::CHANGE) {\n    type_list.insert(modelbox::StatisticsNotifyType::CHANGE);\n  }\n\n  if (type_mask & (uint32_t)modelbox::StatisticsNotifyType::DELETE) {\n    type_list.insert(modelbox::StatisticsNotifyType::DELETE);\n  }\n\n  if (type_mask & (uint32_t)modelbox::StatisticsNotifyType::TIMER) {\n    type_list.insert(modelbox::StatisticsNotifyType::TIMER);\n  }\n\n  return type_list;\n}\n\nstatic duk_ret_t JSRegisterStatsNotify(duk_context *ctx) {\n  const auto *path_pattern = duk_require_string(ctx, 0);\n  auto type_mask = duk_require_uint(ctx, 1);\n  const auto *func_name = duk_require_string(ctx, 2);\n  auto *priv_data = duk_get_heapptr(ctx, 3);\n\n  if (func_name == nullptr || path_pattern == 
nullptr) {\n    MBLOG_ERROR << \"param is invalid.\";\n    return -1;\n  }\n\n  MBLOG_INFO << \"JSPlugin register notify[path:\" << path_pattern\n             << \",type:\" << type_mask << \",func:\" << func_name\n             << \",priv_data:\" << priv_data << \"]\";\n\n  auto *plugin_ctx = JsPlugin::GetPlugin(ctx);\n  if (plugin_ctx == nullptr) {\n    MBLOG_ERROR << \"plugin_ctx is null\";\n    return -1;  // return error to js\n  }\n\n  auto type_list = NotifyTypesFromUint(type_mask);\n  if (type_list.empty()) {\n    MBLOG_ERROR << \"register type should not be empty\";\n    return 0;\n  }\n\n  ((JsPlugin *)plugin_ctx)\n      ->RegisterStatsNotify(path_pattern, type_list, func_name, priv_data);\n  return 0;  // means this function has no return value in value stack\n}\n\n/**\n * @note Not recommended\n */\nstatic duk_ret_t JSRegisterStatsTimerNotify(duk_context *ctx) {\n  const auto *path_pattern = duk_require_string(ctx, 0);\n  auto timer_delay = duk_require_uint(ctx, 1);\n  auto timer_interval = duk_require_uint(ctx, 2);\n  const auto *func_name = duk_require_string(ctx, 3);\n  auto *priv_data = duk_get_heapptr(ctx, 4);\n  if (path_pattern == nullptr || func_name == nullptr) {\n    MBLOG_ERROR << \"register param is invalid\";\n    return -1;\n  }\n  MBLOG_INFO << \"JSPlugin register timer notify[path:\" << path_pattern\n             << \",delay:\" << timer_delay << \",interval:\" << timer_interval\n             << \",func:\" << func_name << \",priv_data:\" << priv_data << \"]\";\n\n  auto *plugin_ctx = JsPlugin::GetPlugin(ctx);\n  if (plugin_ctx == nullptr) {\n    MBLOG_ERROR << \"plugin_ctx is null\";\n    return -1;  // return error to js\n  }\n\n  ((JsPlugin *)plugin_ctx)\n      ->RegisterStatsNotify(path_pattern,\n                            {modelbox::StatisticsNotifyType::TIMER}, func_name,\n                            priv_data, timer_delay, timer_interval);\n  return 0;\n}\n\nstatic duk_ret_t JSGetStatsValue(duk_context *ctx) {\n  const auto *path = 
duk_require_string(ctx, 0);\n  auto stats = modelbox::Statistics::GetGlobalItem();\n  if (stats == nullptr || path == nullptr) {\n    MBLOG_ERROR << \"Global item is invalid.\";\n    return -1;\n  }\n\n  auto item = stats->GetItem(path);\n  if (item == nullptr) {\n    MBLOG_ERROR << \"Get value for \" << path << \" failed\";\n    duk_push_null(ctx);\n  } else {\n    duk_push_string(ctx, item->GetValue()->ToString().c_str());\n  }\n\n  return 1;  // means this function has return value in value stack\n}\n\nstatic duk_ret_t JSRouteData(duk_context *ctx) {\n  const auto *topic = duk_require_string(ctx, 0);\n  const auto *msg_name = duk_require_string(ctx, 1);\n  if (topic == nullptr || msg_name == nullptr) {\n    MBLOG_ERROR << \"get message name failed.\";\n    return -1;\n  }\n\n  auto msg_data = std::make_shared<std::string>();\n  *msg_data = duk_require_string(ctx, 2);\n  if (msg_data == nullptr) {\n    MBLOG_ERROR << \"get message data failed.\";\n    return -1;\n  }\n\n  MBLOG_DEBUG << \"Send data to \" << topic << \", \" << msg_name << \", \"\n              << *msg_data;\n  std::shared_ptr<const void> buffer;\n  auto router = PluginMsgRouter::GetInstance();\n  buffer.reset(msg_data->data(), [msg_data](const void *ptr) {});\n  auto ret = router->RouteMsg(topic, msg_name, buffer, msg_data->size());\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"JSPlugin: route msg failed: \" << topic << \", \" << msg_name\n                << \", \" << *msg_data;\n  }\n\n  return 0;\n}\n\nstatic void GetCallerInfo(duk_context *ctx, std::string &file_name,\n                          int32_t &line_no) {\n  line_no = 0;\n  file_name = \"unknown\";\n  duk_inspect_callstack_entry(ctx, -3);\n  if (duk_is_object(ctx, -1)) {\n    duk_get_prop_string(ctx, -1, \"lineNumber\");\n    line_no = duk_to_int(ctx, -1);\n    duk_pop(ctx);\n\n    duk_get_prop_string(ctx, -1, \"function\");\n\n    duk_get_prop_string(ctx, -1, \"fileName\");\n    const auto *js_file_name = 
duk_to_string(ctx, -1);\n    if (js_file_name) {\n      file_name = js_file_name;\n    }\n    duk_pop(ctx);\n\n    duk_pop(ctx);\n  }\n\n  duk_pop(ctx);  // Callstack\n}\n\nstatic duk_ret_t JSModelboxLog(duk_context *ctx) {\n  const auto *level = duk_require_string(ctx, 0);\n  const auto *msg = duk_require_string(ctx, 1);\n\n  if (level == nullptr || msg == nullptr) {\n    MBLOG_ERROR << \"input param is invalid\";\n    return -1;\n  }\n\n  std::string file_name;\n  int32_t line_no;\n  GetCallerInfo(ctx, file_name, line_no);\n\n  std::string pre_fix =\n      \"[ \" + file_name + \":\" + std::to_string(line_no) + \" ] \";\n  auto print_msg = pre_fix + msg;\n  if (level == std::string(\"fatal\")) {\n    MBLOG_FATAL << print_msg;\n  } else if (level == std::string(\"error\")) {\n    MBLOG_ERROR << print_msg;\n  } else if (level == std::string(\"warn\")) {\n    MBLOG_WARN << print_msg;\n  } else if (level == std::string(\"notice\")) {\n    MBLOG_NOTICE << print_msg;\n  } else if (level == std::string(\"info\")) {\n    MBLOG_INFO << print_msg;\n  } else {\n    MBLOG_DEBUG << print_msg;\n  }\n\n  return 0;\n}\n\nJsPlugin::JsPlugin(const std::string &plugin_path)\n    : ServerPlugin(plugin_path), js_ctx_(std::make_shared<JSCtx>()) {}\n\nJsPlugin::~JsPlugin() { DelMap(js_ctx_->GetRuntime()); }\n\nmodelbox::Status JsPlugin::Init(\n    std::shared_ptr<modelbox::Configuration> config) {\n  auto ret = js_ctx_->Init();\n  if (!ret) {\n    return ret;\n  }\n\n  AddMap(js_ctx_->GetRuntime(), this);\n  (void)RegisterCFunction();\n  ret = LoadInitCode();\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Load init code failed\";\n    return ret;\n  }\n\n  ret = js_ctx_->LoadSource(plugin_path_);\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Load plugin \" << plugin_path_ << \" failed\";\n    return ret;\n  }\n\n  int32_t js_func_ret = -1;\n  ret = js_ctx_->CallFunc(\n      \"init\", [this](JSFunctionParam &param) { param.AddPointer(this); },\n      
[&js_func_ret](JSFunctionReturn &ret) { js_func_ret = ret.GetInt32(); });\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Call plugin init \" << plugin_path_ << \" failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (js_func_ret != 0) {\n    MBLOG_ERROR << \"Plugin init \" << plugin_path_\n                << \" failed, ret:\" << js_func_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JsPlugin::RegisterCFunction() {\n  js_ctx_->RegisterFunc(\"registerStatsNotify\", JSRegisterStatsNotify, 4);\n  js_ctx_->RegisterFunc(\"registerStatsTimerNotify\", JSRegisterStatsTimerNotify,\n                        5);\n  js_ctx_->RegisterFunc(\"getStatsValue\", JSGetStatsValue, 1);\n  js_ctx_->RegisterFunc(\"routeData\", JSRouteData, 3);\n  js_ctx_->RegisterFunc(\"modelboxLog\", JSModelboxLog, 2);\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JsPlugin::LoadInitCode() {\n  std::string init_var_code = R\"(\n    var NOTIFY_CREATE = 1;\n    var NOTIFY_DELETE = 2;\n    var NOTIFY_CHANGE = 4;\n    var NOTIFY_TIMER = 8;\n\n    var console = {};\n\n    console.log = function (msg) {\n      modelboxLog(\"info\", msg);\n    }\n\n    console.info = console.log\n\n    console.warn = function (msg) {\n      modelboxLog(\"warn\", msg);\n    }\n\n    console.error = function (msg) {\n      modelboxLog(\"error\", msg);\n    }\n  )\";\n  return js_ctx_->LoadCode(init_var_code, \"JsPluginInitCode\");\n}\n\nmodelbox::Status JsPlugin::Start() {\n  int32_t js_func_ret = -1;\n  auto ret = js_ctx_->CallFunc(\n      \"start\", [this](JSFunctionParam &param) { param.AddPointer(this); },\n      [&js_func_ret](JSFunctionReturn &ret) { js_func_ret = ret.GetInt32(); });\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Call plugin start \" << plugin_path_ << \" failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (js_func_ret != 0) {\n    MBLOG_ERROR << \"Plugin start \" << plugin_path_\n                << \" failed, 
ret:\" << js_func_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status JsPlugin::Stop() {\n  int32_t js_func_ret = -1;\n  auto stats = modelbox::Statistics::GetGlobalItem();\n  for (auto &notify_cfg : notify_cfg_list_) {\n    stats->UnRegisterNotify(notify_cfg);\n  }\n\n  auto ret = js_ctx_->CallFunc(\n      \"stop\", [this](JSFunctionParam &param) { param.AddPointer(this); },\n      [&js_func_ret](JSFunctionReturn &ret) { js_func_ret = ret.GetInt32(); });\n  if (ret != modelbox::STATUS_OK) {\n    MBLOG_ERROR << \"Call plugin stop \" << plugin_path_ << \" failed\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  if (js_func_ret != 0) {\n    MBLOG_ERROR << \"Plugin stop \" << plugin_path_\n                << \" failed, ret:\" << js_func_ret;\n    return modelbox::STATUS_FAULT;\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid JsPlugin::RegisterStatsNotify(\n    const std::string &path_pattern,\n    const std::set<modelbox::StatisticsNotifyType> &type_list,\n    const std::string &func_name, void *priv_data, size_t delay,\n    size_t interval) {\n  auto stats = modelbox::Statistics::GetGlobalItem();\n  std::weak_ptr<JSCtx> js_ctx_ref = js_ctx_;\n  auto notify_cfg = std::make_shared<modelbox::StatisticsNotifyCfg>(\n      path_pattern,\n      [js_ctx_ref, func_name, priv_data](\n          const std::shared_ptr<const modelbox::StatisticsNotifyMsg> &msg) {\n        auto js_ctx_ptr = js_ctx_ref.lock();\n        if (js_ctx_ptr == nullptr) {\n          return;\n        }\n\n        js_ctx_ptr->CallFunc(func_name, [&](JSFunctionParam &param) {\n          param.AddString(msg->path_);\n          std::string value_str;\n          if (msg->value_ != nullptr) {\n            value_str = msg->value_->ToString();\n          }\n          param.AddString(value_str);\n          param.AddUint32((uint32_t)msg->type_);\n          param.AddHeapPtr(priv_data);\n        });\n      },\n      type_list);\n  if 
(type_list.find(modelbox::StatisticsNotifyType::TIMER) !=\n      type_list.end()) {\n    notify_cfg->SetNotifyTimer(delay, interval);\n  }\n\n  notify_cfg_list_.push_back(notify_cfg);\n  stats->RegisterNotify(notify_cfg);\n}\n#else   // ENABLE_JS_PLUGIN\nJsPlugin::JsPlugin(const std::string &plugin_path)\n    : ServerPlugin(plugin_path) {}\nJsPlugin::~JsPlugin() = default;\n\nmodelbox::Status JsPlugin::Init(\n    std::shared_ptr<modelbox::Configuration> config) {\n  MBLOG_ERROR << \"Js plugin is not enabled, please remove [\" << plugin_path_\n              << \"] from conf\";\n  return modelbox::STATUS_NOTSUPPORT;\n}\nmodelbox::Status JsPlugin::Start() { return modelbox::STATUS_NOTSUPPORT; }\nmodelbox::Status JsPlugin::Stop() { return modelbox::STATUS_NOTSUPPORT; }\n\nvoid JsPlugin::RegisterStatsNotify(\n    const std::string &path_pattern,\n    const std::set<modelbox::StatisticsNotifyType> &type_list,\n    const std::string &func_name, void *priv_data, size_t delay,\n    size_t interval) {}\n#endif  // ENABLE_JS_PLUGIN\n\nvoid JsPlugin::AddMap(void *runtime, JsPlugin *plugin) {\n  std::lock_guard<std::mutex> lck(runtime_to_plugin_lock);\n  runtime_to_plugin[runtime] = plugin;\n}\n\nvoid JsPlugin::DelMap(void *runtime) {\n  std::lock_guard<std::mutex> lck(runtime_to_plugin_lock);\n  runtime_to_plugin.erase(runtime);\n}\n\nJsPlugin *JsPlugin::GetPlugin(void *runtime) {\n  std::lock_guard<std::mutex> lck(runtime_to_plugin_lock);\n  auto item = runtime_to_plugin.find(runtime);\n  if (item == runtime_to_plugin.end()) {\n    return nullptr;\n  }\n\n  return item->second;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/task.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/base/utils.h>\n#include <modelbox/server/task.h>\n#include <modelbox/server/task_manager.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nTask::Task() {\n  flow_ = nullptr;\n  status_ = WAITING;\n  already_submit_ = false;\n}\n\nTask::~Task() = default;\n\nstd::string Task::GetUUID() { return task_uuid_; }\n\nstd::shared_ptr<BufferList> Task::CreateBufferList() {\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return nullptr;\n  }\n  return external_data->CreateBufferList();\n}\n\nStatus Task::SetDataMeta(const std::string& port_name,\n                         std::shared_ptr<DataMeta> meta) {\n  std::unique_lock<std::mutex> guard(lock_);\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return {STATUS_NOTFOUND, \"external_data not found\"};\n  }\n\n  if (status_ != WAITING) {\n    return {STATUS_INVALID, \"the task already start\"};\n  }\n\n  return external_data->SetOutputMeta(port_name, std::move(meta));\n}\n\nStatus Task::SetSessionContent(const std::string& key,\n                               std::shared_ptr<void> content) {\n  std::unique_lock<std::mutex> guard(lock_);\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return {STATUS_NOTFOUND, \"external_data not 
found\"};\n  }\n\n  if (status_ != WAITING) {\n    return {STATUS_INVALID, \"the task already start\"};\n  }\n\n  auto session_ctx = external_data->GetSessionContext();\n  if (session_ctx == nullptr) {\n    return {STATUS_NOTFOUND, \"session_ctx not found\"};\n  }\n\n  session_ctx->SetPrivate(key, std::move(content));\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<modelbox::Configuration> Task::GetSessionConfig() {\n  std::unique_lock<std::mutex> guard(lock_);\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return nullptr;\n  }\n\n  return external_data->GetSessionConfig();\n}\n\nstd::string Task::GetTaskId() { return task_uuid_; }\n\nTaskStatus Task::GetTaskStatus() {\n  std::unique_lock<std::mutex> guard(lock_);\n  return status_;\n}\n\nstd::shared_ptr<FlowUnitError> Task::GetLastError() {\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return nullptr;\n  }\n  return external_data->GetLastError();\n}\n\nStatus Task::Stop() {\n  std::unique_lock<std::mutex> guard(lock_);\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return {STATUS_NOTFOUND, \"external_data not found\"};\n  }\n\n  if (status_ == STOPPED || status_ == ABNORMAL || status_ == FINISHED) {\n    return {STATUS_INVALID, \"task is already finished\"};\n  }\n\n  auto status = external_data->Shutdown();\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n  cv_.wait(guard, [this]() {\n    return status_ == STOPPED || status_ == ABNORMAL || status_ == FINISHED;\n  });\n  return STATUS_SUCCESS;\n}\n\nbool Task::IsReady() {\n  std::unique_lock<std::mutex> guard(lock_);\n  if (already_submit_ && status_ == WAITING) {\n    return true;\n  }\n  return false;\n}\n\nbool Task::IsTaskSubmitted() {\n  std::unique_lock<std::mutex> guard(lock_);\n  return already_submit_;\n}\n\nStatus Task::Start() {\n  std::lock_guard<std::mutex> guard(lock_);\n  auto task_manager = task_manager_.lock();\n  if 
(task_manager == nullptr) {\n    return {STATUS_NOTFOUND, \"task manager is empty\"};\n  }\n\n  if (status_ != WAITING) {\n    return {STATUS_PERMIT, \"task is already started\"};\n  }\n\n  if (already_submit_) {\n    return {STATUS_PERMIT, \"task is in the waiting queue\"};\n  }\n  already_submit_ = true;\n\n  auto status = task_manager->Submit(shared_from_this());\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nvoid Task::SetTaskManager(const std::shared_ptr<TaskManager>& task_manager) {\n  task_manager_ = task_manager;\n  flow_ = task_manager->GetFlow();\n  std::shared_ptr<ExternalDataMap> external_data;\n  if (flow_ != nullptr) {\n    external_data = flow_->CreateExternalDataMap();\n  }\n\n  std::shared_ptr<SessionContext> session_ctx;\n  if (external_data != nullptr) {\n    session_ctx = external_data->GetSessionContext();\n  }\n\n  if (session_ctx != nullptr) {\n    // Set task_uuid equal to session_id to establish connection between task\n    // and graph session\n    task_uuid_ = session_ctx->GetSessionId();\n  }\n\n  auto selector = task_manager->GetSelector();\n  selector->RegisterExternalData(external_data);\n  external_data_ = external_data;\n}\n\nstd::shared_ptr<ExternalDataMap> Task::GetExternalData() {\n  auto external_data = external_data_.lock();\n  return external_data;\n}\n\nStatus Task::SendData() {\n  std::unique_lock<std::mutex> guard(lock_);\n  if (status_ != WAITING) {\n    MBLOG_ERROR << \"Task \" << task_uuid_ << \" already started\";\n    return {STATUS_INVALID, \"the task already start\"};\n  }\n\n  status_ = WORKING;\n  guard.unlock();\n  while (true) {\n    auto status = FeedData();\n    if (status == STATUS_EOF) {\n      break;\n    }\n    if (status != STATUS_SUCCESS) {\n      MBLOG_ERROR << \"Feed data to task \" << task_uuid_\n                  << \" failed, ret: \" << status;\n      Stop();\n      return status;\n    }\n  }\n  return STATUS_SUCCESS;\n}\n\nvoid 
Task::UpdateTaskStatus(TaskStatus task_status) {\n  std::unique_lock<std::mutex> guard(lock_);\n  status_ = task_status;\n  cv_.notify_all();\n}\n\nOneShotTask::OneShotTask() : Task() {}\nOneShotTask::~OneShotTask() = default;\n\nStatus PreCheckData(\n    std::unordered_map<std::string, std::shared_ptr<BufferList>> datas) {\n  if (datas.empty()) {\n    return {STATUS_NODATA, \"no data available to start task\"};\n  }\n  int buffer_number = -1;\n  auto data_iter = datas.begin();\n  while (data_iter != datas.end()) {\n    auto port_name = data_iter->first;\n    auto buffer_list = data_iter->second;\n    if (buffer_number == -1) {\n      buffer_number = buffer_list->Size();\n    }\n\n    if (size_t(buffer_number) != buffer_list->Size()) {\n      return {STATUS_FAULT, port_name + \" size not equal to the first\"};\n    }\n    data_iter++;\n  }\n  return STATUS_OK;\n}\n\nStatus OneShotTask::FillData(\n    std::unordered_map<std::string, std::shared_ptr<BufferList>>& data) {\n  auto status = PreCheckData(data);\n  if (status != STATUS_SUCCESS) {\n    return status;\n  }\n  auto data_iter = data.begin();\n  while (data_iter != data.end()) {\n    data_.emplace(data_iter->first, data_iter->second);\n    data_iter++;\n  }\n  return STATUS_SUCCESS;\n}\n\nStatus OneShotTask::FeedData() {\n  auto data_iter = data_.begin();\n  auto external_data = external_data_.lock();\n  if (external_data == nullptr) {\n    return {STATUS_NOTFOUND, \"external_data not found\"};\n  }\n  if (data_.size() == 0) {\n    return {STATUS_NODATA, \"No data available\"};\n  }\n  while (data_iter != data_.end()) {\n    auto port_name = data_iter->first;\n    auto buffer_list = data_iter->second;\n    if (buffer_list->Size() == 0) {\n      return {STATUS_NODATA, port_name + \" buffer_list size is 0\"};\n    }\n    auto status = external_data->Send(port_name, buffer_list);\n    if (status != STATUS_SUCCESS) {\n      return status;\n    }\n    data_iter++;\n  }\n  external_data->Close();\n  return 
STATUS_EOF;\n}\n\nvoid OneShotTask::FetchData(Status fetch_status, OutputBufferList& output_buf) {\n  if (fetch_status == STATUS_SUCCESS) {\n    auto data_callback = GetDataCallback();\n    if (data_callback) {\n      data_callback(this, output_buf);\n    }\n    MBLOG_DEBUG << \"recv external\";\n  } else {\n    auto status_callback = GetStatusCallback();\n    if (status_callback) {\n      status_callback(this, GetTaskStatus());\n    }\n  }\n}\n\nvoid OneShotTask::RegisterDataCallback(const TaskDataCallback& callback) {\n  data_callback_ = callback;\n}\n\nTaskDataCallback OneShotTask::GetDataCallback() { return data_callback_; }\n\nvoid OneShotTask::RegisterStatusCallback(const TaskStatusCallback& callback) {\n  status_callback_ = callback;\n}\n\nTaskStatusCallback OneShotTask::GetStatusCallback() { return status_callback_; }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/task_manager.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/server/task.h>\n#include <modelbox/server/task_manager.h>\n\n#include <utility>\nnamespace modelbox {\n\nTaskManager::TaskManager(std::shared_ptr<Flow> flow, uint32_t task_num_limits) {\n  flow_ = std::move(flow);\n  thread_pool_ = std::make_shared<ThreadPool>(0, task_num_limits);\n  task_num_limits_ = task_num_limits;\n  avaiable_task_counts_ = 0;\n  thread_run_ = true;\n  selector_ = std::make_shared<ExternalDataSelect>();\n}\nTaskManager::~TaskManager() { Stop(); }\n\nStatus TaskManager::Submit(const std::shared_ptr<Task>& task) {\n  std::unique_lock<std::mutex> guard(new_del_lock_);\n  if (avaiable_task_counts_ >= task_num_limits_) {\n    MBLOG_INFO << \"Running Task exceed task_num_limits \"\n               << avaiable_task_counts_;\n    return STATUS_SUCCESS;\n  }\n\n  if (thread_pool_ == nullptr) {\n    return {STATUS_NOTFOUND, \"thread_pool not exist\"};\n  }\n  avaiable_task_counts_++;\n  guard.unlock();\n\n  thread_pool_->Submit(task->GetTaskId(), &Task::SendData, task.get());\n\n  return STATUS_SUCCESS;\n}\n\nvoid TaskManager::StartWaittingTask() {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  auto task_iter = task_maps_.begin();\n  if (avaiable_task_counts_ >= task_num_limits_) {\n    return;\n  }\n\n  while (task_iter != task_maps_.end()) {\n    auto task = 
task_iter->second;\n    if (task->IsReady()) {\n      Submit(task);\n    }\n    task_iter++;\n  }\n}\n\nvoid TaskManager::ReceiveWork() {\n  while (thread_run_) {\n    std::list<std::shared_ptr<ExternalDataMap>> external_list;\n    auto select_status = selector_->SelectExternalData(\n        external_list, std::chrono::milliseconds(200));\n\n    for (const auto& external : external_list) {\n      std::unique_lock<std::mutex> map_guard(map_lock_);\n      auto task_iter = external_task_maps_.find(external);\n      if (task_iter == external_task_maps_.end()) {\n        MBLOG_DEBUG << \"task already deleted\";\n        continue;\n      }\n      auto task = task_iter->second;\n      map_guard.unlock();\n\n      modelbox::OutputBufferList map_buffer_list;\n      auto status = external->Recv(map_buffer_list);\n\n      std::unique_lock<std::mutex> guard(new_del_lock_);\n      if (status == STATUS_INVALID) {\n        MBLOG_WARN << \"recv external failed\";\n        auto error = external->GetLastError();\n        if (error->GetDesc() == \"EOF\") {\n          task->UpdateTaskStatus(STOPPED);\n        } else {\n          task->UpdateTaskStatus(ABNORMAL);\n        }\n        if (task->IsTaskSubmitted()) {\n          avaiable_task_counts_--;\n        }\n      } else if (status == STATUS_EOF) {\n        MBLOG_DEBUG << \"recv external finished\";\n        task->UpdateTaskStatus(FINISHED);\n        if (task->IsTaskSubmitted()) {\n          avaiable_task_counts_--;\n        }\n      }\n      guard.unlock();\n      task->FetchData(status, map_buffer_list);\n    }\n    StartWaittingTask();\n  }\n}\n\nStatus TaskManager::Start() {\n  receive_thread_ =\n      std::make_shared<std::thread>(&TaskManager::ReceiveWork, this);\n  return STATUS_SUCCESS;\n}\n\nvoid TaskManager::Stop() {\n  if (receive_thread_) {\n    thread_run_ = false;\n    receive_thread_->join();\n    receive_thread_ = nullptr;\n  }\n  if (thread_pool_) {\n    thread_pool_->Shutdown();\n  }\n  selector_ = 
nullptr;\n}\n\nstd::shared_ptr<Task> TaskManager::CreateTask(TaskType task_type) {\n  std::shared_ptr<Task> task = nullptr;\n  switch (task_type) {\n    case TASK_ONESHOT:\n    default:\n      task = std::make_shared<OneShotTask>();\n      break;\n  }\n  RegisterTask(task);\n  return task;\n}\n\nint TaskManager::GetTaskNumLimit() { return task_num_limits_; }\n\nint TaskManager::GetAvaiableTaskCount() { return avaiable_task_counts_; }\n\nvoid TaskManager::SetTaskNumLimit(int task_limits) {\n  std::lock_guard<std::mutex> guard(new_del_lock_);\n  task_num_limits_ = task_limits;\n}\n\nstd::shared_ptr<Flow> TaskManager::GetFlow() { return flow_; }\nstd::shared_ptr<ExternalDataSelect> TaskManager::GetSelector() {\n  return selector_;\n}\n\nStatus TaskManager::DeleteTaskById(const std::string& taskid) {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  if (task_maps_.find(taskid) == task_maps_.end()) {\n    return {STATUS_NOTFOUND, \"task can not be found\"};\n  }\n  auto task = task_maps_[taskid];\n  auto external_data = task->GetExternalData();\n  task_maps_.erase(taskid);\n  guard.unlock();\n\n  task->Stop();\n\n  selector_->RemoveExternalData(external_data);\n  external_task_maps_.erase(external_data);\n  return STATUS_SUCCESS;\n}\n\nstd::shared_ptr<Task> TaskManager::GetTaskById(const std::string& taskid) {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  if (task_maps_.find(taskid) == task_maps_.end()) {\n    return nullptr;\n  }\n  return task_maps_[taskid];\n}\n\nuint32_t TaskManager::GetTaskCount() {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  return task_maps_.size();\n}\n\nstd::vector<std::shared_ptr<Task>> TaskManager::GetAllTasks() {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  std::vector<std::shared_ptr<Task>> task_list;\n  auto task_iter = task_maps_.begin();\n  while (task_iter != task_maps_.end()) {\n    task_list.push_back(task_iter->second);\n    task_iter++;\n  }\n  return task_list;\n}\n\nvoid TaskManager::RegisterTask(const 
std::shared_ptr<Task>& task) {\n  std::unique_lock<std::mutex> guard(map_lock_);\n  task->SetTaskManager(shared_from_this());\n  auto external_data = task->GetExternalData();\n  task_maps_.emplace(task->GetTaskId(), task);\n  external_task_maps_.emplace(external_data, task);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/timer.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/server/timer.h>\n\nnamespace modelbox {\n\nServerTimer *ServerTimer::GetInstance() {\n  static ServerTimer server_timer;\n  return &server_timer;\n}\n\nServerTimer *kServerTimer = ServerTimer::GetInstance();\n\nvoid ServerTimer::Start() {\n  Timer::SetName(\"\");\n  Timer::StartAsync();\n}\n\nvoid ServerTimer::Run() { Timer::Run(); }\n\nvoid ServerTimer::Stop() { Timer::StopAsync(); }\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/server/utils.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/server/utils.h\"\n\n#include <list>\n\n#include \"modelbox/base/log.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nIPACL::IPACL() = default;\nIPACL::~IPACL() = default;\n\nmodelbox::Status IPACL::AddCidr(const std::string &cidr) {\n  uint32_t mask_len = sizeof(uint32_t) * 8;\n  std::string host;\n\n  host = cidr;\n  int pos = cidr.find_first_of('/');\n  if (pos > 0) {\n    auto str_mask = cidr.substr(pos + 1, cidr.length());\n    mask_len = atol(str_mask.c_str());\n    host = cidr.substr(0, pos);\n  }\n\n  if (host.length() == 0) {\n    return {modelbox::STATUS_INVALID, \"ip address is invalid\"};\n  }\n\n  auto addrinfo = GetAddrInfo(host);\n  if (addrinfo == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  if (addrinfo->ai_family == AF_INET) {\n    uint32_t shift = (sizeof(uint32_t) * 8) - mask_len;\n    uint32_t mask = GetIPV4Addr(addrinfo) >> shift;\n    std::pair<uint32_t, uint32_t> acl(mask, shift);\n    ipv4_acl_.emplace_back(acl);\n    return modelbox::STATUS_OK;\n  }\n\n  return modelbox::STATUS_NOTSUPPORT;\n}\n\nmodelbox::Status IPACL::IsMatch(const std::string &ipaddr) {\n  auto addrinfo = GetAddrInfo(ipaddr);\n  if (addrinfo == nullptr) {\n    return modelbox::StatusError;\n  }\n\n  if (addrinfo->ai_family == AF_INET) {\n    uint32_t ip = GetIPV4Addr(addrinfo);\n   
 for (const auto &mask : ipv4_acl_) {\n      uint32_t ip_a = ip >> mask.second;\n      uint32_t ip_b = mask.first;\n      if (ip_a == ip_b || (ip_b == 0 && mask.second == sizeof(uint32_t) * 8)) {\n        return modelbox::STATUS_OK;\n      }\n    }\n  } else {\n    return modelbox::STATUS_NOTSUPPORT;\n  }\n\n  return modelbox::STATUS_NOTFOUND;\n}\n\nuint32_t IPACL::GetIPV4Addr(const std::shared_ptr<struct addrinfo> &addrinfo) {\n  auto *in4 = (struct sockaddr_in *)addrinfo->ai_addr;\n  uint32_t ip = ntohl(in4->sin_addr.s_addr); // NOLINT\n  return ip;\n}\n\nstd::shared_ptr<struct addrinfo> IPACL::GetAddrInfo(const std::string &host) {\n  struct addrinfo hints;\n  struct addrinfo *result = nullptr;\n\n  memset_s(&hints, sizeof(hints), 0, sizeof(hints));\n  hints.ai_family = AF_UNSPEC;\n\n  auto ret = getaddrinfo(host.c_str(), \"0\", &hints, &result);\n  if (ret != 0) {\n    modelbox::StatusError = {modelbox::STATUS_FAULT, gai_strerror(ret)};\n    return nullptr;\n  }\n\n  std::shared_ptr<struct addrinfo> addrinfo(\n      result, [](struct addrinfo *ptr) { freeaddrinfo(ptr); });\n\n  return addrinfo;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/modelbox/serving/CMakeLists.txt",
    "content": "#\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE MODELBOX_SOURCES *.cpp *.cc *.c)\nset(MODELBOX_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${MODELBOX_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\n\nadd_executable(modelbox-serving ${MODELBOX_SOURCES})\ntarget_link_libraries(modelbox-serving pthread)\ntarget_link_libraries(modelbox-serving rt)\ntarget_link_libraries(modelbox-serving ${LIBMODELBOX_SHARED})\ntarget_link_libraries(modelbox-serving ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(modelbox-serving ${HUAWEI_SECURE_C_LIBRARIES})\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_file/model.toml.in ${TEST_WORKING_DATA_DIR}/test_serving_model.toml @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_file/custom_service.py.in ${TEST_WORKING_DATA_DIR}/test_custom_service.py @ONLY)\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_file/test_client.py.in ${TEST_WORKING_DATA_DIR}/test_client.py @ONLY)\n\nif (STANDALONE)\n    set_target_properties(modelbox-serving PROPERTIES INSTALL_RPATH \"$ORIGIN/../lib\")\nendif()\n\ninstall(TARGETS modelbox-serving \n    COMPONENT server\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    
LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\ninstall(DIRECTORY \n    DESTINATION ${CMAKE_INSTALL_RUNSTATEDIR}/modelbox/\n    COMPONENT server\n)\n\nset(MODELBOX_SERVING_SOURCES ${MODELBOX_SOURCES} CACHE INTERNAL \"\")"
  },
  {
    "path": "src/modelbox/serving/main.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <getopt.h>\n#include <netdb.h>\n#include <signal.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <memory>\n#include <thread>\n\n#include \"modelbox/base/config.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/popen.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/common/command.h\"\n#include \"modelbox/common/utils.h\"\n#include \"serving.h\"\n\nstatic int g_sig_list[] = {\n    SIGIO,   SIGPWR,    SIGSTKFLT, SIGPROF, SIGINT,  SIGTERM,\n    SIGBUS,  SIGVTALRM, SIGTRAP,   SIGXCPU, SIGXFSZ, SIGILL,\n    SIGABRT, SIGFPE,    SIGSEGV,   SIGQUIT, SIGSYS,\n};\n\nstatic int g_sig_num = sizeof(g_sig_list) / sizeof(g_sig_list[0]);\n\nenum MODELBOX_SERVING_ARG {\n  MODELBOX_SERVING_ARG_MODEL_NAME,\n  MODELBOX_SERVING_ARG_MODEL_PATH,\n  MODELBOX_SERVING_ARG_DAEMON,\n  MODELBOX_SERVING_ARG_PORT,\n  MODELBOX_SERVING_ARG_HELP,\n};\n\nstatic struct option options[] = {\n    {\"model-name\", 1, nullptr, MODELBOX_SERVING_ARG_MODEL_NAME},\n    {\"model-path\", 1, nullptr, MODELBOX_SERVING_ARG_MODEL_PATH},\n    {\"daemon\", 0, nullptr, MODELBOX_SERVING_ARG_DAEMON},\n    {\"port\", 1, nullptr, MODELBOX_SERVING_ARG_PORT},\n    {\"h\", 0, nullptr, MODELBOX_SERVING_ARG_HELP},\n    {nullptr, 0, 
nullptr, 0},\n};\n\nstatic void showhelp() {\n  /* clang-format off */\n    char help[] = \"\"\n        \"Usage: modelbox-serving [OPTION]...\\n\"\n        \"Start modelbox-serving.\\n\"\n        \"  -model-name            model-name.\\n\"\n        \"  -model-path            model-path.\\n\"\n        \"  -daemon                run by daemon.\\n\"\n        \"  -port                  rest api port.\\n\"\n        \"\\n\";\n\n    printf(\"%s\", help);\n  /* clang-format on */\n}\n\nstatic void modelbox_sig_handler(int volatile sig_no, siginfo_t *sig_info,\n                                 void *volatile ptr) {\n  switch (sig_no) {\n    case SIGINT:\n    case SIGTERM:\n      exit(1);\n      return;\n      break;\n    case SIGQUIT:\n      return;\n      break;\n    case SIGSEGV:\n    case SIGPIPE:\n    case SIGFPE:\n    case SIGABRT:\n    case SIGBUS:\n    case SIGILL: {\n      char buf[4096];\n      MBLOG_ERROR << \"Segment fault\"\n                  << \", Signal: \" << sig_no << \", Addr: \" << sig_info->si_addr\n                  << \", Code: \" << sig_info->si_code << \", Caused by: \";\n      if (modelbox::modelbox_cpu_register_data(buf, sizeof(buf),\n                                               (ucontext_t *)ptr) == 0) {\n        MBLOG_ERROR << \"CPU Register Info:\\n\" << buf;\n      }\n      MBLOG_STACKTRACE(modelbox::LOG_FATAL);\n      sleep(1);\n    } break;\n    default:\n      break;\n  }\n\n  _exit(1);\n}\n\nstatic int modelbox_reg_signal() {\n  if (modelbox::modelbox_sig_register(g_sig_list, g_sig_num,\n                                      modelbox_sig_handler) != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  return 0;\n}\n\nint modelbox_serving_init() {\n  if (modelbox_reg_signal() != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox::modelbox_root_dir().length() > 0) {\n    std::string default_scanpath =\n        
modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     MODELBOX_DEFAULT_DRIVER_PATH);\n    modelbox::Drivers::SetDefaultScanPath(default_scanpath);\n\n    std::string default_driver_info_path =\n        modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     \"/var/run/modelbox-driver-info\");\n    modelbox::Drivers::SetDefaultInfoPath(default_driver_info_path);\n  }\n\n  return 0;\n}\n\nint modelbox_serving_generate_template(const std::string &model_name,\n                                       const std::string &model_path,\n                                       int port) {\n  fprintf(stdout, \"modelbox config path : %s\\n\", model_path.c_str());\n  auto serving = std::make_shared<ModelServing>();\n  auto status = serving->GenerateTemplate(model_name, model_path, port);\n  if (status != modelbox::STATUS_OK) {\n    return 1;\n  }\n\n  return 0;\n}\n\nint modelbox_run() {\n  auto p = std::make_shared<modelbox::Popen>();\n  std::string modelbox_bin{\"/etc/init.d/modelbox\"};\n  if (modelbox::modelbox_root_dir().length() > 0) {\n    modelbox_bin = modelbox::modelbox_full_path(\n        std::string(modelbox::MODELBOX_ROOT_VAR) + modelbox_bin);\n  }\n\n  modelbox_bin += \" restart\";\n\n  auto status = p->Open(modelbox_bin, -1, \"\");\n  if (status != modelbox::STATUS_OK) {\n    fprintf(stderr, \"execute modelbox cmd failed\");\n    return 1;\n  }\n\n  int ret = p->Close();\n  if (ret != 0) {\n    fprintf(stderr, \"run %s failed, ret: %d\\n\", modelbox_bin.c_str(), ret);\n    return 1;\n  }\n\n  return 0;\n}\n\nstatic void onexit() {}\n\n#ifdef BUILD_TEST\nint modelbox_serving_main(int argc, char *argv[])\n#else\nint main(int argc, char *argv[])\n#endif\n{\n  std::string model_name;\n  std::string model_path;\n  bool kDaemon{false};\n  int port = 9110;\n  int cmdtype = 0;\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, options)\n  switch (cmdtype) {\n    case 
MODELBOX_SERVING_ARG_HELP:\n      showhelp();\n      return 1;\n    case MODELBOX_SERVING_ARG_MODEL_NAME:\n      printf(\"model-name %s \\n\", optarg);\n      model_name = optarg;\n      break;\n    case MODELBOX_SERVING_ARG_MODEL_PATH:\n      printf(\"model-path %s \\n\", optarg);\n      model_path = optarg;\n      break;\n    case MODELBOX_SERVING_ARG_PORT:\n      try {\n        port = std::stoi(optarg);\n      } catch (const std::string &exception) {\n        fprintf(stderr, \"rest port %s failed, use default port %d\", optarg,\n                port);\n        return 1;\n      }\n      break;\n    case MODELBOX_SERVING_ARG_DAEMON:\n      kDaemon = true;\n      break;\n    default:\n      printf(\"Try %s -h for more information.\\n\", argv[0]);\n      return 1;\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  if (argc <= 1) {\n    showhelp();\n    return 1;\n  }\n\n  if (kDaemon == true) {\n    if (daemon(0, 0) < 0) {\n      fprintf(stderr, \"run daemon process failed, %s\\n\",\n              modelbox::StrError(errno).c_str());\n      return 1;\n    }\n  }\n\n  Defer { onexit(); };\n  /* 忽略SIGPIPE，避免发送缓冲区慢导致的进程退出 */\n  signal(SIGPIPE, SIG_IGN);\n\n  if (modelbox_serving_init() != 0) {\n    fprintf(stderr, \"init failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox_serving_generate_template(model_name, model_path, port) != 0) {\n    fprintf(stderr, \"generate model serving failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox_run() != 0) {\n    return 1;\n  }\n\n  fprintf(stdout, \"modelbox run succeeded\\n\");\n  return 0;\n}\n"
  },
  {
    "path": "src/modelbox/serving/serving.cc",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"serving.h\"\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/status.h>\n#include <modelbox/base/utils.h>\n#include <unistd.h>\n\n#include <fstream>\n\nconst std::string copyright_content = R\"(\n# Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n)\";\n\nconst std::string import_content = R\"(\nimport _flowunit as modelbox\nimport numpy as np\nimport json\n)\";\n\nconst std::string template_content = R\"(\n    def __init__(self):\n         super().__init__()\n    def open(self, config):\n        return modelbox.Status()\n    def close(self):\n        return modelbox.Status()\n    def data_pre(self, data_context):\n        return modelbox.Status()\n    def data_post(self, data_context):\n        return 
modelbox.Status()\n    def data_group_pre(self, data_context):\n        return modelbox.Status()\n    def data_group_post(self, data_context):\n        return modelbox.Status()\n)\";\n\nstatic std::map<std::string, std::string> configtype_nptype_map = {\n    {\"FLOAT\", \"np.float32\"},\n    {\"DOUBLE\", \"np.float64\"},\n    {\"INT\", \"np.int32\"},\n    {\"UINT8\", \"np.uint8\"},\n    {\"LONG\", \"np.int64\"}};\n\nstd::string GetNumpyType(std::string type) {\n  std::transform(type.begin(), type.end(), type.begin(), ::toupper);\n  auto iter = configtype_nptype_map.find(type);\n  if (iter == configtype_nptype_map.end()) {\n    auto err_msg = \"get numpy type failed, unsupported type: \" + type;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return \"\";\n  }\n  return iter->second;\n}\n\nmodelbox::Status ModelServing::GenerateTemplate(const std::string &model_name,\n                                                const std::string &model_path,\n                                                int port) {\n  auto status = CheckConfigFiles(model_path);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"check path failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = ParseModelToml();\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"parse model toml failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::string default_file_path =\n      std::string(DEFAULT_FLOWUNIT_PATH) + \"/\" + model_name;\n\n  bool generate_failed = false;\n  DeferCond { return generate_failed; };\n  DeferCondAdd {\n    if (!graph_toml_file_.empty()) {\n      remove(graph_toml_file_.c_str());\n    }\n\n    if (access(default_file_path.c_str(), R_OK) == 0) {\n      modelbox::RemoveDirectory(default_file_path);\n    }\n  };\n\n  status = 
GenerateModelServingTemplate(model_name, port);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"generate model-serving template failed, err: \" +\n                   status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    generate_failed = true;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (custom_service_) {\n    fprintf(stdout, \"generate custom custom_service\\n\");\n    status = GeneratePrePostFlowUnit(default_file_path, \"preprocess\");\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"generate default custom_service flowunit failed, err: \" +\n                     status.WrapErrormsgs();\n      fprintf(stderr, \"%s\\n\", err_msg.c_str());\n      generate_failed = true;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    status = GeneratePrePostFlowUnit(default_file_path, \"postprocess\");\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"generate default custom_service flowunit failed, err: \" +\n                     status.WrapErrormsgs();\n      fprintf(stderr, \"%s\\n\", err_msg.c_str());\n      generate_failed = true;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n  } else {\n    fprintf(stdout, \"generate default custom_service\\n\");\n    status = GenerateDefaultPrePostFlowUnit(default_file_path, \"preprocess\");\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"generate default custom_service flowunit failed, err: \" +\n                     status.WrapErrormsgs();\n      fprintf(stderr, \"%s\\n\", err_msg.c_str());\n      generate_failed = true;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n\n    status = GenerateDefaultPrePostFlowUnit(default_file_path, \"postprocess\");\n    if (status != modelbox::STATUS_OK) {\n      auto err_msg = \"generate default custom_service flowunit failed, err: \" +\n                     status.WrapErrormsgs();\n      fprintf(stderr, \"%s\\n\", err_msg.c_str());\n      generate_failed = 
true;\n      return {modelbox::STATUS_FAULT, err_msg};\n    }\n  }\n\n  status = UpdateGraphTemplateByToml(model_name);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"update graph template failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    generate_failed = true;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::CheckConfigFiles(const std::string &model_path) {\n  std::vector<std::string> files;\n  auto status = modelbox::ListFiles(model_path, \"*\", &files);\n  if (status != modelbox::STATUS_OK) {\n    return status;\n  }\n\n  for (auto &file : files) {\n    auto str_vec = modelbox::StringSplit(file, '/');\n    auto file_name = str_vec[str_vec.size() - 1];\n\n    if (file_name == \"model.toml\") {\n      model_toml_ = modelbox::PathCanonicalize(file);\n      continue;\n    }\n\n    if (file_name == \"custom_service.py\") {\n      model_custom_service_file_ = file;\n      custom_service_ = true;\n      continue;\n    }\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::FillModelItem(const std::string &type) {\n  auto item = config_->GetSubKeys(type);\n  if (item.empty()) {\n    fprintf(stderr, \"the key %s is not found in the config file.\\n\",\n            type.c_str());\n    return modelbox::STATUS_BADCONF;\n  }\n\n  std::vector<std::string> item_names;\n  std::vector<std::string> item_types;\n  for (unsigned int i = 1; i <= item.size(); ++i) {\n    std::string item_name;\n    std::string item_type;\n    auto key = type;\n    key += \".\";\n    key += type;\n    key += std::to_string(i);\n    auto item_table = config_->GetSubKeys(key);\n    if (item_table.empty()) {\n      fprintf(stderr, \"the key %s is not found in the config file.\\n\",\n              key.c_str());\n      return modelbox::STATUS_BADCONF;\n    }\n\n    for (const auto &inner_item : item_table) {\n      auto item_index = key;\n      
item_index += \".\";\n      item_index += inner_item;\n      if (inner_item == \"name\") {\n        item_name = config_->GetString(item_index);\n        if (item_name.empty()) {\n          fprintf(stderr, \"the key %s should have key name.\\n\", key.c_str());\n          return modelbox::STATUS_BADCONF;\n        }\n\n        continue;\n      }\n\n      if (inner_item == \"type\") {\n        item_type = config_->GetString(item_index);\n        if (item_type.empty()) {\n          fprintf(stderr, \"the key %s should have key type.\\n\", key.c_str());\n          return modelbox::STATUS_BADCONF;\n        }\n\n        continue;\n      }\n    }\n    item_names.emplace_back(item_name);\n    item_types.emplace_back(item_type);\n  }\n\n  if (type == \"input\") {\n    model_serving_config_.SetInputNames(item_names);\n    model_serving_config_.SetInputTypes(item_types);\n  } else {\n    model_serving_config_.SetOutputNames(item_names);\n    model_serving_config_.SetOutputTypes(item_types);\n  }\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::ParseModelToml() {\n  modelbox::ConfigurationBuilder configbuilder;\n  config_ = configbuilder.Build(model_toml_);\n  if (config_ == nullptr) {\n    std::string err_msg = \"parse model toml failed, err: \" +\n                          modelbox::StatusError.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  auto base_config = config_->GetSubConfig(\"base\");\n  model_serving_config_.SetModelEntry(base_config->GetString(\"entry\", \"\"));\n  model_serving_config_.SetMaxBatchSize(\n      base_config->GetInt64(\"max_batch_size\", 1));\n  model_serving_config_.SetDevices(base_config->GetString(\"device\", {\"cpu\"}));\n  model_serving_config_.SetModelEngine(\n      base_config->GetString(\"engine\", \"tensorflow\"));\n  model_serving_config_.SetMode(base_config->GetString(\"mode\", \"model\"));\n\n  auto status = FillModelItem(\"input\");\n  if (status 
!= modelbox::STATUS_OK) {\n    auto err_msg =\n        \"fill model input config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = FillModelItem(\"output\");\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"fill model output config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GenerateModelServingTemplate(\n    const std::string &model_name, int port) {\n  auto status = GenerateDefaultGraphConfig(model_name, port);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"generate default graph config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::string default_flowunit_path =\n      std::string(DEFAULT_FLOWUNIT_PATH) + \"/\" + model_name;\n  status = modelbox::CreateDirectory(default_flowunit_path);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"create directory /tmp/\" + model_name +\n                   \" failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = GenerateInferConfig(default_flowunit_path, model_name);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"generate infer config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = GeneratePrePostConfig(default_flowunit_path, \"preprocess\");\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"generate preprocess config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = GeneratePrePostConfig(default_flowunit_path, \"postprocess\");\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg =\n        \"generate postprocess config failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GetDeviceType(std::string &device_type) {\n  auto devices = model_serving_config_.GetDevices();\n  if (devices.compare(0, 4, \"cuda\") == 0) {\n    device_type = \"cuda\";\n  } else if (devices.compare(0, 3, \"cpu\") == 0) {\n    device_type = \"cpu\";\n  } else if (devices.compare(0, 6, \"ascend\") == 0) {\n    device_type = \"ascend\";\n  } else {\n    auto err_msg = \"base.device is not correct in toml config: \" + devices;\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GenerateInferConfig(\n    const std::string &default_flowunit_path, const std::string &model_name) {\n  std::string infer_flowunit_path = default_flowunit_path + \"/\" + model_name;\n  auto status = modelbox::CreateDirectory(infer_flowunit_path);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"create directory \" + infer_flowunit_path +\n                   \" failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::string infer_toml = infer_flowunit_path + \"/\" + model_name + \".toml\";\n  std::ofstream file{infer_toml};\n  if (!file.is_open()) {\n    auto err_msg = \"Open failed, path \" + infer_toml;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { file.close(); };\n\n  std::string device_type;\n  status = GetDeviceType(device_type);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"get device type failed, err: \" + 
status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::stringstream ss;\n  std::string base_content = R\"([base]\nname = \")\" + model_name + R\"(\"\ndevice = \")\" + device_type + R\"(\"\nversion = \"1.0.0\"\ndescription = \"model-serving template description.\"\nentry = \")\" + model_serving_config_.GetModelEntry() +\n                             R\"(\"\ntype = \"inference\"\nvirtual_type = \")\" + model_serving_config_.GetModelEngine() +\n                             R\"(\"\n\n[input])\";\n\n  ss << base_content;\n  std::string input_content;\n  auto input_names = model_serving_config_.GetInputNames();\n  auto input_types = model_serving_config_.GetInputTypes();\n  for (size_t i = 0; i < input_names.size(); ++i) {\n    std::string input_content = R\"(\n[input.input)\" + std::to_string(i + 1) +\n                                R\"(]\nname = \")\" + input_names[i] +\n                                R\"(\"\ntype = \")\" + input_types[i] +\n                                R\"(\"\n    )\";\n    ss << input_content;\n  }\n  ss << std::endl;\n  ss << R\"([output])\";\n  auto output_names = model_serving_config_.GetOutputNames();\n  auto output_types = model_serving_config_.GetOutputTypes();\n  for (size_t i = 0; i < output_names.size(); ++i) {\n    std::string output_content = R\"(\n[output.output)\" + std::to_string(i + 1) +\n                                 R\"(]\nname = \")\" + output_names[i] +\n                                 R\"(\"\ntype = \")\" + output_types[i] + R\"(\")\";\n    ss << output_content;\n  }\n\n  file << ss.str();\n  if (!file.good()) {\n    std::string err_msg = \"write infer config failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GeneratePrePostConfig(\n    const std::string &default_flowunit_path, const std::string &type) {\n  
std::string python_flowunit_path = default_flowunit_path + \"/\" + type;\n  auto status = modelbox::CreateDirectory(python_flowunit_path);\n  if (status != modelbox::STATUS_OK) {\n    auto err_msg = \"create directory \" + python_flowunit_path +\n                   \"failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::string python_toml = python_flowunit_path + \"/\" + type + \".toml\";\n  std::ofstream file{python_toml};\n  if (!file.is_open()) {\n    auto err_msg = \"Open failed, path \" + python_toml;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { file.close(); };\n\n  std::stringstream ss;\n  std::string class_name;\n  std::string input_output_content;\n  if (type == \"preprocess\") {\n    class_name = R\"(Preprocess\")\";\n    input_output_content = R\"([input]\n[input.input1]\nname = \"in_data\"\ntype = \"string\")\";\n  } else {\n    class_name = R\"(Postprocess\")\";\n    input_output_content = R\"([output]\n[output.output1]\nname = \"out_data\"\ntype = \"string\")\";\n  }\n\n  std::string content = R\"([base]\nname = \")\" + type + R\"(\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"model-serving pre/post template description.\"\nentry = \")\" + type + R\"(@)\" +\n                        class_name + R\"(\ntype = \"python\" \n\n)\" + input_output_content;\n\n  ss << content;\n  file << ss.str();\n  if (!file.good()) {\n    std::string err_msg = \"write python config failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GenerateDefaultGraphConfig(\n    const std::string &model_name, int port) {\n  graph_toml_file_ =\n      std::string(DEFAULT_GRAPTH_PATH) + \"/\" + model_name + \"_origin.toml\";\n  std::ofstream file{graph_toml_file_};\n  if 
(!file.is_open()) {\n    auto err_msg = \"Open failed, path \" + graph_toml_file_;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { file.close(); };\n\n  std::stringstream ss;\n  std::string content = R\"([driver]\nskip-default=false\ndir=\"/tmp/)\" + model_name +\n                        R\"(\"\n[graph]\nformat = \"graphviz\"\ngraphconf = '''digraph demo {\n      httpserver_sync_receive[type=flowunit, flowunit=httpserver_sync_receive, device=cpu, time_out_ms=5000, endpoint=\"http://0.0.0.0:)\" +\n                        std::to_string(port) + R\"(\", max_requests=100]\n      preprocess[type=flowunit, flowunit=preprocess, device=cpu]\n)\" + \"     \" + model_name +\n                        R\"([type=flowunit, flowunit=)\" + model_name +\n                        R\"(, device=infer_device, deviceid=0]\n      postprocess[type=flowunit, flowunit=postprocess, device=cpu]\n      httpserver_sync_reply[type=flowunit, flowunit=httpserver_sync_reply, device=cpu]\n      \n      httpserver_sync_receive:out_request_info -> preprocess:in_data\n      preprocess:out_data -> )\" +\n                        model_name + R\"(:input\n      )\" + model_name +\n                        R\"(:output -> postprocess:in_data\n      postprocess:out_data -> httpserver_sync_reply:in_reply_info\n}''')\";\n  ss << content;\n  file << ss.str();\n  if (!file.good()) {\n    std::string err_msg = \"write default graph config failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GeneratePrePostFlowUnit(\n    const std::string &default_file_path, const std::string &type) {\n  std::string copy_custom_python =\n      default_file_path + \"/\" + type + \"/custom_service.py\";\n  auto status = modelbox::CopyFile(model_custom_service_file_,\n                                   copy_custom_python, 0, true);\n  if (status 
!= modelbox::STATUS_OK) {\n    auto err_msg =\n        \"copy custom service file failed, err: \" + status.WrapErrormsgs();\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  std::string flowunit_path =\n      default_file_path + \"/\" + type + \"/\" + type + \".py\";\n\n  std::ofstream file{flowunit_path};\n  if (!file.is_open()) {\n    auto err_msg = \"Open failed, path \" + flowunit_path;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { file.close(); };\n\n  std::stringstream ss;\n  auto input_names = model_serving_config_.GetInputNames();\n  auto input_types = model_serving_config_.GetInputTypes();\n  auto output_names = model_serving_config_.GetOutputNames();\n  ss << copyright_content << \"\\n\";\n  ss << import_content << \"\\n\";\n\n  ss << \"import custom_service\\n\\n\";\n\n  if (type == \"preprocess\") {\n    ss << \"class Preprocess(modelbox.FlowUnit):\\n\";\n  } else {\n    ss << \"class Postprocess(modelbox.FlowUnit):\\n\";\n  }\n\n  ss << template_content << \"\\n\";\n\n  ss << \"    def process(self, data_context):\\n\";\n  if (type == \"preprocess\") {\n    ss << \"        in_data = data_context.input(\\\"in_data\\\")\\n\";\n    for (auto &input_name : input_names) {\n      ss << \"        \" << input_name << \" = data_context.output(\\\"\"\n         << input_name << \"\\\")\\n\";\n    }\n    ss << \"\\n\";\n    ss << \"        for buffer in in_data:\\n\";\n    ss << \"            # get data from json\\n\";\n    ss << \"            request_body = \"\n          \"json.loads(buffer.as_object().strip(chr(0)))\\n\";\n    ss << \"\\n\";\n    ss << \"            for item in dir(custom_service):\\n\";\n    ss << \"                class_name = getattr(custom_service, item)\\n\";\n    ss << \"                if isinstance(class_name, type):\\n\";\n    ss << \"                    try:\\n\";\n    ss << \"                        instance = 
class_name()\\n\";\n    ss << \"                        preprocess = getattr(instance, \"\n          \"\\\"_preprocess\\\")\\n\";\n    ss << \"                        result = preprocess(request_body)\\n\";\n    for (size_t i = 0; i < input_names.size(); ++i) {\n      ss << \"                        data_\" << input_names[i]\n         << \" = np.asarray(result[\\\"\" << input_names[i] << \"\\\"])\";\n      if (input_types[i].empty()) {\n        ss << \"\\n\";\n      } else {\n        ss << \".astype(\" << GetNumpyType(input_types[i]) << \")\\n\";\n      }\n      ss << \"                        add_buffer_\" << input_names[i]\n         << \" = self.create_buffer(data_\" << input_names[i] << \")\\n\";\n      ss << \"                        \" << input_names[i]\n         << \".push_back(add_buffer_\" << input_names[i] << \")\\n\";\n    }\n    ss << \"                    except Exception as e:\\n\";\n    ss << \"                        print(\\\"custom preprocess failed, \\\", \"\n          \"e)\\n\";\n    ss << \"                        return \"\n          \"modelbox.Status.StatusCode.STATUS_FAULT\\n\";\n    ss << \"        return modelbox.Status()\\n\";\n  } else {\n    for (auto &output_name : output_names) {\n      ss << \"        \" << output_name << \" = data_context.input(\\\"\"\n         << output_name << \"\\\")\\n\";\n    }\n    ss << \"        out_data = data_context.output(\\\"out_data\\\")\\n\";\n    ss << \"\\n\";\n    ss << \"        for i in range(\" << output_names[0] << \".size()):\\n\";\n    ss << \"            postprocess_result = {}\\n\";\n    for (auto &output_name : output_names) {\n      ss << \"            postprocess_result[\\\"\" << output_name\n         << \"\\\"] = \" << output_name << \"[i].as_object()\\n\";\n    }\n    ss << \"            for item in dir(custom_service):\\n\";\n    ss << \"                class_name = getattr(custom_service, item)\\n\";\n    ss << \"                if isinstance(class_name, type):\\n\";\n    ss << \"     
               try:\\n\";\n    ss << \"                        instance = class_name()\\n\";\n    ss << \"                        postprocess = getattr(instance, \"\n          \"\\\"_postprocess\\\")\\n\";\n    ss << \"                        result = postprocess(postprocess_result)\\n\";\n    ss << \"                        result_str = (json.dumps(result) + \"\n          \"chr(0)).encode('utf-8').strip()\\n\";\n    ss << \"                        add_buffer = \"\n          \"self.create_buffer(result_str)\\n\";\n    ss << \"                        out_data.push_back(add_buffer)\\n\";\n    ss << \"                    except Exception as e:\\n\";\n    ss << \"                        print(\\\"custom postprocess failed, \\\", \"\n          \"e)\\n\";\n    ss << \"                        return \"\n          \"modelbox.Status.StatusCode.STATUS_FAULT\\n\";\n    ss << \"        return modelbox.Status()\\n\";\n  }\n\n  file << ss.str();\n  if (!file.good()) {\n    std::string err_msg = \"write pre/post flowunit failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::GenerateDefaultPrePostFlowUnit(\n    const std::string &default_file_path, const std::string &type) {\n  std::string flowunit_path =\n      default_file_path + \"/\" + type + \"/\" + type + \".py\";\n  std::ofstream file{flowunit_path};\n  if (!file.is_open()) {\n    auto err_msg = \"Open failed, path \" + flowunit_path;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { file.close(); };\n\n  std::stringstream ss;\n  auto input_names = model_serving_config_.GetInputNames();\n  auto input_types = model_serving_config_.GetInputTypes();\n  auto output_names = model_serving_config_.GetOutputNames();\n  ss << copyright_content << \"\\n\";\n  ss << import_content << \"\\n\";\n  ss << \"import cv2\\n\";\n  ss << \"import 
base64\\n\";\n  ss << \"\\n\";\n\n  if (type == \"preprocess\") {\n    ss << \"class Preprocess(modelbox.FlowUnit):\\n\";\n  } else {\n    ss << \"class Postprocess(modelbox.FlowUnit):\\n\";\n  }\n\n  ss << template_content << \"\\n\";\n  ss << \"    def process(self, data_context):\\n\";\n  if (type == \"preprocess\") {\n    ss << \"        in_data = data_context.input(\\\"in_data\\\")\\n\";\n    for (auto &input_name : input_names) {\n      ss << \"        \" << input_name << \" = data_context.output(\\\"\"\n         << input_name << \"\\\")\\n\";\n    }\n    ss << \"\\n\";\n    ss << \"        for buffer in in_data:\\n\";\n    ss << \"            # get data from json\\n\";\n    ss << \"            request_body = \"\n          \"json.loads(buffer.as_object().strip(chr(0)))\\n\";\n    ss << \"\\n\";\n    for (size_t i = 0; i < input_names.size(); ++i) {\n      ss << \"            if request_body.get(\\\"\" << input_names[i] << \"\\\"):\\n\";\n      ss << \"                data = np.asarray(request_body[\\\"\"\n         << input_names[i] << \"\\\"])\";\n      if (input_types[i].empty()) {\n        ss << \"\\n\";\n      } else {\n        ss << \".astype(\" << GetNumpyType(input_types[i]) << \")\\n\";\n      }\n      ss << \"                add_buffer = self.create_buffer(data)\\n\";\n      ss << \"                \" << input_names[i] << \".push_back(add_buffer)\\n\";\n      ss << \"            else:\\n\";\n      ss << \"                print(\\\"wrong key of request_body\\\")\\n\";\n      ss << \"                return modelbox.Status.StatusCode.STATUS_FAULT\\n\";\n      ss << \"\\n\";\n    }\n\n    ss << \"        return modelbox.Status()\\n\";\n  } else {\n    for (auto &output_name : output_names) {\n      ss << \"        \" << output_name << \" = data_context.input(\\\"\"\n         << output_name << \"\\\")\\n\";\n    }\n    ss << \"        out_data = data_context.output(\\\"out_data\\\")\\n\";\n    ss << \"\\n\";\n    ss << \"        for i in range(\" << 
output_names[0] << \".size()):\\n\";\n    ss << \"            result = {}\\n\";\n    for (auto &output_name : output_names) {\n      ss << \"            data = \" << output_name << \"[i].as_object()\\n\";\n      ss << \"            result[\\\"\" << output_name << \"\\\"] = data.tolist()\\n\";\n    }\n\n    ss << \"            result_str = (json.dumps(result) + \"\n          \"chr(0)).encode('utf-8').strip()\\n\";\n    ss << \"            add_buffer = self.create_buffer(result_str)\\n\";\n    ss << \"            out_data.push_back(add_buffer)\\n\";\n    ss << \"        return modelbox.Status()\\n\";\n  }\n  file << ss.str();\n  if (!file.good()) {\n    std::string err_msg = \"write default pre/post flowunit failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  file.close();\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::UpdatePreFlowUnit(\n    const std::string &model_name) {\n  std::string pre_writer_toml_path = std::string(DEFAULT_FLOWUNIT_PATH) + \"/\" +\n                                     model_name + \"/preprocess/preprocess.toml\";\n  std::ofstream pre_writer(pre_writer_toml_path, std::ios::app);\n  if (!pre_writer.is_open()) {\n    auto err_msg = \"Open failed, path \" + pre_writer_toml_path;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { pre_writer.close(); };\n\n  std::stringstream ss;\n  auto input_names = model_serving_config_.GetInputNames();\n  auto input_types = model_serving_config_.GetInputTypes();\n  ss << \"\\n\";\n  ss << \"[output]\\n\";\n  for (size_t i = 0; i < input_names.size(); ++i) {\n    ss << \"[output.output\" << std::to_string(i + 1) << \"]\\n\";\n    ss << \"name = \\\"\" << input_names[i] << \"\\\"\\n\";\n    ss << \"type = \\\"\" << input_types[i] << \"\\\"\\n\";\n  }\n\n  pre_writer << ss.str();\n  if (!pre_writer.good()) {\n    std::string err_msg = \"update pre flowunit config 
failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::UpdatePostFlowUnit(\n    const std::string &model_name) {\n  std::string post_writer_toml_path = std::string(DEFAULT_FLOWUNIT_PATH) + \"/\" +\n                                      model_name +\n                                      \"/postprocess/postprocess.toml\";\n  std::ofstream post_writer(post_writer_toml_path, std::ios::app);\n  if (!post_writer.is_open()) {\n    auto err_msg = \"Open failed, path \" + post_writer_toml_path;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { post_writer.close(); };\n  std::stringstream ss;\n  auto output_names = model_serving_config_.GetOutputNames();\n  auto output_types = model_serving_config_.GetOutputTypes();\n  ss << \"\\n\";\n  ss << \"[input]\\n\";\n  for (size_t i = 0; i < output_names.size(); ++i) {\n    ss << \"[input.input\" << std::to_string(i + 1) << \"]\\n\";\n    ss << \"name = \\\"\" << output_names[i] << \"\\\"\\n\";\n    ss << \"type = \\\"\" << output_types[i] << \"\\\"\\n\";\n  }\n  post_writer << ss.str();\n  if (!post_writer.good()) {\n    std::string err_msg = \"update post flowunit config failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::UpdateGraphToml(const std::string &model_name) {\n  std::ifstream graph_reader(graph_toml_file_);\n  if (!graph_reader.is_open()) {\n    auto err_msg = \"Open failed, path \" + graph_toml_file_;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { graph_reader.close(); };\n\n  std::string update_grah_toml_file =\n      std::string(DEFAULT_GRAPTH_PATH) + \"/\" + model_name + \".toml\";\n  std::ofstream 
new_graph_writer(update_grah_toml_file);\n  if (!new_graph_writer.is_open()) {\n    auto err_msg = \"Open failed, path \" + update_grah_toml_file;\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  Defer { new_graph_writer.close(); };\n\n  std::stringstream ss;\n  std::string content;\n  while (std::getline(graph_reader, content)) {\n    size_t pos;\n    pos = content.find(\"infer_device\");\n    if (pos != std::string::npos) {\n      auto config_device = config_->GetString(\"base.device\");\n      bool single_device = true;\n      if (config_device.find(':') != std::string::npos) {\n        modelbox::StringReplaceAll(config_device, \"~\", \";\");\n        single_device = false;\n      }\n\n      ss << \"      \" << model_name << \"[type=flowunit, flowunit=\" << model_name\n         << \", device=\\\"\";\n      if (single_device) {\n        ss << config_device << \"\\\", deviceid=0]\\n\";\n      } else {\n        ss << config_device << \"\\\"]\\n\";\n      }\n\n      continue;\n    }\n\n    pos = content.find(\"preprocess:out_data\");\n    auto input_names = model_serving_config_.GetInputNames();\n    if (pos != std::string::npos) {\n      for (auto &port_name : input_names) {\n        ss << \"      preprocess:\" << port_name << \" -> \" << model_name << \":\"\n           << port_name << \"\\n\";\n      }\n      continue;\n    }\n\n    pos = content.find(\"postprocess:in_data\");\n    auto output_names = model_serving_config_.GetOutputNames();\n    if (pos != std::string::npos) {\n      for (auto &port_name : output_names) {\n        ss << \"      \" << model_name << \":\" << port_name << \" -> \"\n           << \"postprocess:\" << port_name << \"\\n\";\n      }\n      continue;\n    }\n\n    ss << content << \"\\n\";\n  }\n\n  new_graph_writer << ss.str();\n  if (!new_graph_writer.good()) {\n    std::string err_msg = \"update graph failed\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return 
{modelbox::STATUS_FAULT, err_msg};\n  }\n\n  if (remove(graph_toml_file_.c_str()) == -1) {\n    fprintf(stderr, \"remove origin template graph toml failed.\\n\");\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status ModelServing::UpdateGraphTemplateByToml(\n    const std::string &model_name) {\n  auto status = UpdatePreFlowUnit(model_name);\n  if (status != modelbox::STATUS_OK) {\n    std::string err_msg = \"update preprocess flowunit failed.\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = UpdatePostFlowUnit(model_name);\n  if (status != modelbox::STATUS_OK) {\n    std::string err_msg = \"update postprocess flowunit failed.\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  status = UpdateGraphToml(model_name);\n  if (status != modelbox::STATUS_OK) {\n    std::string err_msg = \"update graph toml failed.\";\n    fprintf(stderr, \"%s\\n\", err_msg.c_str());\n    return {modelbox::STATUS_FAULT, err_msg};\n  }\n\n  return modelbox::STATUS_OK;\n}\n"
  },
  {
    "path": "src/modelbox/serving/serving.h",
    "content": "/*\n * Copyright 2022 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVING_H_\n#define MODELBOX_SERVING_H_\n\n#include <vector>\n\n#include \"modelbox/base/configuration.h\"\n#include \"modelbox/base/status.h\"\n#ifdef BUILD_TEST\nconstexpr const char *DEFAULT_GRAPTH_PATH = \"/tmp\";\nconstexpr const char *DEFAULT_FLOWUNIT_PATH = \"/tmp\";\n#else\nconstexpr const char *DEFAULT_GRAPTH_PATH = \"/usr/local/etc/modelbox/graph\";\nconstexpr const char *DEFAULT_FLOWUNIT_PATH = \"/tmp\";\n#endif\n\nclass ModelServingConfig {\n public:\n  ModelServingConfig() = default;\n  virtual ~ModelServingConfig() {\n    input_names_.clear();\n    output_names_.clear();\n    input_types_.clear();\n    output_types_.clear();\n  }\n\n  std::string GetModelEntry() { return model_entry_; }\n  std::string GetModelEngine() { return model_engine_; }\n  int64_t GetMaxBatchSize() { return max_batch_size_; }\n  std::string GetDevices() { return devices_; }\n  std::string GetMode() { return mode_; }\n  std::vector<std::string> GetInputNames() { return input_names_; }\n  std::vector<std::string> GetOutputNames() { return output_names_; }\n  std::vector<std::string> GetInputTypes() { return input_types_; }\n  std::vector<std::string> GetOutputTypes() { return output_types_; }\n\n  void SetModelEntry(const std::string &model_entry) {\n    model_entry_ = model_entry;\n  }\n\n  void 
SetModelEngine(const std::string &model_engine) {\n    model_engine_ = model_engine;\n  }\n\n  void SetMaxBatchSize(int64_t max_batch_size) {\n    max_batch_size_ = max_batch_size;\n  }\n\n  void SetDevices(const std::string &devices) { devices_ = devices; }\n\n  void SetMode(const std::string &mode) { mode_ = mode; }\n\n  void SetInputNames(const std::vector<std::string> &input_names) {\n    input_names_ = input_names;\n  }\n\n  void SetOutputNames(const std::vector<std::string> &output_names) {\n    output_names_ = output_names;\n  }\n\n  void SetInputTypes(const std::vector<std::string> &input_types) {\n    input_types_ = input_types;\n  }\n\n  void SetOutputTypes(const std::vector<std::string> &output_types) {\n    output_types_ = output_types;\n  }\n\n  std::string model_entry_;\n  std::string model_engine_;\n  int64_t max_batch_size_;\n  std::string devices_;\n  std::string mode_;\n\n  std::vector<std::string> input_names_, output_names_;\n  std::vector<std::string> input_types_, output_types_;\n};\n\nclass ModelServing {\n public:\n  ModelServing() = default;\n  virtual ~ModelServing() = default;\n\n  modelbox::Status GenerateTemplate(const std::string &model_name,\n                                    const std::string &model_path, int port);\n\n private:\n  modelbox::Status CheckConfigFiles(const std::string &model_path);\n  modelbox::Status ParseModelToml();\n  modelbox::Status FillModelItem(const std::string &type);\n\n  modelbox::Status GetDeviceType(std::string &device_type);\n  modelbox::Status GenerateModelServingTemplate(const std::string &model_name,\n                                                int port);\n  modelbox::Status GenerateDefaultGraphConfig(const std::string &model_name,\n                                              int port);\n  modelbox::Status GenerateInferConfig(const std::string &default_flowunit_path,\n                                       const std::string &model_name);\n  modelbox::Status GeneratePrePostConfig(\n      const 
std::string &default_flowunit_path, const std::string &type);\n  modelbox::Status GeneratePrePostFlowUnit(const std::string &default_file_path,\n                                           const std::string &type);\n  modelbox::Status GenerateDefaultPrePostFlowUnit(\n      const std::string &default_file_path, const std::string &type);\n  modelbox::Status UpdateGraphTemplateByToml(const std::string &model_name);\n\n  modelbox::Status UpdatePreFlowUnit(const std::string &model_name);\n  modelbox::Status UpdatePostFlowUnit(const std::string &model_name);\n  modelbox::Status UpdateGraphToml(const std::string &model_name);\n\n  std::string model_toml_;\n  std::string model_custom_service_file_;\n  std::string graph_toml_file_;\n  bool custom_service_{false};\n  ModelServingConfig model_serving_config_;\n  std::shared_ptr<modelbox::Configuration> config_;\n};\n\n#endif  // MODELBOX_SERVING_H_\n"
  },
  {
    "path": "src/modelbox/serving/test_file/custom_service.py.in",
    "content": "import numpy as np\n\nclass mnist_service:\n\n    def _preprocess(self, data):\n        preprocessed_data = {}\n\n        input_data = data[\"input\"]\n        infer_data = [i + 1.0 for i in input_data]\n\n        preprocessed_data['input'] = infer_data\n        return preprocessed_data\n\n    def _postprocess(self, data):\n        output = {}\n        max_index = np.argmax(data['output'])\n        output = {\n            \"predict_result\": str(max_index)\n        }\n        return output\n"
  },
  {
    "path": "src/modelbox/serving/test_file/model.toml.in",
    "content": "[base]\nentry = \"@CMAKE_SOURCE_DIR@/test/assets/tensorflow/TF_VERSION/tensorflow_pb/frozen_model.pb\"\nengine = \"tensorflow\"\nmax_batch_size = 32\ndevice = \"cuda:0,1\"\nmode = \"model\"\n\n[input]\n[input.input1]\nname = \"input\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "src/modelbox/serving/test_file/test_client.py.in",
    "content": "import http.client\nimport os\nfrom urllib.parse import urlparse\nimport urllib.request\nimport json\n\nclass HttpConfig:\n    def __init__(self):\n        self.httpMethod = \"POST\"\n        self.requstURL = \"/test_default_\"\n\n        self.headerdata = {\n            \"Content-Type\": \"application/json\"\n        }\n\n        self.test_data = {\n            \"input\": [1.0, 2.0, 3.0]\n        }\n\n        self.body = json.dumps(self.test_data)\n\ndef DoInfer(host):\n    o = urlparse('//' + host)\n\n    http_config = HttpConfig()\n    http_config.hostIP = o.hostname\n    http_config.Port = o.port\n\n    conn = http.client.HTTPConnection(host=http_config.hostIP, port=http_config.Port)\n    try:\n        conn.request(method=http_config.httpMethod, url=http_config.requstURL, body=http_config.body,\n                headers=http_config.headerdata)\n    except  Exception as e:\n        print(\"Connect to \" + host + \" failed, reaseon: \")\n        print(e)\n        print(\"Please check the service status, or use '-host [ip:port]' to specify the address of the service.\")\n        return 1\n\n    response = conn.getresponse().read().decode()\n    print(response)\n    return 0\n\n\nif __name__ == \"__main__\":\n    host = \"0.0.0.0:39110\"\n    DoInfer(host)\n"
  },
  {
    "path": "src/modelbox/tool/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB_RECURSE MODELBOX_TOOL_SOURCES *.cpp *.cc *.c)\nset(MODELBOX_TOOL_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\nlist(APPEND MODELBOX_TOOL_INCLUDES ${MODELBOX_TOOL_INCLUDE})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${LIBMODELBOX_INCLUDE})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${LIBMODELBOX_BASE_INCLUDE})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${TLOG_INCLUDE})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${NLOHMANN_INCLUDE_DIR})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${MODELBOX_COMMON_INCLUDE})\nlist(APPEND MODELBOX_TOOL_INCLUDES ${HUAWEI_SECURE_C_INCLUDE_DIR})\nlist(REMOVE_DUPLICATES MODELBOX_TOOL_INCLUDES)\n\ninclude_directories(${MODELBOX_TOOL_INCLUDES})\n\nset(HEADER \n    ${MODELBOX_TOOL_INCLUDE}/modelbox\n    )\n\nadd_executable(modelbox-tool ${MODELBOX_TOOL_SOURCES})\nset_target_properties(modelbox-tool PROPERTIES ENABLE_EXPORTS 1)\ntarget_link_libraries(modelbox-tool ${TLOG_STATIC_LIBRARIES})\ntarget_link_libraries(modelbox-tool ${LIBMODELBOX_SHARED})\ntarget_link_libraries(modelbox-tool ${MODELBOX_COMMON_LIBRARY})\ntarget_link_libraries(modelbox-tool pthread)\ntarget_link_libraries(modelbox-tool rt)\ntarget_link_libraries(modelbox-tool ${HUAWEI_SECURE_C_LIBRARIES})\n\nadd_dependencies(modelbox-tool ${LIBMODELBOX_SHARED})\n\nset(PYTHON_DEBUG_BIN 
${CMAKE_CURRENT_LIST_DIR}/bin/modelbox-python-debug)\nset_target_properties(modelbox-tool PROPERTIES OUTPUT_NAME \"modelbox-tool\")\n\nif (STANDALONE)\n    set_target_properties(modelbox-tool PROPERTIES INSTALL_RPATH \"$ORIGIN/../lib\")\nendif()\n\ninstall(TARGETS modelbox-tool\n    COMPONENT libmodelbox\n    RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n    )\n\nset(MODELBOX_TOOL_INCLUDE \n    ${MODELBOX_TOOL_INCLUDE} \n    ${TLOG_INCLUDE}\n    CACHE INTERNAL \"\")\n    \ninstall(PROGRAMS ${PYTHON_DEBUG_BIN}\n    DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n    COMPONENT libmodelbox)\n\nset(MODELBOX_TOOL_SOURCES ${MODELBOX_TOOL_SOURCES} CACHE INTERNAL \"\")\nset(MODELBOX_TOOL_LINK_LIBRARIES \n    ${TLOG_STATIC_LIBRARIES}\n    ${HUAWEI_SECURE_C_LIBRARIES}\n    CACHE INTERNAL \"\")"
  },
  {
    "path": "src/modelbox/tool/bin/modelbox-python-debug",
    "content": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport os\nimport sys\nimport modelbox\nimport pathlib\nimport datetime\n\n__log = modelbox.Log()\n\n\ndef log_level_str_to_level(level: str) -> modelbox.Log.Level:\n    '''\n    log level str to enum level\n    '''\n    if level == 'DEBUG':\n        return modelbox.Log.Level.DEBUG\n    elif level == 'INFO':\n        return modelbox.Log.Level.INFO\n    elif level == 'NOTICE':\n        return modelbox.Log.Level.NOTICE\n    elif level == 'WARN':\n        return modelbox.Log.Level.WARN\n    elif level == 'ERROR':\n        return modelbox.Log.Level.ERROR\n    elif level == 'FATAL':\n        return modelbox.Log.Level.FATAL\n\n    return modelbox.Log.Level.OFF\n\n\ndef log_level_to_str(level: modelbox.Log.Level) -> str:\n    '''\n    log level str to enum level\n    '''\n    if level == modelbox.Log.Level.DEBUG:\n        return 'DEBUG'\n    elif level == modelbox.Log.Level.INFO:\n        return 'INFO'\n    elif level == modelbox.Log.Level.NOTICE:\n        return 'NOTICE'\n    elif level == modelbox.Log.Level.WARN:\n        return 'WARN'\n    elif level == modelbox.Log.Level.ERROR:\n        return 'ERROR'\n    elif level == modelbox.Log.Level.FATAL:\n        return 'FATAL'\n\n    return \"NONE\"\n\n\ndef log_callback(level: modelbox.Log.Level, file: str, lineno: int, 
func: str, msg: str):\n    '''\n    Log callback function\n    '''\n    print(\"[{time}][{level:>5}][{file:>20}:{lineno:>4}] {msg}\".format(\n        time=datetime.datetime.now(), level=log_level_to_str(level),\n        file=file, lineno=lineno, msg=msg\n    ))\n\n\ndef reg_log():\n    '''\n    Register log function\n    '''\n    __log.reg(log_callback)\n    __log.set_log_level(modelbox.Log.Level.INFO)\n\n\ndef set_log_level(level: modelbox.Log.Level):\n    '''\n    Set log level\n    '''\n    __log.set_log_level(level)\n\n\ndef run_flow(flow_file: str):\n    '''\n    Run modelbox flow\n    '''\n    try:\n        with open(flow_file) as f:\n            pass\n    except IOError as e:\n        print(\"Open file {} failed, {}\".format(\n            flow_file, os.strerror(e.errno)), file=sys.stderr)\n        return 1\n\n    # initialize flow\n    flow = modelbox.Flow()\n    ret = flow.init(flow_file)\n    if ret == False:\n        modelbox.error(ret)\n        return 1\n\n    # build graph\n    ret = flow.build()\n    if ret == False:\n        modelbox.error(ret)\n        return 1\n\n    # run flow async\n    ret = flow.run_async()\n    if ret == False:\n        modelbox.error(ret)\n        return 1\n\n    # get flow result\n    retval = modelbox.Status()\n    ret = flow.wait(0, retval)\n    if ret == False:\n        modelbox.error(ret)\n        return 1\n    modelbox.info(\"Running result: \" + str(retval))\n    return 0\n\n\ndef run_python_debug_tool():\n    '''\n    Run modelbox python debug tool\n    '''\n    parser = argparse.ArgumentParser(description='Modelbox python debug tool.')\n    parser.add_argument('--flow', dest='flow_file', type=str, required=True,\n                        help='Run flow file')\n    parser.add_argument('--loglevel', dest=\"log_level\", choices=['DEBUG', 'INFO', 'NOTICE', \"WARN\", 'ERROR', 'FATAL'],\n                        help='log level')\n    parser.add_argument(\n        '--verbose', help=\"output log to screen\", 
dest=\"log_verbose\", action='store_true')\n    parser.add_argument(\n        '--no-debug', help=\"disable debug support\", dest=\"disable_debug\", action='store_true'\n    )\n\n    args = parser.parse_args()\n    flow_file = args.flow_file\n\n    if args.disable_debug == False:\n        os.environ['MODELBOX_DEBUG_PYTHON'] = \"yes\"\n\n    reg_log()\n\n    if args.log_level:\n        level = log_level_str_to_level(args.log_level)\n        set_log_level(level)\n\n    return run_flow(flow_file)\n\n\nif __name__ == '__main__':\n    ret = run_python_debug_tool()\n    sys.exit(ret)\n
  },
  {
    "path": "src/modelbox/tool/driver.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"driver.h\"\n\n#include <getopt.h>\n#include <modelbox/base/config.h>\n#include <modelbox/modelbox.h>\n#include <stdio.h>\n\n#include <nlohmann/json.hpp>\n#include <utility>\n\nnamespace modelbox {\n\nconstexpr const char *DRIVER_CONF = \"driver\";\n\nenum MODELBOX_TOOL_DRIVER_COMMAND {\n  MODELBOX_TOOL_DRIVER_INFO,\n};\n\nstatic struct option driver_options[] = {\n    {\"info\", 0, nullptr, MODELBOX_TOOL_DRIVER_INFO},\n    {nullptr, 0, nullptr, 0},\n};\n\nenum MODELBOX_TOOL_DRIVER_INFO_COMMAND {\n  MODELBOX_TOOL_DRIVER_INFO_PATH,\n  MODELBOX_TOOL_DRIVER_INFO_FROM_CONF,\n  MODELBOX_TOOL_DRIVER_INFO_DETAILS,\n  MODELBOX_TOOL_DRIVER_INFO_DETAILS_FILTER_NAME,\n  MODELBOX_TOOL_DRIVER_INFO_FORMAT_JSON,\n  MODELBOX_TOOL_DRIVER_INFO_TYPE,\n};\n\nstatic struct option driver_info_options[] = {\n    {\"path\", 1, nullptr, MODELBOX_TOOL_DRIVER_INFO_PATH},\n    {\"conf\", 1, nullptr, MODELBOX_TOOL_DRIVER_INFO_FROM_CONF},\n    {\"details\", 0, nullptr, MODELBOX_TOOL_DRIVER_INFO_DETAILS},\n    {\"name\", 1, nullptr, MODELBOX_TOOL_DRIVER_INFO_DETAILS_FILTER_NAME},\n    {\"format-json\", 0, nullptr, MODELBOX_TOOL_DRIVER_INFO_FORMAT_JSON},\n    {\"type\", 1, nullptr, MODELBOX_TOOL_DRIVER_INFO_TYPE},\n    {nullptr, 0, nullptr, 
0},\n};\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandDriver)\n\nToolCommandDriver::ToolCommandDriver() = default;\nToolCommandDriver::~ToolCommandDriver() = default;\n\nstd::string ToolCommandDriver::GetHelp() {\n  char help[] =\n      \" driver options: \\n\"\n      \"   -info    List all driver information\\n\"\n      \"     -type               Filter driver type, support value: flowunit\\n\"\n      \"     -path               Scan additional path, format: dir1,dir2\\n\"\n      \"     -details            Show detail information\\n\"\n      \"        -name [name]     Filter name for details\\n\"\n      \"     -conf [toml file]   Read toml config, and list drivers\\n\"\n      \"     -format-json        export json format driver information\\n\"\n      \"\\n\";\n  return help;\n}\n\nint ToolCommandDriver::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n\n  if (argc == 1) {\n    std::cerr << GetHelp();\n    return 1;\n  }\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, driver_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_DRIVER_INFO:\n      optind = 1;\n      MODELBOX_COMMAND_SUB_UNLOCK();\n      return RunInfoCommand(MODELBOX_COMMAND_SUB_ARGC,\n                            MODELBOX_COMMAND_SUB_ARGV);\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return 0;\n}\n\nint ToolCommandDriver::RunInfoCommand(int argc, char *argv[]) {\n  int cmdtype = 0;\n  ConfigurationBuilder config_builder;\n  std::shared_ptr<Configuration> config_merge;\n  enum DRIVER_OUTFORMAT format = DRIVER_OUTFORMAT_LIST;\n  enum DRIVER_TYPE type = DRIVER_TYPE_ALL;\n  std::string filter_name;\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, driver_info_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_DRIVER_INFO_PATH: {\n      std::string path = optarg;\n      std::vector<std::string> paths = modelbox::StringSplit(path, ',');\n      config_builder.AddProperty(std::string(DRIVER_CONF) + \".\" + DRIVER_DIR,\n                                 paths);\n      
config_builder.AddProperty(\n          std::string(DRIVER_CONF) + \".\" + DRIVER_SKIP_DEFAULT, \"0\");\n      break;\n    }\n    case MODELBOX_TOOL_DRIVER_INFO_DETAILS:\n      format = DRIVER_OUTFORMAT_DETAILS;\n      break;\n    case MODELBOX_TOOL_DRIVER_INFO_DETAILS_FILTER_NAME:\n      format = DRIVER_OUTFORMAT_DETAILS;\n      filter_name = optarg;\n      break;\n    case MODELBOX_TOOL_DRIVER_INFO_FROM_CONF: {\n      std::string confile_file = optarg;\n      config_merge = config_builder.Build(confile_file);\n      if (config_merge == nullptr) {\n        fprintf(stderr, \"parser config '%s' failed, %s\\n\", confile_file.c_str(),\n                StatusError.WrapErrormsgs().c_str());\n        return 1;\n      }\n      break;\n    }\n    case MODELBOX_TOOL_DRIVER_INFO_FORMAT_JSON:\n      format = DRIVER_OUTFORMAT_JSON;\n      break;\n    case MODELBOX_TOOL_DRIVER_INFO_TYPE: {\n      std::string t = optarg;\n      if (t == \"flowunit\") {\n        type = DRIVER_TYPE_FLOWUNIT;\n      }\n      break;\n    }\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  auto config = config_builder.Build();\n  if (config_merge) {\n    config->Add(*config_merge);\n  }\n\n  auto status = OutputInfo(config, type, format, filter_name);\n  if (!status) {\n    fprintf(stderr, \"display driver info failed, %s\\n\",\n            status.WrapErrormsgs().c_str());\n    return 1;\n  }\n\n  return 0;\n}\n\nStatus ToolCommandDriver::OutputDriverInfo(\n    const std::shared_ptr<Configuration> &config, enum DRIVER_OUTFORMAT format,\n    const std::string &filter_name) {\n  if (format == DRIVER_OUTFORMAT_LIST) {\n    return DisplayDriverInList(config);\n  }\n\n  if (format == DRIVER_OUTFORMAT_JSON) {\n    return DisplayDriverInJson(config);\n  }\n\n  if (format == DRIVER_OUTFORMAT_DETAILS) {\n    return DisplayDriverInDetails(config, filter_name);\n  }\n\n  return STATUS_NOTSUPPORT;\n}\n\nStatus ToolCommandDriver::OutputFlowunitInfo(\n    const 
std::shared_ptr<Configuration> &config, enum DRIVER_OUTFORMAT format,\n    const std::string &filter_name) {\n  if (format == DRIVER_OUTFORMAT_LIST) {\n    return DisplayFlowunitInList(config);\n  }\n\n  if (format == DRIVER_OUTFORMAT_JSON) {\n    return DisplayFlowunitInJson(config);\n  }\n\n  if (format == DRIVER_OUTFORMAT_DETAILS) {\n    return DisplayFlowunitInDetails(config, filter_name);\n  }\n\n  return STATUS_NOTSUPPORT;\n}\n\nStatus ToolCommandDriver::OutputInfo(\n    const std::shared_ptr<Configuration> &config, enum DRIVER_TYPE type,\n    enum DRIVER_OUTFORMAT format, const std::string &filter_name) {\n  if (type == DRIVER_TYPE_ALL) {\n    return OutputDriverInfo(config, format, filter_name);\n  }\n  if (type == DRIVER_TYPE_FLOWUNIT) {\n    return OutputFlowunitInfo(config, format, filter_name);\n  }\n\n  return STATUS_NOTSUPPORT;\n}\n\nStatus ToolCommandDriver::DisplayDriverInList(\n    const std::shared_ptr<Configuration> &config) {\n  auto drivers = std::make_shared<Drivers>();\n  auto status = drivers->Initialize(config->GetSubConfig(DRIVER_CONF));\n  if (!status) {\n    fprintf(stderr, \"initialize drivers failed, %s\\n\",\n            status.WrapErrormsgs().c_str());\n    return {status, \"initialize failed.\"};\n  }\n\n  status = drivers->Scan();\n  if (!status) {\n    fprintf(stderr, \"scan failed, %s\\n\", status.WrapErrormsgs().c_str());\n    return {status, \"scan failed.\"};\n  }\n  int index = 0;\n  auto drivers_list = drivers->GetAllDriverList();\n  printf(\"Drivers Information:\\n\");\n  printf(\"%-25s%-30s%-10s%-10s%s\\n\", \"Class\", \"Name\", \"Type\", \"Version\",\n         \"Path\");\n  for (const auto &driver : drivers_list) {\n    auto desc = driver->GetDriverDesc();\n    printf(\"%-25s%-30s%-10s%-10s%s\\n\", desc->GetClass().c_str(),\n           desc->GetName().c_str(), desc->GetType().c_str(),\n           desc->GetVersion().c_str(), desc->GetFilePath().c_str());\n    index++;\n  }\n\n  return STATUS_OK;\n}\n\nvoid 
ToolCommandDriver::DisplayFlowunitByFilter(\n    const std::shared_ptr<FlowUnitInfo> &flowunit_info,\n    const std::string &filter_name) {\n  if (flowunit_info == nullptr) {\n    printf(\"DisplayFlowunitByFilter:  flowunit_info is nullptr.\");\n    return;\n  }\n  int index = 0;\n  auto flow_list = flowunit_info->GetFlowUnitManager()->GetAllFlowUnitDesc();\n  for (const auto &flow : flow_list) {\n    auto driver_desc = flow->GetDriverDesc();\n    if (filter_name.length() > 0 && (filter_name != driver_desc->GetName() &&\n                                     filter_name != driver_desc->GetType() &&\n                                     filter_name != flow->GetFlowUnitName())) {\n      continue;\n    }\n    index++;\n    if (index == 1) {\n      printf(\"FlowUnit Information\\t\\t:\\n\");\n    }\n    DisplayFlowunit(flow);\n  }\n}\n\nStatus ToolCommandDriver::DisplayDriverInDetails(\n    const std::shared_ptr<Configuration> &config,\n    const std::string &filter_name) {\n  auto flowunit_info = std::make_shared<FlowUnitInfo>();\n  int index = 0;\n  auto status = flowunit_info->Init(config);\n  if (!status) {\n    std::cerr << status << std::endl;\n    return status;\n  }\n\n  auto device_desc_list =\n      flowunit_info->GetDeviceManager()->GetDeviceDescList();\n  for (const auto &itr_list : device_desc_list) {\n    for (const auto &itr_device : itr_list.second) {\n      auto desc = itr_device.second;\n      if (filter_name.length() > 0 && filter_name != itr_device.first) {\n        continue;\n      }\n      index++;\n      if (index == 1) {\n        printf(\"Device Information\\t\\t:\\n\");\n        printf(\"--------------------------------\\t\\t:\\n\");\n      }\n      printf(\"name:\\t\\t%s\\n\", itr_device.first.c_str());\n      printf(\"type:\\t\\t%s\\n\", desc->GetDeviceType().c_str());\n      printf(\"version:\\t\\t%s\\n\", desc->GetDeviceVersion().c_str());\n      printf(\"description: %s\\n\", desc->GetDeviceDesc().c_str());\n      printf(\"\\n\");\n    }\n  
}\n\n  auto drivers_list = flowunit_info->GetDriverManager()->GetAllDriverList();\n  index = 0;\n  for (const auto &driver : drivers_list) {\n    auto desc = driver->GetDriverDesc();\n    if (filter_name.length() > 0 && filter_name != desc->GetName()) {\n      continue;\n    }\n    index++;\n    if (index == 1) {\n      printf(\"Driver Information\\t\\t:\\n\");\n      printf(\"--------------------------------\\t\\t:\\n\");\n    }\n    printf(\"driver name:\\t\\t%s\\n\", desc->GetName().c_str());\n    printf(\"device type:\\t\\t%s\\n\", desc->GetType().c_str());\n    printf(\"version:\\t\\t%s\\n\", desc->GetVersion().c_str());\n    printf(\"class:\\t\\t%s\\n\", desc->GetClass().c_str());\n    printf(\"description: %s\\n\", desc->GetDescription().c_str());\n    printf(\"\\n\");\n  }\n\n  DisplayFlowunitByFilter(flowunit_info, filter_name);\n\n  return STATUS_OK;\n}\n\nStatus ToolCommandDriver::DisplayDriverInJson(\n    const std::shared_ptr<Configuration> &config) {\n  FlowUnitInfo flowunit_info;\n\n  auto status = flowunit_info.Init(config);\n  if (!status) {\n    std::cerr << status << std::endl;\n    return status;\n  }\n\n  std::string info;\n  status = flowunit_info.GetInfoInJson(&info);\n  if (!status) {\n    std::cerr << status << std::endl;\n    return status;\n  }\n\n  std::cout << info << std::endl;\n\n  return STATUS_OK;\n}\n\nStatus ToolCommandDriver::DisplayFlowunitInList(\n    const std::shared_ptr<Configuration> &config) {\n  FlowUnitInfo flowunit_info;\n\n  auto status = flowunit_info.Init(config);\n  if (!status) {\n    std::cerr << status << std::endl;\n    return status;\n  }\n\n  auto flowunit_list = flowunit_info.GetFlowUnitManager()->GetAllFlowUnitDesc();\n\n  printf(\"Flowunit Information:\\n\");\n  printf(\"%-30s%-15s%-15s%-30s%-15s%-30s%-30s\\n\", \"FlowunitName\", \"DeviceType\",\n         \"GroupType\", \"DriverName\", \"Version\", \"InputPort\", \"OutputPort\");\n\n  for (const auto &flowunit : flowunit_list) {\n    std::string 
input_ports;\n    std::string output_ports;\n    for (const auto &input : flowunit->GetFlowUnitInput()) {\n      auto s =\n          (input_ports == \"\") ? input.GetPortName() : \",\" + input.GetPortName();\n      input_ports += s;\n    }\n    for (const auto &output : flowunit->GetFlowUnitOutput()) {\n      auto s = (output_ports == \"\") ? output.GetPortName()\n                                    : \",\" + output.GetPortName();\n      output_ports += s;\n    }\n    auto driverdesc = flowunit->GetDriverDesc();\n    printf(\"%-30s%-15s%-15s%-30s%-15s%-30s%-30s\\n\",\n           flowunit->GetFlowUnitName().c_str(), driverdesc->GetType().c_str(),\n           flowunit->GetGroupType().c_str(), driverdesc->GetName().c_str(),\n           driverdesc->GetVersion().c_str(), input_ports.c_str(),\n           output_ports.c_str());\n  }\n\n  return STATUS_OK;\n}\n\nvoid ToolCommandDriver::DisplayFlowunit(\n    std::shared_ptr<FlowUnitDesc> flowunit) {\n  auto driverdesc = flowunit->GetDriverDesc();\n  printf(\"--------------------------------------\\n\");\n  printf(\"flowunit name\\t: %s\\n\", flowunit->GetFlowUnitName().c_str());\n  printf(\"type\\t\\t: %s\\n\", driverdesc->GetType().c_str());\n  printf(\"driver name\\t: %s\\n\", driverdesc->GetName().c_str());\n  printf(\"version\\t\\t: %s\\n\", driverdesc->GetVersion().c_str());\n  printf(\"description\\t: %s\\n\", flowunit->GetDescription().c_str());\n  printf(\"group\\t\\t: %s\\n\",\n         [&]() -> std::string {\n           auto type = flowunit->GetGroupType();\n           if (type.empty()) {\n             return \"Generic\";\n           }\n\n           return type;\n         }()\n                      .c_str());\n\n  int index = 0;\n  for (const auto &input : flowunit->GetFlowUnitInput()) {\n    index++;\n    if (index == 1) {\n      printf(\"inputs\\t\\t:\\n\");\n    }\n    printf(\"  input index\\t: %d\\n\", index);\n    printf(\"    name\\t: %s\\n\", input.GetPortName().c_str());\n    printf(\"    type\\t: 
%s\\n\", input.GetPortType().c_str());\n    printf(\"    device\\t: %s\\n\", input.GetDeviceType().c_str());\n  }\n\n  index = 0;\n  for (const auto &output : flowunit->GetFlowUnitOutput()) {\n    index++;\n    if (index == 1) {\n      printf(\"outputs\\t\\t:\\n\");\n    }\n    printf(\"  output index\\t: %d\\n\", index);\n    printf(\"    name\\t: %s\\n\", output.GetPortName().c_str());\n    printf(\"    device\\t: %s\\n\", output.GetDeviceType().c_str());\n  }\n\n  index = 0;\n  for (auto &option : flowunit->GetFlowUnitOption()) {\n    index++;\n    if (index == 1) {\n      printf(\"options\\t\\t:\\n\");\n    }\n    printf(\"  option\\t: %d\\n\", index);\n    printf(\"    name\\t: %s\\n\", option.GetOptionName().c_str());\n    printf(\"    default\\t: %s\\n\", option.GetOptionDefault().c_str());\n    printf(\"    desc\\t: %s\\n\", option.GetOptionDesc().c_str());\n    printf(\"    required\\t: %s\\n\", option.IsRequire() ? \"true\" : \"false\");\n    printf(\"    type\\t: %s\\n\", option.GetOptionType().c_str());\n    auto values = option.GetOptionValues();\n    if (values.size() > 0) {\n      nlohmann::json json_values;\n      for (const auto &value : values) {\n        printf(\"        %s\\t: %s\\n\", value.first.c_str(), value.second.c_str());\n      }\n    }\n  }\n  printf(\"\\n\");\n}\n\nStatus ToolCommandDriver::DisplayFlowunitInDetails(\n    const std::shared_ptr<Configuration> &config,\n    const std::string &filter_name) {\n  auto flowunit_info = std::make_shared<FlowUnitInfo>();\n  auto status = flowunit_info->Init(config);\n  if (!status) {\n    std::cerr << status << std::endl;\n    return status;\n  }\n\n  DisplayFlowunitByFilter(flowunit_info, filter_name);\n\n  return STATUS_OK;\n}\n\nStatus ToolCommandDriver::DisplayFlowunitInJson(\n    const std::shared_ptr<Configuration> &config) {\n  DisplayDriverInJson(config);\n  return STATUS_OK;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/tool/driver.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_TOOL_DRIVER_H\n#define MODELBOX_TOOL_DRIVER_H\n\n#include <modelbox/base/configuration.h>\n#include <modelbox/base/driver.h>\n\n#include \"modelbox/common/command.h\"\n#include \"modelbox/common/flowunit_info.h\"\n\nnamespace modelbox {\n\nenum DRIVER_OUTFORMAT {\n  DRIVER_OUTFORMAT_LIST,\n  DRIVER_OUTFORMAT_DETAILS,\n  DRIVER_OUTFORMAT_JSON,\n};\n\nenum DRIVER_TYPE {\n  DRIVER_TYPE_ALL,\n  DRIVER_TYPE_FLOWUNIT,\n};\n\nconstexpr const char *DRIVER_DESC = \"List all driver information\";\n\nclass ToolCommandDriver : public ToolCommand {\n public:\n  ToolCommandDriver();\n  ~ToolCommandDriver() override;\n\n  int Run(int argc, char *argv[]) override;\n\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"driver\"; };\n\n  std::string GetCommandDesc() override { return DRIVER_DESC; };\n\n protected:\n  int RunInfoCommand(int argc, char *argv[]);\n  Status OutputInfo(const std::shared_ptr<Configuration>& config,\n                    enum DRIVER_TYPE type, enum DRIVER_OUTFORMAT format,\n                    const std::string& filter_name);\n  Status OutputDriverInfo(const std::shared_ptr<Configuration>& config,\n                          enum DRIVER_OUTFORMAT format,\n                          const std::string& filter_name);\n  Status OutputFlowunitInfo(const 
std::shared_ptr<Configuration>& config,\n                            enum DRIVER_OUTFORMAT format,\n                            const std::string& filter_name);\n  Status DisplayDriverInList(const std::shared_ptr<Configuration>& config);\n  Status DisplayDriverInDetails(const std::shared_ptr<Configuration>& config,\n                                const std::string& filter_name);\n  Status DisplayDriverInJson(const std::shared_ptr<Configuration>& config);\n  Status DisplayFlowunitInList(const std::shared_ptr<Configuration>& config);\n  Status DisplayFlowunitInDetails(const std::shared_ptr<Configuration>& config,\n                                  const std::string& filter_name);\n  Status DisplayFlowunitInJson(const std::shared_ptr<Configuration>& config);\n  void DisplayFlowunit(std::shared_ptr<FlowUnitDesc> flowunit);\n  void DisplayFlowunitByFilter(\n      const std::shared_ptr<FlowUnitInfo>& flowunit_info,\n      const std::string& filter_name);\n};\n\n}  // namespace modelbox\n\n#endif"
  },
  {
    "path": "src/modelbox/tool/external_command.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"external_command.h\"\n\n#include <errno.h>\n#include <getopt.h>\n#include <modelbox/base/crypto.h>\n#include <modelbox/base/log.h>\n#include <modelbox/base/popen.h>\n#include <modelbox/base/utils.h>\n#include <openssl/evp.h>\n#include <stdio.h>\n#include <string.h>\n#include <termios.h>\n#include <unistd.h>\n\n#include <fstream>\n#include <iostream>\n#include <nlohmann/json.hpp>\n\nnamespace modelbox {\n\nExternalCommandKey::ExternalCommandKey() = default;\nExternalCommandKey::~ExternalCommandKey() = default;\n\nStatus ExternalCommandKey::SetExecuteCmd(const std::string &cmd) {\n  cmd_ = cmd;\n  return STATUS_OK;\n}\n\nvoid ExternalCommandKey::SetCommandName(const std::string &name) {\n  name_ = name;\n}\n\nvoid ExternalCommandKey::SetCommandDesc(const std::string &desc) {\n  desc_ = desc;\n}\n\nvoid ExternalCommandKey::SetHelpCmd(const std::string &help_cmd) {\n  help_cmd_ = help_cmd;\n}\n\nvoid ExternalCommandKey::SetTimeout(int timeout) { timeout_ = timeout; }\n\nint ExternalCommandKey::Run(int argc, char *argv[]) {\n  std::vector<std::string> cmd;\n\n  cmd.push_back(cmd_);\n  for (int i = 1; i < argc; i++) {\n    cmd.emplace_back(argv[i]);\n  }\n\n  modelbox::Popen p;\n  p.Open(cmd, timeout_, \"\", \"MODELBOX_ROOT=\" + modelbox_root_dir());\n  int ret = p.Close();\n  return 
WEXITSTATUS(ret);\n}\n\nstd::string ExternalCommandKey::GetHelp() {\n  modelbox::Popen p;\n  p.Open(help_cmd_, -1, \"\");\n  return \"\";\n}\n\nstd::string ExternalCommandKey::GetCommandName() {\n  return modelbox::GetBaseName(cmd_);\n}\n\nstd::string ExternalCommandKey::GetCommandDesc() { return desc_; }\n\nStatus ExternalCommandLoader::LoadCmds(const std::string &cmd_json_file) {\n  auto full_path = modelbox_full_path(cmd_json_file);\n  std::ifstream infile(cmd_json_file);\n  if (infile.fail()) {\n    std::cerr << \"read file \" << cmd_json_file << \" failed, \"\n              << modelbox::StrError(errno) << std::endl;\n    return {STATUS_BADCONF, modelbox::StrError(errno)};\n  }\n\n  Defer { infile.close(); };\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n\n  try {\n    auto conf = nlohmann::json::parse(data);\n    auto list = conf[\"cmd-list\"];\n    for (auto &cmd : list) {\n      auto name = cmd[\"name\"].get<std::string>();\n      auto exec = cmd[\"exec\"].get<std::string>();\n      auto desc = cmd[\"desc\"].get<std::string>();\n      auto timeout = -1;\n      if (cmd.contains(\"timeout\")) {\n        timeout = cmd[\"timeout\"].get<int>();\n      }\n      auto help_cmd = cmd[\"help-cmd\"].get<std::string>();\n\n      exec = modelbox_full_path(exec);\n      help_cmd = modelbox_full_path(help_cmd);\n\n      auto ext_cmd = std::make_shared<ExternalCommandKey>();\n      ext_cmd->SetExecuteCmd(exec);\n      ext_cmd->SetCommandName(name);\n      ext_cmd->SetCommandDesc(desc);\n      ext_cmd->SetHelpCmd(help_cmd);\n      ext_cmd->SetTimeout(timeout);\n      auto new_func = [ext_cmd]() -> std::shared_ptr<ExternalCommandKey> {\n        auto new_cmd = std::make_shared<ExternalCommandKey>();\n        *new_cmd = *ext_cmd;\n        return new_cmd;\n      };\n\n      MODELBOX_TOOL_ADD_COMMAND(new_func);\n    }\n  } catch (const std::exception &e) {\n    fprintf(stderr, \"Load external command failed, 
%s\\n\", e.what());\n    return {STATUS_BADCONF, e.what()};\n  }\n\n  return STATUS_OK;\n}\n\nStatus ExternalCommandLoader::Load(const std::string &path) {\n  std::vector<std::string> files;\n  std::string full_path = modelbox_full_path(path);\n  auto ret = modelbox::ListFiles(full_path, \"*.json\", &files, LIST_FILES_FILE);\n  if (!ret) {\n    return ret;\n  }\n\n  for (auto &file : files) {\n    ret = ExternalCommandLoader::LoadCmds(file);\n    if (!ret) {\n      continue;\n    }\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/modelbox/tool/external_command.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TOOL_EXTERNAL_COMMAND_H\n#define MODELBOX_TOOL_EXTERNAL_COMMAND_H\n\n#include <modelbox/base/status.h>\n\n#include \"modelbox/common/command.h\"\n\nnamespace modelbox {\n\nclass ExternalCommandKey : public ToolCommand {\n public:\n  ExternalCommandKey();\n  ~ExternalCommandKey() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  Status SetExecuteCmd(const std::string &cmd);\n  void SetCommandName(const std::string &name);\n  void SetCommandDesc(const std::string &desc);\n  void SetHelpCmd(const std::string &help_cmd);\n  void SetTimeout(int timeout);\n  std::string GetCommandName() override;\n  std::string GetCommandDesc() override;\n\n private:\n  std::string cmd_;\n  std::string name_;\n  std::string desc_;\n  std::string help_cmd_;\n  int timeout_{-1};\n};\n\nconstexpr const char *EXTERNAL_TOOLS_PATH = \"${MODELBOX_ROOT}/usr/local/share/modelbox/tools\";\n\nclass ExternalCommandLoader {\n public:\n  static Status Load(const std::string &path = EXTERNAL_TOOLS_PATH);\n\n private:\n  static Status LoadCmds(const std::string &cmd_json_file);\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/modelbox/tool/flow.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"flow.h\"\n\n#include <modelbox/modelbox.h>\n#include <modelbox/flow.h>\n#include <getopt.h>\n#include <stdio.h>\n\n#include <fstream>\n#include <iostream>\n#include <nlohmann/json.hpp>\n\nnamespace modelbox {\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandFlow)\n\nenum MODELBOX_TOOL_FLOW_COMMAND {\n  MODELBOX_TOOL_FLOW_RUN,\n  MODELBOX_TOOL_FLOW_CONF_CONVERT,\n};\n\nenum MODELBOX_TOOL_FLOW_CONVERT_COMMAND {\n  MODELBOX_TOOL_FLOW_CONVERT_COMMAND_PATH,\n  MODELBOX_TOOL_FLOW_CONVERT_COMMAND_OUTFORMAT,\n};\n\nstatic struct option flow_convert_options[] = {\n    {\"path\", 1, nullptr, MODELBOX_TOOL_FLOW_CONVERT_COMMAND_PATH},\n    {\"out-format\", 1, nullptr, MODELBOX_TOOL_FLOW_CONVERT_COMMAND_OUTFORMAT},\n    {nullptr, 0, nullptr, 0},\n};\nstatic struct option flow_options[] = {\n    {\"run\", 1, nullptr, MODELBOX_TOOL_FLOW_RUN},\n    {\"conf-convert\", 0, nullptr, MODELBOX_TOOL_FLOW_CONF_CONVERT},\n    {nullptr, 0, nullptr, 0},\n};\n\nToolCommandFlow::ToolCommandFlow() = default;\nToolCommandFlow::~ToolCommandFlow() = default;\n\nstd::string ToolCommandFlow::GetHelp() {\n  char help[] =\n      \" option:\\n\"\n      \"   -run [toml file]          run flow from file\\n\"\n      \"   -conf-convert             convert graph file format to json or \"\n      \"toml\\n\"\n      \"     -path [conf file]       graph 
file path\\n\"\n      \"     -out-format [json|toml| output format, default is toml\\n\"\n      \"\\n\";\n  return help;\n}\n\nint ToolCommandFlow::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n  int ret = -1;\n\n  if (argc == 1) {\n    std::cerr << GetHelp();\n    return 1;\n  }\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, flow_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_FLOW_RUN:\n      return RunFlow(optarg);\n    case MODELBOX_TOOL_FLOW_CONF_CONVERT:\n      optind = 1;\n      MODELBOX_COMMAND_SUB_UNLOCK();\n      return RunConfConvertCommand(MODELBOX_COMMAND_SUB_ARGC,\n                                   MODELBOX_COMMAND_SUB_ARGV);\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return ret;\n}\n\nint ToolCommandFlow::RunFlow(const std::string &file) {\n  auto flow = std::make_shared<modelbox::Flow>();\n  MBLOG_INFO << \"run flow \" << file;\n  auto ret = flow->Init(file);\n  if (!ret) {\n    MBLOG_ERROR << \"init flow failed, \" << ret.WrapErrormsgs();\n    return 1;\n  }\n\n  ret = flow->Build();\n  if (!ret) {\n    MBLOG_ERROR << \"build flow failed, \" << ret.WrapErrormsgs();\n    return 1;\n  }\n\n  flow->RunAsync();\n\n  ret = flow->Wait();\n  if (!ret) {\n    MBLOG_ERROR << \"run flow failed, \" << ret.WrapErrormsgs();\n    return 1;\n  }\n\n  flow->Stop();\n  MBLOG_INFO << \"run flow \" << file << \" success\";\n\n  return 0;\n}\n\nint ToolCommandFlow::RunConfConvertCommand(int argc, char *argv[]) {\n  int cmdtype = 0;\n  ConfigurationBuilder config_builder;\n  std::shared_ptr<Configuration> config_merge;\n  std::string path;\n  std::string format = \"toml\";\n  std::string out_result;\n  modelbox::Status ret;\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, flow_convert_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_FLOW_CONVERT_COMMAND_PATH:\n      path = optarg;\n      break;\n    case MODELBOX_TOOL_FLOW_CONVERT_COMMAND_OUTFORMAT:\n      format = optarg;\n      break;\n    default:\n      break;\n  }\n  
MODELBOX_COMMAND_GETOPT_END()\n  if (path.length() == 0 || format.length() == 0) {\n    std::cerr << \"please input conf file path and format.\" << std::endl;\n    return 1;\n  }\n\n  std::ifstream infile(path);\n  if (infile.fail()) {\n    std::cerr << \"read file \" << path << \" failed, \" << modelbox::StrError(errno)\n              << std::endl;\n    return 1;\n  }\n\n  Defer { infile.close(); };\n  std::string data((std::istreambuf_iterator<char>(infile)),\n                   std::istreambuf_iterator<char>());\n\n  if (format == \"json\") {\n    ret = modelbox::TomlToJson(data, &out_result, true);\n  } else if (format == \"toml\") {\n    ret = modelbox::JsonToToml(data, &out_result);\n  } else {\n    std::cerr << \"output format is not supported\" << std::endl;\n    return 1;\n  }\n\n  if (!ret) {\n    std::cerr << \"convert failed, \" << ret.WrapErrormsgs() << std::endl;\n    return 1;\n  }\n\n  std::cout << out_result << std::endl;\n\n  return 0;\n}  // namespace modelbox\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/tool/flow.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TOOL_FLOW_H\n#define MODELBOX_TOOL_FLOW_H\n\n#include \"modelbox/common/command.h\"\nnamespace modelbox {\n\nconstexpr const char *FLOW_DESC = \"Run flow, convert config file format\";\n\nclass ToolCommandFlow : public ToolCommand {\n public:\n  ToolCommandFlow();\n  ~ToolCommandFlow() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"flow\"; };\n  std::string GetCommandDesc() override { return FLOW_DESC; };\n\n protected:\n  int RunFlow(const std::string &file);\n  int RunConfConvertCommand(int argc, char *argv[]);\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/modelbox/tool/help.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"help.h\"\n\n#include <modelbox/modelbox.h>\n#include <modelbox/flow.h>\n#include <getopt.h>\n#include <stdio.h>\n\n#include <fstream>\n#include <nlohmann/json.hpp>\n\nnamespace modelbox {\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandHelp)\n\nToolCommandHelp::ToolCommandHelp() = default;\nToolCommandHelp::~ToolCommandHelp() = default;\n\nstd::string ToolCommandHelp::GetHelp() {\n  char help[] =\n      \"help [cmd]    Display help for command [cmd]\"\n      \"\\n\";\n  return help;\n}\n\nint ToolCommandHelp::Run(int argc, char *argv[]) {\n  if (argc <= 1) {\n    fprintf(stderr, \"please input command for help, try modelbox-tool cmd [cmd]\\n\");\n    return -1;\n  }\n\n  auto cmd = modelbox::ToolCommandList::Instance()->GetCommand(argv[1]);\n  if (cmd == nullptr) {\n    fprintf(stderr, \"Command %s not found.\\n\", argv[1]);\n    return -1;\n  }\n\n  std::cout << \"modelbox-tool \" << cmd->GetCommandName() << \" [OPTION]...\"\n            << std::endl;\n  std::cout << cmd->GetHelp();\n\n  return 0;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/modelbox/tool/help.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TOOL_HELP_H\n#define MODELBOX_TOOL_HELP_H\n\n#include \"modelbox/common/command.h\"\nnamespace modelbox {\n\nconstexpr const char *HELP_DESC = \"Display command help\";\n\nclass ToolCommandHelp : public ToolCommand {\n public:\n  ToolCommandHelp();\n  ~ToolCommandHelp() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"help\"; };\n  std::string GetCommandDesc() override { return HELP_DESC; };\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/modelbox/tool/key.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"key.h\"\n\n#include <errno.h>\n#include <getopt.h>\n#include <modelbox/base/crypto.h>\n#include <modelbox/base/utils.h>\n#include <openssl/evp.h>\n#include <stdio.h>\n#include <string.h>\n#include <termios.h>\n#include <unistd.h>\n\n#include <fstream>\n#include <iostream>\n#include <memory>\n\nnamespace modelbox {\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandKey)\n\nenum MODELBOX_TOOL_KEY_COMMAND {\n  MODELBOX_TOOL_KEY_PASS,\n  MODELBOX_TOOL_KEY_MODEL,\n};\n\nstatic struct option key_options[] = {\n    {\"pass\", 0, nullptr, MODELBOX_TOOL_KEY_PASS},\n    {\"model\", 1, nullptr, MODELBOX_TOOL_KEY_MODEL},\n    {nullptr, 0, nullptr, 0},\n};\n\nenum MODELBOX_TOOL_KEY_PASS_COMMAND {\n  MODELBOX_TOOL_KEY_PASS_NON_SYSRELATED,\n};\n\nstatic struct option key_pass_option[] = {\n    {\"n\", 0, nullptr, MODELBOX_TOOL_KEY_PASS_NON_SYSRELATED},\n    {nullptr, 0, nullptr, 0},\n};\n\nconstexpr int ASCII_ETX = 0x3;\nconstexpr int ASCII_BACKSPACE = 127;\nconstexpr int ASCII_DEL = 126;\nconstexpr int AES256_KEY_LEN = 32;\nconstexpr int ENCRYPT_BLOCK_SIZE = (AES256_KEY_LEN * 256);\n\nToolCommandKey::ToolCommandKey() = default;\nToolCommandKey::~ToolCommandKey() = default;\n\nstd::string ToolCommandKey::GetHelp() {\n  char help[] =\n      \" action:\\n\"\n      \"   -pass     Encrypt password, the password can be 
environment\\n\"\n      \"variables 'MODELBOX_PASSWORD' or read from stdin\\n\"\n      \"   -model [model file]          Encrypt model or file, the password can \"\n      \"be environment\\n\"\n      \"variables 'MODELBOX_PASSWORD' or read from stdin\\n\"\n      \"Important! Model Encryption may be unsafe if rootkey and en_pass are \"\n      \"exposured\\n\"\n      \"   -n       None system related password\\n\"\n      \"\\n\";\n  return help;\n}\n\nStatus OpenFile(const std::string &plain_path, const std::string &encypt_path,\n                std::ifstream &fplain, std::ofstream &fencypt) {\n  fplain.open(plain_path, std::ios::binary);\n  if (fplain.fail() || !fplain.is_open()) {\n    std::cout << \"open model '\" << plain_path << \"' failed, \"\n              << modelbox::StrError(errno);\n    return STATUS_FAULT;\n  }\n\n  fencypt.open(encypt_path, std::ios::binary);\n  if (fencypt.fail() || !fencypt.is_open()) {\n    std::cout << \"write en_model '\" << encypt_path << \" failed, \"\n              << modelbox::StrError(errno);\n    return STATUS_FAULT;\n  }\n  return STATUS_SUCCESS;\n}\n\nStatus EncryptWithFile(const std::string &plain_path,\n                       const std::string &encypt_path, unsigned char *key,\n                       unsigned char *iv) {\n  std::ifstream fplain;\n  std::ofstream fencypt;\n  auto ret = OpenFile(plain_path, encypt_path, fplain, fencypt);\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  Defer {\n    fplain.close();\n    fencypt.close();\n  };\n\n  std::shared_ptr<uint8_t> read_buf(new (std::nothrow)\n                                        uint8_t[ENCRYPT_BLOCK_SIZE],\n                                    [](const uint8_t *p) { delete[] p; });\n  std::shared_ptr<uint8_t> en_buf(\n      new (std::nothrow) uint8_t[ENCRYPT_BLOCK_SIZE + EVP_MAX_BLOCK_LENGTH + 1],\n      [](const uint8_t *p) { delete[] p; });\n\n  if (en_buf == nullptr || read_buf == nullptr) {\n    return {STATUS_NOMEM, \"no memory to encode\"};\n  }\n\n 
 std::shared_ptr<EVP_CIPHER_CTX> ctx;\n  const EVP_CIPHER *cipher = nullptr;\n  EVP_CIPHER_CTX *ctx_new = nullptr;\n  int len = 0;\n\n  if (read_buf == nullptr) {\n    return {STATUS_FAULT, \"read_buf new err\"};\n  }\n\n  cipher = EVP_get_cipherbyname(DEFAULT_CIPHER_AES256_CBC);\n  if (cipher == nullptr) {\n    return {STATUS_NOTSUPPORT, \"cipher not support aes256_cbc\"};\n  }\n\n  /* Create and initialise the context */\n  ctx_new = EVP_CIPHER_CTX_new();\n  if (ctx_new == nullptr) {\n    return {STATUS_NOMEM, \"create cipher failed.\"};\n  }\n\n  ctx.reset(ctx_new, [](EVP_CIPHER_CTX *ctx) { EVP_CIPHER_CTX_free(ctx); });\n\n  /* Initialise the encryption operation. IMPORTANT - ensure you use a key\n   * and IV size appropriate for your cipher\n   * In this example we are using 256 bit AES (i.e. a 256 bit key). The\n   * IV size for *most* modes is the same as the block size. For AES this\n   * is 128 bits */\n  if (1 != EVP_EncryptInit_ex(ctx.get(), cipher, nullptr, key, iv)) {\n    return {STATUS_FAULT, \"encrypt init failed.\"};\n  }\n\n  /* Provide the message to be encrypted, and obtain the encrypted output.\n   * EVP_EncryptUpdate can be called multiple times if necessary\n   */\n  while (!fplain.eof()) {\n    fplain.read((char *)read_buf.get(), ENCRYPT_BLOCK_SIZE);\n    int read_len = fplain.gcount();\n    if (read_len != ENCRYPT_BLOCK_SIZE && !fplain.eof()) {\n      return {STATUS_FAULT, \"Read file fail.\"};\n    }\n    if (1 != EVP_EncryptUpdate(ctx.get(), en_buf.get(), &len, read_buf.get(),\n                               read_len)) {\n      return {STATUS_FAULT, \"encrypt update failed.\"};\n    }\n    fencypt.write((char *)en_buf.get(), len);\n  }\n\n  /* Finalise the encryption. 
Further ciphertext bytes may be written at\n   * this stage.\n   */\n  uint8_t *pend = en_buf.get() + len;\n  if (1 != EVP_EncryptFinal_ex(ctx.get(), pend, &len)) {\n    return {STATUS_FAULT, \"encrypt final failed.\"};\n  }\n  fencypt.write((char *)pend, len);\n\n  return STATUS_OK;\n}\n\n/**\n * @brief Encrypt model\n * @param model_path model path\n * @param pass password in plain text\n * @param sysrelated Whether encryption system related\n * @param rootkey output rootkey\n * @param en_pass encrypted password\n * @param ciphername ciphter name, like aes-256-cbc\n * @return whether success\n */\nStatus ModelEncrypt(const std::string &model_path,\n                    const std::vector<char> &pass, bool sysrelated,\n                    std::string *rootkey, std::string *en_pass) {\n  std::vector<char> aes256_pass(pass);\n  // fill key with \"0\"\n  aes256_pass.resize(AES256_KEY_LEN, '\\0');\n  auto ret = PassEncrypt(aes256_pass, sysrelated, rootkey, en_pass,\n                         DEFAULT_CIPHER_AES256_CBC);\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  std::vector<unsigned char> iv;\n  iv.resize(IV_LEN + MAX_PASSWORD_LEN);\n  Base64Decode(*en_pass, &iv);\n\n  ret = EncryptWithFile(model_path, model_path + \".en\",\n                        (unsigned char *)aes256_pass.data(), iv.data());\n  if (ret != STATUS_SUCCESS) {\n    return ret;\n  }\n\n  return STATUS_SUCCESS;\n}\n\nint ToolCommandKey::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n  std::string fname;\n#if OPENSSL_API_COMPAT < 0x10100000L\n  OpenSSL_add_all_algorithms();\n  Defer { EVP_cleanup(); };\n#endif\n\n  if (argc == 1) {\n    std::cerr << GetHelp();\n    return 1;\n  }\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, key_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_KEY_MODEL:\n      fname = optarg;\n    case MODELBOX_TOOL_KEY_PASS:\n      optind = 1;\n      MODELBOX_COMMAND_SUB_UNLOCK();\n      return RunPassCommand(MODELBOX_COMMAND_SUB_ARGC,\n                            
MODELBOX_COMMAND_SUB_ARGV, fname);\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  return 0;\n}\n\nStatus ToolCommandKey::ReadPassword(std::string *pass) {\n  struct termios oldt;\n  struct termios newt;\n  char ch;\n  int num = 0;\n  char c_pass[MAX_PASSWORD_LEN];\n\n  if (isatty(STDIN_FILENO) == 0) {\n    std::cin >> *pass;\n    return STATUS_OK;\n  }\n\n  std::cout << \"Please input password: \";\n  if (tcgetattr(STDIN_FILENO, &oldt) != 0) {\n    return {STATUS_FAULT, modelbox::StrError(errno)};\n  }\n  Defer { tcsetattr(STDIN_FILENO, TCSANOW, &oldt); };\n\n  newt = oldt;\n  newt.c_lflag &= ~(ECHO | ICANON | ISIG);\n  tcsetattr(STDIN_FILENO, TCSANOW, &newt);\n\n  while (((ch = getchar()) != '\\n') && (num < MAX_PASSWORD_LEN - 1)) {\n    if (ch == ASCII_ETX) {\n      std::cout << std::endl;\n      return {STATUS_STOP};\n    }\n\n    if (ch == ASCII_BACKSPACE || ch == ASCII_DEL) {\n      if (num > 0) {\n        num--;\n      }\n      continue;\n    }\n\n    if (ch == EOF) {\n      if (num == 0) {\n        return {STATUS_EOF, \"Get input failed\"};\n      }\n      break;\n    }\n\n    c_pass[num] = ch;\n    num++;\n  }\n\n  c_pass[num] = '\\0';\n  *pass = c_pass;\n  std::cout << std::endl;\n\n  return STATUS_OK;\n}\n\nint ToolCommandKey::RunPassCommand(int argc, char *argv[], std::string &fname) {\n  int cmdtype = 0;\n  std::string rootkey;\n  std::string enpass;\n  bool sysrelated = true;\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, key_pass_option)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_KEY_PASS_NON_SYSRELATED:\n      sysrelated = false;\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  auto ret = EnKey(sysrelated, &rootkey, &enpass, fname);\n  if (ret == STATUS_STOP) {\n    return -1;\n  }\n\n  if (!ret) {\n    std::cerr << std::endl << \"encrypt password failed, \" << ret << std::endl;\n    return -1;\n  }\n\n  std::cout << \"Key: \" << rootkey << std::endl;\n  std::cout << \"Encrypted password: \" << enpass << 
std::endl;\n  if (!fname.empty()) {\n    std::cout << \"Encrypted Model Path: \" << fname + \".en\" << std::endl;\n  }\n  return 0;\n}\n\nStatus ToolCommandKey::EnKey(bool sysrelated, std::string *rootkey,\n                             std::string *enpass, std::string &fname) {\n  std::string pass;\n  auto ret = STATUS_OK;\n\n  const char *env_pass = getenv(\"MODELBOX_PASSWORD\");\n  if (env_pass) {\n    pass = env_pass;\n  } else {\n    ret = ReadPassword(&pass);\n    if (ret == STATUS_STOP) {\n      return ret;\n    }\n\n    if (ret != STATUS_OK) {\n      std::cerr << \"Read password failed, \" << ret << std::endl;\n      return STATUS_INVALID;\n    }\n\n    if (pass.length() == 0) {\n      return STATUS_NODATA;\n    }\n  }\n  std::vector<char> pass_vec(pass.begin(), pass.end());\n  if (!fname.empty()) {\n    return ModelEncrypt(fname, pass_vec, sysrelated, rootkey, enpass);\n  }\n  return PassEncrypt(pass_vec, sysrelated, rootkey, enpass);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/tool/key.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TOOL_KEY_H\n#define MODELBOX_TOOL_KEY_H\n\n#include <modelbox/base/status.h>\n\n#include \"modelbox/common/command.h\"\n\nnamespace modelbox {\n\nconstexpr const char *KEY_DESC = \"Key encrypt\";\n\nclass ToolCommandKey : public ToolCommand {\n public:\n  ToolCommandKey();\n  ~ToolCommandKey() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"key\"; };\n  std::string GetCommandDesc() override { return KEY_DESC; };\n\n private:\n  int RunPassCommand(int argc, char *argv[], std::string &fname);\n  Status EnKey(bool sysrelated, std::string *rootkey, std::string *enpass, std::string &fname);\n  Status ReadPassword(std::string *pass);\n};\n\n}  // namespace modelbox\n#endif"
  },
  {
    "path": "src/modelbox/tool/log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_SERVER_LOG_H_\n#define MODELBOX_SERVER_LOG_H_\n\n#include <modelbox/common/log.h>\n\nnamespace modelbox {\n\nextern std::shared_ptr<ModelboxServerLogger> kServerLogger;\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_SERVER_LOG_H_"
  },
  {
    "path": "src/modelbox/tool/main.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <getopt.h>\n#include <modelbox/base/utils.h>\n#include <signal.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <memory>\n#include <thread>\n\n#include \"driver.h\"\n#include \"external_command.h\"\n#include \"key.h\"\n#include \"log.h\"\n#include \"modelbox/common/log.h\"\n#include \"modelbox/common/utils.h\"\n\n#define TMP_BUFF_LEN_32 32\n#define MODELBOX_TOOL_LOG_PATH \\\n  \"${MODELBOX_ROOT}/var/log/modelbox/modelbox-tool.log\"\n\nstatic int g_sig_list[] = {\n    SIGIO,   SIGPWR,    SIGSTKFLT, SIGPROF, SIGINT,  SIGTERM,\n    SIGBUS,  SIGVTALRM, SIGTRAP,   SIGXCPU, SIGXFSZ, SIGILL,\n    SIGABRT, SIGFPE,    SIGSEGV,   SIGQUIT, SIGSYS,\n};\n\nstatic int g_sig_num = sizeof(g_sig_list) / sizeof(g_sig_list[0]);\nstatic bool kVerbose = false;\nstd::string kLogLevel = \"ERROR\";\nstd::string kLogFile;\nstd::shared_ptr<modelbox::ModelboxServerLogger> kToolLogger;\n\nenum MODELBOX_TOOL_COMMAND {\n  MODELBOX_TOOL_COMMAND_KEY,\n  MODELBOX_TOOL_COMMAND_DRIVER,\n  MODELBOX_TOOL_COMMAND_VERBOSE,\n  MODELBOX_TOOL_COMMAND_LOG_LEVEL,\n  MODELBOX_TOOL_COMMAND_LOG_PATH,\n  MODELBOX_TOOL_COMMAND_GET_MODELBOX_ROOT,\n  MODELBOX_TOOL_COMMAND_HELP,\n  MODELBOX_TOOL_SHOW_VERSION,\n};\n\nstatic struct option options[] = {\n    {\"verbose\", 0, nullptr, 
MODELBOX_TOOL_COMMAND_VERBOSE},\n    {\"log-level\", 1, nullptr, MODELBOX_TOOL_COMMAND_LOG_LEVEL},\n    {\"log-path\", 1, nullptr, MODELBOX_TOOL_COMMAND_LOG_PATH},\n    {\"get-modelbox-root\", 1, nullptr, MODELBOX_TOOL_COMMAND_GET_MODELBOX_ROOT},\n    {\"h\", 0, nullptr, MODELBOX_TOOL_COMMAND_HELP},\n    {\"v\", 0, nullptr, MODELBOX_TOOL_SHOW_VERSION},\n    {nullptr, 0, nullptr, 0},\n};\n\nstatic void showhelp() {\n  /* clang-format off */\n    char help[] = \"\"\n        \"Usage: modelbox-tool [OPTION]...\\n\"\n        \"modelbox tool main options: \\n\"\n        \"  -verbose      output log to screen.\\n\"\n        \"  -log-level    log level: DEBUG, INFO, NOTICE, WARN, ERROR, FATAL.\\n\"\n        \"  -log-path     log file: default : \" MODELBOX_TOOL_LOG_PATH \"\\n\"\n        \"  -h            show this help message.\\n\"\n        \"  -v            show modelbox tool version.\\n\"\n        \"\\n\"\n        \"show command help:\\n\"\n        \"  modelbox-tool help [cmd]    show help message for specific command\\n\"\n        \"\\n\";\n\n    printf(\"%s\", help);\n  /* clang-format on */\n  printf(\"modelbox-tool commands list:\\n\");\n  printf(\"Usage: modelbox-tool [cmd] [OPTION]...\\n\");\n  auto all_cmds = modelbox::ToolCommandList::Instance()->GetAllCommands();\n  for (const auto &cmd : all_cmds) {\n    printf(\"  %-10.10s \\t\\t%s\\n\", cmd->GetCommandName().c_str(),\n           cmd->GetCommandDesc().c_str());\n  }\n}\nstatic void modelbox_tool_sig_handler(int volatile sig_no, siginfo_t *sig_info,\n                                      void *volatile ptr) {\n  switch (sig_no) {\n    case SIGINT:\n    case SIGTERM:\n      exit(1);\n      break;\n    case SIGQUIT:\n      return;\n      break;\n    case SIGSEGV:\n    case SIGPIPE:\n    case SIGFPE:\n    case SIGABRT:\n    case SIGBUS:\n    case SIGILL: {\n      char buf[4096];\n      MBLOG_ERROR << \"Segment fault\"\n                  << \", Signal: \" << sig_no << \", Addr: \" << sig_info->si_addr\n            
      << \", Code: \" << sig_info->si_code << \", Caused by: \";\n      if (modelbox::modelbox_cpu_register_data(buf, sizeof(buf),\n                                               (ucontext_t *)ptr) == 0) {\n        MBLOG_ERROR << \"CPU Register Info:\\n\" << buf;\n      }\n      MBLOG_STACKTRACE(modelbox::LOG_FATAL);\n      sleep(1);\n    } break;\n    default:\n      break;\n  }\n\n  _exit(1);\n}\n\nstatic int modelbox_tool_init_bbox() {\n  if (modelbox::modelbox_sig_register(g_sig_list, g_sig_num,\n                                      modelbox_tool_sig_handler) != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  return 0;\n}\n\nint modelbox_tool_init_log() {\n  kToolLogger = std::make_shared<modelbox::ModelboxServerLogger>();\n  if (kToolLogger->Init(kLogFile, 1024 * 1024, 32, kVerbose) == false) {\n    fprintf(stderr, \"init logger failed.\\n\");\n    return 1;\n  }\n\n  ModelBoxLogger.SetLogger(kToolLogger);\n  auto log_level = modelbox::LogLevelStrToLevel(kLogLevel);\n  kToolLogger->SetLogLevel(log_level);\n\n  return 0;\n}\n\nint modelbox_tool_init() {\n  if (modelbox_tool_init_bbox() != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  if (modelbox_tool_init_log()) {\n    return 1;\n  }\n\n  /* if in standalone mode */\n  if (modelbox::modelbox_root_dir().length() > 0) {\n    std::string default_scanpath =\n        modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     MODELBOX_DEFAULT_DRIVER_PATH);\n    modelbox::Drivers::SetDefaultScanPath(default_scanpath);\n\n    std::string default_driver_info_path =\n        modelbox::modelbox_full_path(std::string(modelbox::MODELBOX_ROOT_VAR) +\n                                     \"/var/run/modelbox-driver-info\");\n    modelbox::Drivers::SetDefaultInfoPath(default_driver_info_path);\n  }\n\n  return 0;\n}\n\nint modelbox_tool_run(int argc, char *argv[]) {\n  if (argc <= 0) {\n    printf(\"Try 
-h for more information.\\n\");\n    return -1;\n  }\n\n  const char *action = argv[0];\n  auto cmd = modelbox::ToolCommandList::Instance()->GetCommand(action);\n  if (cmd == nullptr) {\n    printf(\"command %s not exist, try -h for more information.\\n\", action);\n    return -1;\n  }\n\n  return cmd->Run(argc, argv);\n}\n\nvoid modelbox_tool_stop() {}\n\nstatic void onexit() {}\n\n#ifdef BUILD_TEST\nint modelbox_tool_main(int argc, char *argv[])\n#else\nint main(int argc, char *argv[])\n#endif\n{\n  kLogFile = modelbox::modelbox_full_path(MODELBOX_TOOL_LOG_PATH);\n  int cmdtype = 0;\n\n  modelbox::ExternalCommandLoader::Load();\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_COMMAND_VERBOSE:\n      kVerbose = true;\n      break;\n    case MODELBOX_TOOL_COMMAND_LOG_LEVEL:\n      kLogLevel = optarg;\n      break;\n    case MODELBOX_TOOL_COMMAND_LOG_PATH:\n      kLogFile = modelbox::modelbox_full_path(optarg);\n      break;\n    case MODELBOX_TOOL_COMMAND_GET_MODELBOX_ROOT:\n      printf(\"%s\\n\", modelbox::modelbox_root_dir().c_str());\n      return 0;\n      break;\n    case MODELBOX_TOOL_COMMAND_HELP:\n      showhelp();\n      return 0;\n      break;\n    case MODELBOX_TOOL_SHOW_VERSION:\n      printf(\"modelbox-tool %s\\n\", modelbox::GetModelBoxVersion());\n      return 0;\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  if (argc <= 1) {\n    showhelp();\n    return 1;\n  }\n\n  Defer { onexit(); };\n  signal(SIGPIPE, SIG_IGN);\n\n  if (modelbox_tool_init() != 0) {\n    fprintf(stderr, \"init failed.\\n\");\n    return 1;\n  }\n\n  int argc_sub = argc - optind;\n  char **argv_sub = argv + optind;\n  optind = 1;\n  if (modelbox_tool_run(argc_sub, argv_sub) != 0) {\n    return 1;\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "src/modelbox/tool/server_command.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"server_command.h\"\n\n#include <errno.h>\n#include <getopt.h>\n#include <modelbox/base/crypto.h>\n#include <modelbox/base/utils.h>\n#include <netdb.h>\n#include <openssl/evp.h>\n#include <poll.h>\n#include <stdio.h>\n#include <string.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/un.h>\n#include <termios.h>\n#include <unistd.h>\n\n#include <iostream>\n\n#include \"modelbox/common/config.h\"\n#include \"modelbox/common/control_msg.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nREG_MODELBOX_TOOL_COMMAND(ToolCommandServer)\n\nenum MODELBOX_TOOL_SERVER_COMMAND {\n  MODELBOX_TOOL_SERVER_CONNECT,\n  MODELBOX_TOOL_SERVER_INFO_FROM_CONF,\n  MODELBOX_TOOL_SERVER_CHECKPORT,\n  MODELBOX_TOOL_SERVER_GETCONF,\n  MODELBOX_TOOL_SERVER_HELP,\n};\n\nstatic struct option server_options[] = {\n    {\"conn\", 1, nullptr, MODELBOX_TOOL_SERVER_CONNECT},\n    {\"conf\", 1, nullptr, MODELBOX_TOOL_SERVER_INFO_FROM_CONF},\n    {\"check-port\", 1, nullptr, MODELBOX_TOOL_SERVER_CHECKPORT},\n    {\"get-conf-value\", 1, nullptr, MODELBOX_TOOL_SERVER_GETCONF},\n    {\"h\", 0, nullptr, MODELBOX_TOOL_SERVER_HELP},\n    {nullptr, 0, nullptr, 0},\n};\n\nint CheckPort(const std::string &host) {\n  struct addrinfo hints;\n  struct addrinfo *result = nullptr;\n\n  memset_s(&hints, sizeof(hints), 0, 
sizeof(hints));\n  hints.ai_family = AF_UNSPEC;\n\n  std::string ip;\n  std::string port;\n\n  auto ret_val = modelbox::SplitIPPort(host, ip, port);\n  if (!ret_val) {\n    std::cerr << ret_val.Errormsg() << std::endl;\n    return 1;\n  }\n\n  auto ret = getaddrinfo(ip.c_str(), port.c_str(), &hints, &result);\n  if (ret != 0) {\n    std::cerr << \"check port failed, \" << gai_strerror(ret) << std::endl;\n    return 1;\n  }\n\n  Defer { freeaddrinfo(result); };\n\n  int sock = socket(result->ai_family, SOCK_STREAM, 0);\n  if (sock < 0) {\n    std::cerr << \"create socket failed\\n\";\n    return 1;\n  }\n  Defer { close(sock); };\n\n  if (bind(sock, result->ai_addr, result->ai_addrlen) != 0) {\n    if (errno == EADDRINUSE) {\n      /* in use */\n      return 2;\n    }\n\n    if (errno == EACCES) {\n      /* no permission */\n      return 3;\n    }\n\n    std::cerr << \"check failed, errno is \" << errno << \"\\n\";\n    return 1;\n  }\n\n  return 0;\n}\n\nint GetConfig(const std::string &conf_file, const std::string &key) {\n  std::shared_ptr<Configuration> config;\n  std::string confile_file_path = DEFAULT_MODELBOX_CONF;\n  if (conf_file.length() > 0) {\n    confile_file_path = conf_file;\n  }\n\n  confile_file_path = modelbox_full_path(confile_file_path);\n\n  config = LoadSubConfig(confile_file_path);\n  if (config == nullptr) {\n    std::cerr << \"conf file is invalid.\" << std::endl;\n    return modelbox::STATUS_INVALID;\n  }\n\n  auto values = config->GetStrings(key);\n  if (values.size() <= 0) {\n    fprintf(stderr, \"Not found key %s\\n\", key.c_str());\n    return 1;\n  }\n\n  for (const auto &value : values) {\n    std::cout << value << std::endl;\n  }\n\n  return 0;\n}\n\nToolCommandServer::ToolCommandServer() {\n  char tmp_var[] = \"/tmp/modelbox-tool.XXXXXXX\";\n  temp_fd_ = mkstemp(tmp_var);\n  if (temp_fd_ < 0) {\n    unix_path_ = \"/tmp/modelbox-tool.sock\";\n  } else {\n    unix_path_ = tmp_var;\n  }\n}\n\nToolCommandServer::~ToolCommandServer() {\n 
 CloseClient();\n  if (temp_fd_ > 0) {\n    close(temp_fd_);\n  }\n  unlink(unix_path_.c_str());\n}\n\nstd::string ToolCommandServer::GetHelp() {\n  char help[] =\n      \"Server command option:\\n\"\n      \"  -conn\\t\\t\\t  connect socket file, example: xxx.sock\\n\"\n      \"  -conf\\t\\t\\t  server conf file\\n\"\n      \"  -check-port\\t\\t\\t  check whether port can bind.\\n\"\n      \"  -get-conf-value\\t\\t\\t  get server conf value\\n\";\n  return help;\n}\n\nmodelbox::Status ToolCommandServer::InitClient(const std::string &connect_url) {\n  struct sockaddr_un client_sockaddr;\n  int unused __attribute__((unused));\n  auto ret = modelbox::STATUS_OK;\n  DeferCond { return ret != modelbox::STATUS_OK; };\n\n  struct stat stat_buf;\n  if (stat(connect_url.c_str(), &stat_buf) < 0) {\n    auto errmsg = \"cannot access control file: \" + connect_url +\n                  \", error: \" + modelbox::StrError(errno) +\n                  \". Maybe server is down, or try -h for help\";\n    std::cout << errmsg << std::endl;\n    return {modelbox::STATUS_PERMIT};\n  }\n\n  int fd = socket(AF_UNIX, SOCK_DGRAM, 0);\n  if (fd <= 0) {\n    std::string errmsg = \"create socket: \";\n    errmsg += modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    ret = {modelbox::STATUS_FAULT, errmsg};\n    return ret;\n  }\n  DeferCondAdd { close(fd); };\n\n  client_sockaddr.sun_family = AF_UNIX;\n  auto err =\n      strncpy_s(client_sockaddr.sun_path, sizeof(client_sockaddr.sun_path),\n                unix_path_.c_str(), unix_path_.length());\n  if (err != 0) {\n    MBLOG_ERROR << \"strncpy_s failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n  unlink(client_sockaddr.sun_path);\n\n  int rc =\n      bind(fd, (struct sockaddr *)&client_sockaddr, sizeof(client_sockaddr));\n  if (rc != 0) {\n    std::string errmsg = \"bind socket: \";\n    errmsg += modelbox::StrError(errno);\n    MBLOG_ERROR << errmsg;\n    ret = {modelbox::STATUS_FAULT, errmsg};\n    return ret;\n  }\n  auto ss = 
chmod(client_sockaddr.sun_path, 0660);\n  if (ss != 0) {\n    MBLOG_ERROR << \"ss chmod client ret: \" << ss << \", \"\n                << modelbox::StrError(errno);\n  }\n\n  unused = chown(client_sockaddr.sun_path, -1, stat_buf.st_gid);\n\n  client_fd_ = fd;\n  return modelbox::STATUS_OK;\n}\n\nvoid ToolCommandServer::CloseClient() {\n  if (client_fd_ > 0) {\n    close(client_fd_);\n    client_fd_ = -1;\n    unlink(unix_path_.c_str());\n  }\n}\n\nmodelbox::Status ToolCommandServer::SendMsg(\n    const std::shared_ptr<ControlMsg> &msg, const std::string &connect_url) {\n  modelbox::Status status = modelbox::STATUS_OK;\n  std::string errmsg;\n  struct sockaddr_un remote;\n  remote.sun_family = AF_UNIX;\n\n  auto ret = strncpy_s(remote.sun_path, sizeof(remote.sun_path),\n                       connect_url.c_str(), connect_url.length());\n  if (ret != 0) {\n    MBLOG_ERROR << \"strncpy_s failed.\";\n    return modelbox::STATUS_FAULT;\n  }\n\n  int rc = sendto(client_fd_, msg->GetData(), msg->GetDataLen(), 0,\n                  (struct sockaddr *)&remote, sizeof(remote));\n  if (rc <= 0) {\n    errmsg = \"send data to server failed, err: \";\n    errmsg += modelbox::StrError(errno);\n    if (errno == ENOENT) {\n      errmsg += \", No such file or directory. 
Maybe server is down\";\n      status = {modelbox::STATUS_NOENT, errmsg};\n    } else {\n      status = {modelbox::STATUS_FAULT, errmsg};\n    }\n\n    std::cerr << errmsg;\n  }\n\n  msg->Flip();\n\n  return status;\n}\n\nmodelbox::Status ToolCommandServer::SendCommand(\n    int argc, char *argv[], const std::string &connect_url) {\n  std::shared_ptr<ControlMsg> msg;\n  if (argc == 0) {\n    std::cout << GetHelp();\n    msg = std::make_shared<ControlMsgHelp>();\n  } else {\n    auto cmd_msg = std::make_shared<ControlMsgCmd>();\n    cmd_msg->SetArgs(argc, argv);\n    msg = cmd_msg;\n  }\n\n  auto ret = msg->Serialize();\n  if (!ret) {\n    return ret;\n  }\n\n  return SendMsg(msg, connect_url);\n}\n\nint ToolCommandServer::RecvCommand() {\n  int result = 0;\n  int len = 0;\n  int unused __attribute__((unused));\n  auto msg = std::make_shared<ControlMsg>();\n  struct sockaddr_un remote;\n  socklen_t addr_len = sizeof(remote);\n\n  struct pollfd fds[1];\n  int nfds = sizeof(fds) / sizeof(struct pollfd);\n\n  memset_s(fds, sizeof(fds), 0, sizeof(fds));\n  fds[0].fd = client_fd_;\n  fds[0].events = POLLIN;\n\n  while (true) {\n    int rc = poll(fds, nfds, 30 * 1000);\n    if (rc == 0) {\n      return -1;\n    }\n\n    if (rc < 0) {\n      continue;\n    }\n\n    len = recvfrom(client_fd_, msg->GetDataTail(), msg->GetRemainSpace(), 0,\n                   (sockaddr *)&remote, &addr_len);\n    if (len <= 0) {\n      return -1;\n    }\n\n    auto ret = msg->AppendDataLen(len);\n    if (!ret) {\n      return -1;\n    }\n    ret = msg->Unserialize();\n    if (ret == modelbox::STATUS_AGAIN) {\n      continue;\n    }\n    if (ret == modelbox::STATUS_INVALID) {\n      return -1;\n    }\n\n    int out_len = msg->GetMsgDataLen();\n    switch (msg->GetMsgType()) {\n      case SERVER_CONTROL_MSG_TYPE_OUTMSG:\n        if (msg->GetMsgData()[out_len - 1] == '\\0') {\n          out_len -= 1;\n        }\n        unused = write(STDOUT_FILENO, msg->GetMsgData(), out_len);\n        break;\n 
     case SERVER_CONTROL_MSG_TYPE_ERRMSG:\n        if (msg->GetMsgData()[out_len - 1] == '\\0') {\n          out_len -= 1;\n        }\n        unused = write(STDERR_FILENO, msg->GetMsgData(), out_len);\n        break;\n      case SERVER_CONTROL_MSG_TYPE_RESULT: {\n        auto new_msg = std::dynamic_pointer_cast<ControlMsgResult>(\n            ControlMsgBuilder::Build(msg));\n        if (new_msg == nullptr) {\n          return 1;\n        }\n        return new_msg->GetResult();\n      } break;\n      case SERVER_CONTROL_MSG_TYPE_ERR: {\n        auto new_msg = std::dynamic_pointer_cast<ControlMsgError>(\n            ControlMsgBuilder::Build(msg));\n        if (new_msg == nullptr) {\n          return 1;\n        }\n\n        MBLOG_ERROR << \"server return error, code: \" << new_msg->GetErrorCode()\n                    << \", message: \" << new_msg->GetErrorMsg();\n        return -1;\n      } break;\n      default:\n        break;\n    }\n\n    msg->Flip();\n  }\n\n  return result;\n}\n\nmodelbox::Status ToolCommandServer::GetSockFile(const std::string &conf_file,\n                                                std::string &connect_url) {\n#ifdef BUILD_TEST\n  connect_url = CONTROL_UNIX_PATH;\n  return modelbox::STATUS_OK;\n#else\n  std::shared_ptr<Configuration> config;\n  std::string confile_file_path = DEFAULT_MODELBOX_CONF;\n  if (conf_file.length() > 0) {\n    confile_file_path = conf_file;\n  }\n\n  confile_file_path = modelbox_full_path(confile_file_path);\n\n  config = LoadSubConfig(confile_file_path);\n  if (config == nullptr) {\n    std::cout << \"conf file is invalid.\" << std::endl;\n    return modelbox::STATUS_INVALID;\n  }\n\n  if (config->GetBool(\"control.enable\", false) == false) {\n    std::cout << \"server control function is disabled.\" << std::endl;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  connect_url = config->GetString(\"control.listen\");\n  connect_url = modelbox_full_path(connect_url);\n  if (connect_url.empty()) {\n    std::cout << 
\"control listen sock get failed.\" << std::endl;\n    return modelbox::STATUS_BADCONF;\n  }\n\n  return modelbox::STATUS_OK;\n#endif\n}\n\nint ToolCommandServer::Run(int argc, char *argv[]) {\n  int cmdtype = 0;\n  std::string connect_url;\n  std::string conf_file;\n  std::string get_conf_key;\n\n  MODELBOX_COMMAND_GETOPT_BEGIN(cmdtype, server_options)\n  switch (cmdtype) {\n    case MODELBOX_TOOL_SERVER_CONNECT:\n      connect_url = optarg;\n      break;\n    case MODELBOX_TOOL_SERVER_INFO_FROM_CONF:\n      conf_file = optarg;\n      break;\n    case MODELBOX_TOOL_SERVER_CHECKPORT:\n      return CheckPort(optarg);\n    case MODELBOX_TOOL_SERVER_GETCONF:\n      get_conf_key = optarg;\n      break;\n    case MODELBOX_TOOL_SERVER_HELP:\n      std::cout << GetHelp();\n      return 0;\n    default:\n      break;\n  }\n  MODELBOX_COMMAND_GETOPT_END()\n\n  if (get_conf_key.length()) {\n    return GetConfig(conf_file, get_conf_key);\n  }\n\n  modelbox::Status status = GetSockFile(conf_file, connect_url);\n  if (status != modelbox::STATUS_OK) {\n    return 1;\n  }\n\n  auto ret = InitClient(connect_url);\n  if (!ret) {\n    return 1;\n  }\n  Defer { CloseClient(); };\n\n  ret = SendCommand(argc - optind, argv + optind, connect_url);\n  if (!ret) {\n    return 1;\n  }\n\n  return RecvCommand();\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "src/modelbox/tool/server_command.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TOOL_SERVER_H\n#define MODELBOX_TOOL_SERVER_H\n\n#include <modelbox/base/status.h>\n#include <modelbox/common/control_msg.h>\n\n#include \"modelbox/common/command.h\"\n\nnamespace modelbox {\n\nconstexpr const char *SERVER_DESC = \"Server commands\";\nconstexpr const char *DEFAULT_MODELBOX_CONF = \"${MODELBOX_ROOT}/usr/local/etc/modelbox/modelbox.conf\";\n\nclass ToolCommandServer : public ToolCommand {\n public:\n  ToolCommandServer();\n  ~ToolCommandServer() override;\n\n  int Run(int argc, char *argv[]) override;\n  std::string GetHelp() override;\n\n  std::string GetCommandName() override { return \"server\"; };\n  std::string GetCommandDesc() override { return SERVER_DESC; };\n\n private:\n  modelbox::Status InitClient(const std::string &connect_url);\n  void CloseClient();\n  modelbox::Status SendCommand(\n      int argc, char *argv[],\n      const std::string &connect_url = CONTROL_UNIX_PATH);\n  modelbox::Status SendMsg(const std::shared_ptr<ControlMsg> &msg,\n                           const std::string &connect_url = CONTROL_UNIX_PATH);\n  int RecvCommand();\n  modelbox::Status GetSockFile(const std::string &conf_file,\n                             std::string &connect_url);\n  int client_fd_{-1};\n  std::string unix_path_;\n  int temp_fd_{-1};\n};\n\n}  // namespace 
modelbox\n#endif"
  },
  {
    "path": "src/python/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-python)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nif(NOT ${PYTHONLIBS_FOUND})\n    message(STATUS \"Disable python pip package\")\n    return()\nendif()\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${PYBIND11_INCLUDE_DIR})\ninclude_directories(${MODELBOX_COMMON_MODELBOX_API_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/include)\n\nfile(GLOB_RECURSE LIBMODELBOX_PYHON_SOURCES *.cpp *.cc *.c)\nset(EMPTY_SOURCE_FILE ${CMAKE_BINARY_DIR}/empty.cc)\nif (NOT EXISTS ${EMPTY_SOURCE_FILE})\n    file(WRITE ${EMPTY_SOURCE_FILE})\nendif()\nadd_library(modelbox-python-static STATIC ${LIBMODELBOX_PYHON_SOURCES})\nset_property(TARGET modelbox-python-static PROPERTY POSITION_INDEPENDENT_CODE ON)\ntarget_link_libraries(modelbox-python-static PRIVATE pybind11::module)\ntarget_link_libraries(modelbox-python-static PRIVATE ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\ntarget_link_libraries(modelbox-python-static PRIVATE ${LIBMODELBOX_SHARED})\nadd_dependencies(modelbox-python-static ${MODELBOX_COMMON_MODELBOX_API_LIBRARY})\n\n# so for test, with build RPATH\nset(MODELBOX_PYTHON_BINARY_DIR 
${CMAKE_CURRENT_BINARY_DIR}/build)\nadd_library(modelbox-python-build SHARED ${EMPTY_SOURCE_FILE})\ntarget_link_libraries(modelbox-python-build -Wl,--whole-archive modelbox-python-static -Wl,--no-whole-archive)\nset_target_properties(modelbox-python-build PROPERTIES \n    OUTPUT_NAME \"_modelbox\"\n    PREFIX \"${PYTHON_MODULE_PREFIX}\"\n    SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\n# so for release, without RPATH\nset(MODELBOX_PYTHON_TARGET_DIR ${CMAKE_CURRENT_BINARY_DIR}/target)\nadd_library(modelbox-python SHARED ${EMPTY_SOURCE_FILE})\ntarget_link_libraries(modelbox-python -Wl,--whole-archive modelbox-python-static -Wl,--no-whole-archive)\nset_target_properties(modelbox-python PROPERTIES \n    OUTPUT_NAME \"_modelbox\"\n    PREFIX \"${PYTHON_MODULE_PREFIX}\"\n    SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\nset_target_properties(modelbox-python PROPERTIES SKIP_BUILD_RPATH ON)\n\n# copy build version to build directory\nset_target_properties(modelbox-python-build\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_BINARY_DIR}/modelbox\"\n    LIBRARY_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_BINARY_DIR}/modelbox\"\n    RUNTIME_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_BINARY_DIR}/bin\"\n)\n\n# copy release version to release directory\nset_target_properties(modelbox-python\n    PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_TARGET_DIR}/modelbox\"\n    LIBRARY_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_TARGET_DIR}/modelbox\"\n    RUNTIME_OUTPUT_DIRECTORY \"${MODELBOX_PYTHON_TARGET_DIR}/bin\"\n)\n\nset(MODELBOX_PYTHON_RELEASE_DIR ${CMAKE_CURRENT_BINARY_DIR}/release)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py)\nset(PYTHON_CONFIG_IN ${CMAKE_CURRENT_LIST_DIR}/test/test_config.py.in)\nset(PYTHON_CONFIG_OUT ${CMAKE_CURRENT_BINARY_DIR}/test/test_config.py)\n\nadd_custom_target(python-files)\n# copy file to target\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND rm -fr 
${MODELBOX_PYTHON_TARGET_DIR}/*)\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${CMAKE_CURRENT_LIST_DIR}/modelbox ${MODELBOX_PYTHON_TARGET_DIR})\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${LICENSE_FILE} ${MODELBOX_PYTHON_TARGET_DIR}/ )\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${README_FILE} ${MODELBOX_PYTHON_TARGET_DIR}/ )\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${CMAKE_CURRENT_BINARY_DIR}/setup.py ${MODELBOX_PYTHON_TARGET_DIR}/ )\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${CMAKE_CURRENT_LIST_DIR}/MANIFEST.in ${MODELBOX_PYTHON_TARGET_DIR}/)\n\n# copy file to build, for testing\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND rm -fr ${MODELBOX_PYTHON_BINARY_DIR})\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${MODELBOX_PYTHON_TARGET_DIR} ${MODELBOX_PYTHON_BINARY_DIR})\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${CMAKE_CURRENT_LIST_DIR}/test ${MODELBOX_PYTHON_BINARY_DIR}/ )\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af ${CMAKE_CURRENT_BINARY_DIR}/test/* ${MODELBOX_PYTHON_BINARY_DIR}/test/ )\nadd_custom_command(TARGET python-files PRE_BUILD COMMAND rm -fr ${CMAKE_CURRENT_BINARY_DIR}/test/*.in)\nadd_dependencies(modelbox-python-build python-files)\nadd_dependencies(modelbox-python modelbox-python-build)\nadd_custom_command(TARGET modelbox-python \n    WORKING_DIRECTORY ${MODELBOX_PYTHON_TARGET_DIR}\n    POST_BUILD \n    COMMENT \"Building python wheel package..\"\n    COMMAND strip -s ${MODELBOX_PYTHON_TARGET_DIR}/modelbox/*.so\n    COMMAND umask 0022 && chmod -R a+rX . 
&& ${PYTHON_EXECUTABLE} setup.py -q sdist -d ${MODELBOX_PYTHON_RELEASE_DIR} bdist_wheel -d ${MODELBOX_PYTHON_RELEASE_DIR})\n\ninstall(CODE \n    \"file(COPY ${MODELBOX_PYTHON_RELEASE_DIR}/ DESTINATION ${RELEASE_PACKAGE_DIR}/python)\"\n)\n\nset(MODELBOX_PYTHON_BINARY_DIR ${MODELBOX_PYTHON_BINARY_DIR} CACHE INTERNAL \"\")\n\nif(NOT DISABLE_MODELBOX_TEST)\n    add_custom_target(unittest-python\n        COMMAND PYTHONPATH=${MODELBOX_PYTHON_BINARY_DIR} ${PYTHON_EXECUTABLE} -m unittest discover ${MODELBOX_PYTHON_BINARY_DIR} -v\n        DEPENDS modelbox-python\n        WORKING_DIRECTORY ${TEST_WORKING_DIR}\n        COMMENT \"Run python Test...\"\n    )\n\n    # run single test case\n    # usage:\n    #   cmake -DPYTHON_TEST_CASE=test_log.py ..\n    #   make unittest-python-case\n    if (PYTHON_TEST_CASE)\n        message(STATUS \"run test case for python: ${PYTHON_TEST_CASE}\")\n        add_custom_target(unittest-python-case\n            COMMAND PYTHONPATH=${MODELBOX_PYTHON_BINARY_DIR} ${PYTHON_EXECUTABLE} ${MODELBOX_PYTHON_BINARY_DIR}/test/${PYTHON_TEST_CASE};\n            DEPENDS modelbox-python\n            WORKING_DIRECTORY ${TEST_WORKING_DIR}\n            COMMENT \"Run python Test...\"\n        )\n        add_dependencies(unittest-python-case all-drivers)\n    endif()\n\n    add_dependencies(unittest-python all-drivers)\n\n    list(APPEND MODELBOX_UNIT_TEST_TARGETS modelbox-python)\n    set(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\n    list(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS unittest-python)\n    set(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\nendif()\n\n# update test config file\nlist(APPEND MODELBOX_UNIT_TEST_CONFIG_IN ${PYTHON_CONFIG_IN})\nset(MODELBOX_UNIT_TEST_CONFIG_IN ${MODELBOX_UNIT_TEST_CONFIG_IN} CACHE INTERNAL \"\")\nlist(APPEND MODELBOX_UNIT_TEST_CONFIG_OUT ${PYTHON_CONFIG_OUT})\nset(MODELBOX_UNIT_TEST_CONFIG_OUT ${MODELBOX_UNIT_TEST_CONFIG_OUT} CACHE INTERNAL 
\"\")\nlist(APPEND DRIVER_UNIT_TEST_TARGET modelbox-python-build)\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")\n\nset(MODELBOX_PYTHON_OP_DIR ${CMAKE_SOURCE_DIR}/src/python/test/op/ CACHE INTERNAL \"\")\n\nFILE(GLOB OP_CHILDRENS RELATIVE ${MODELBOX_PYTHON_OP_DIR} ${MODELBOX_PYTHON_OP_DIR}/*)\nforeach(OP_CHILDREN ${OP_CHILDRENS})\n    if(IS_DIRECTORY ${MODELBOX_PYTHON_OP_DIR}/${OP_CHILDREN})\n        file(GLOB OP_CONFIG_FILES ${MODELBOX_PYTHON_OP_DIR}/${OP_CHILDREN}/*.in ${MODELBOX_PYTHON_OP_DIR}/${OP_CHILDREN}/*.py)\n        foreach(OP_CONFIG_FILE ${OP_CONFIG_FILES})\n            if(OP_CONFIG_FILE MATCHES \".*.in$\")\n                string(REGEX MATCH \"op/*/.*.in$\"  RELATIVE_PATH ${OP_CONFIG_FILE})\n                string(REGEX REPLACE \"op/.*/\" \"\" TARGET_NAME ${RELATIVE_PATH})\n                string(REGEX REPLACE \".in\" \"_test.toml\" TARGET_NAME ${TARGET_NAME})\n                configure_file(${OP_CONFIG_FILE} ${CMAKE_BINARY_DIR}/test/test-working-dir/data/python_op/${OP_CHILDREN}/${TARGET_NAME} @ONLY)\n            elseif(OP_CONFIG_FILE MATCHES \".*.py\")\n                add_custom_command(TARGET python-files PRE_BUILD COMMAND cp -af  ${OP_CONFIG_FILE} ${CMAKE_BINARY_DIR}/test/test-working-dir/data/python_op/${OP_CHILDREN}/)\n            endif()\n        endforeach()\n    endif()\nendforeach()"
  },
  {
    "path": "src/python/MANIFEST.in",
    "content": "recursive-include modelbox *.so\n"
  },
  {
    "path": "src/python/include/modelbox/python/log.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_PYTHON_LIB_LOG_H_\n#define MODELBOX_PYTHON_LIB_LOG_H_\n\n#include <modelbox/base/log.h>\n#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\n\nnamespace modelbox {\n\nclass LoggerPython : public Logger {\n public:\n  LoggerPython();\n  ~LoggerPython() override;\n\n  void Print(LogLevel level, const char *file, int lineno, const char *func,\n             const char *msg) override;\n\n  void SetLogLevel(LogLevel level) override;\n\n  LogLevel GetLogLevel() override;\n\n  void RegLogFunc(py::function pylog);\n\n private:\n  py::function pylog_;\n  LogLevel level_{LOG_OFF};\n  bool has_exception_{false};\n};\n\nclass LoggerPythonWapper {\n public:\n  LoggerPythonWapper();\n  virtual ~LoggerPythonWapper();\n\n  void RegLogFunc(py::function pylog);\n\n  void SetLogLevel(LogLevel level);\n\n  std::shared_ptr<Logger> GetLogger();\n\n  void SetLogger(const std::shared_ptr<Logger> &logger);\n\n  void PrintExt(LogLevel level, const char *file, int lineno, const char *func,\n             const char *msg);\n  void Print(LogLevel level, const char *msg);\n\n private:\n  std::shared_ptr<LoggerPython> logger_python_ =\n      std::make_shared<LoggerPython>();\n  py::module inspect_module_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_PYTHON_LIB_LOG_H_\n"
  },
  {
    "path": "src/python/lib/log.cc",
    "content": "\n#include \"modelbox/python/log.h\"\n\n#include <pybind11/embed.h>\n\n#include <utility>\n\nnamespace modelbox {\n\nLoggerPython::LoggerPython() = default;\nLoggerPython::~LoggerPython() = default;\n\nvoid LoggerPython::Print(LogLevel level, const char *file, int lineno,\n                         const char *func, const char *msg) {\n  if (has_exception_ == true) {\n    printf(\"%s\\n\", msg);\n    return;\n  }\n\n  try {\n    if (pylog_.is_none()) {\n      py::print(level, file, lineno, msg);\n      return;\n    }\n\n    py::gil_scoped_acquire acquire;\n    pylog_(level, file, lineno, func, msg);\n  } catch (py::error_already_set &ex) {\n    if (has_exception_ == false) {\n      printf(\"call function failed, %s, output log to console\\n\", ex.what());\n      has_exception_ = true;\n      printf(\"%s\\n\", msg);\n    }\n  }\n}\n\nvoid LoggerPython::SetLogLevel(LogLevel level) { level_ = level; }\n\nLogLevel LoggerPython::GetLogLevel() { return level_; }\n\nvoid LoggerPython::RegLogFunc(py::function pylog) {\n  has_exception_ = false;\n  pylog_ = std::move(pylog);\n}\n\nLoggerPythonWapper::LoggerPythonWapper() {\n  inspect_module_ = py::module::import(\"inspect\");\n}\n\nLoggerPythonWapper::~LoggerPythonWapper() { ModelBoxLogger.SetLogger(nullptr); }\n\nvoid LoggerPythonWapper::RegLogFunc(py::function pylog) {\n  logger_python_->RegLogFunc(std::move(pylog));\n  ModelBoxLogger.SetLogger(logger_python_);\n}\n\nstd::shared_ptr<Logger> LoggerPythonWapper::GetLogger() {\n  return ModelBoxLogger.GetLogger();\n}\n\nvoid LoggerPythonWapper::SetLogger(const std::shared_ptr<Logger> &logger) {\n  ModelBoxLogger.SetLogger(logger);\n}\n\nvoid LoggerPythonWapper::SetLogLevel(LogLevel level) {\n  logger_python_->SetLogLevel(level);\n}\n\nvoid LoggerPythonWapper::Print(LogLevel level, const char *msg) {\n  auto frame = inspect_module_.attr(\"currentframe\")();\n  auto info = inspect_module_.attr(\"getframeinfo\")(frame);\n\n  PrintExt(level, 
info.attr(\"filename\").cast<std::string>().c_str(),\n           info.attr(\"lineno\").cast<int>(),\n           info.attr(\"function\").cast<std::string>().c_str(), msg);\n}\n\nvoid LoggerPythonWapper::PrintExt(LogLevel level, const char *file, int lineno,\n                                  const char *func, const char *msg) {\n  ModelBoxLogger.Print(level, file, lineno, func, \"%s\", msg);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "src/python/lib/modelbox.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/flow.h>\n#include <modelbox/flow_graph_desc.h>\n#include <pybind11/complex.h>\n#include <pybind11/functional.h>\n#include <pybind11/operators.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <pybind11/stl_bind.h>\n\n#include <chrono>\n#include <functional>\n\n#include \"modelbox/python/log.h\"\n#include \"modelbox_api.h\"\n#include \"python_flow.h\"\n\nnamespace modelbox {\n\nclass PyLogger : public Logger {\n public:\n  using Logger::Logger;\n  ~PyLogger() override = default;\n  LogLevel GetLogLevel() override {\n    PYBIND11_OVERLOAD_PURE(LogLevel, Logger, GetLogLevel);\n  }\n};\n\nvoid SetUpLog(pybind11::module &m) {\n  py::class_<Logger, PyLogger, std::shared_ptr<Logger>>(m, \"Logger\")\n      .def(py::init<>())\n      .def(\"set_log_level\", &Logger::SetLogLevel)\n      .def(\"get_log_level\", &Logger::GetLogLevel);\n\n  auto c = py::class_<LoggerPythonWapper>(m, \"Log\")\n               .def(py::init<>())\n               .def(\"reg\", &LoggerPythonWapper::RegLogFunc,\n                    py::arg(\"Callable[[Level level, str file, int lineno, str \"\n                            \"func, str msg], None]\"))\n               .def(\"get_logger\", &LoggerPythonWapper::GetLogger)\n               .def(\"set_logger\", &LoggerPythonWapper::SetLogger)\n               
.def(\"set_log_level\", &LoggerPythonWapper::SetLogLevel)\n               .def(\"print_ext\", &LoggerPythonWapper::PrintExt,\n                    py::arg(\"level\"), py::arg(\"file\"), py::arg(\"lineno\"),\n                    py::arg(\"func\"), py::arg(\"msg\"))\n               .def(\"print\", &LoggerPythonWapper::Print, py::arg(\"level\"),\n                    py::arg(\"msg\"));\n\n  ModelboxPyApiSetUpLogLevel(c);\n\n  ModelboxPyApiSetUpLog(m);\n}\n\nclass ExtOutputBufferList {\n public:\n  ExtOutputBufferList() = default;\n  virtual ~ExtOutputBufferList() = default;\n\n  OutputBufferList &GetOutputBufferList() { return out_data_; }\n  std::shared_ptr<BufferList> GetBufferList(const std::string &key) {\n    auto iter = out_data_.find(key);\n    if (iter == out_data_.end()) {\n      return nullptr;\n    }\n\n    return iter->second;\n  }\n\n private:\n  OutputBufferList out_data_;\n};\n\ntemplate <typename T>\nstruct unique_ptr_nogil_deleter {\n  void operator()(T *ptr) {\n    pybind11::gil_scoped_release nogil;\n    delete ptr;\n  }\n};\n\nstruct UniqueFlow : modelbox::Flow {};\n\nvoid SetUpFlow(pybind11::module &m) {\n  py::class_<ExtOutputBufferList, std::shared_ptr<ExtOutputBufferList>>(\n      m, \"ExtOutputBufferList\", py::module_local())\n      .def(py::init<>())\n      .def(\"get_buffer_list\", &ExtOutputBufferList::GetBufferList,\n           py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>());\n\n  py::class_<modelbox::ExternalDataMap,\n             std::shared_ptr<modelbox::ExternalDataMap>>(m, \"ExternalDataMap\",\n                                                         py::module_local())\n      .def(\"create_buffer_list\", &modelbox::ExternalDataMap::CreateBufferList,\n           py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"send\", &modelbox::ExternalDataMap::Send,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"recv\",\n          [](modelbox::ExternalDataMap &ext,\n    
         std::shared_ptr<ExtOutputBufferList> &out_data)\n              -> modelbox::Status {\n            auto &map_data = out_data->GetOutputBufferList();\n            auto status = ext.Recv(map_data);\n            return status;\n          },\n          py::keep_alive<2, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"close\", &modelbox::ExternalDataMap::Close,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"shutdown\", &modelbox::ExternalDataMap::Shutdown,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"set_output_meta\", &modelbox::ExternalDataMap::SetOutputMeta,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"get_last_error\", &modelbox::ExternalDataMap::GetLastError,\n           py::call_guard<py::gil_scoped_release>());\n\n  auto c = py::class_<\n      modelbox::UniqueFlow,\n      std::unique_ptr<UniqueFlow, unique_ptr_nogil_deleter<UniqueFlow>>>(\n      m, \"Flow\");\n  py::enum_<modelbox::Flow::Format>(c, \"Format\", py::arithmetic(),\n                                    py::module_local())\n      .value(\"FORMAT_AUTO\", Flow::FORMAT_AUTO)\n      .value(\"FORMAT_TOML\", Flow::FORMAT_TOML)\n      .value(\"FORMAT_JSON\", Flow::FORMAT_JSON);\n\n  c.def(py::init<>())\n      .def(\"init\",\n           static_cast<modelbox::Status (modelbox::Flow::*)(\n               const std::string &, modelbox::Flow::Format)>(\n               &modelbox::Flow::Init),\n           py::arg(\"conf_file\"),\n           py::arg(\"format\") = modelbox::Flow::Format::FORMAT_AUTO,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"init\",\n           static_cast<modelbox::Status (modelbox::Flow::*)(\n               const std::string &, const std::string &,\n               modelbox::Flow::Format)>(&modelbox::Flow::Init),\n           py::arg(\"name\"), py::arg(\"graph\"),\n           py::arg(\"format\") = modelbox::Flow::Format::FORMAT_AUTO,\n           py::call_guard<py::gil_scoped_release>())\n  
    .def(\"init\",\n           static_cast<modelbox::Status (modelbox::Flow::*)(\n               std::shared_ptr<Configuration>)>(&modelbox::Flow::Init),\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"init\",\n           static_cast<modelbox::Status (modelbox::Flow::*)(\n               const std::shared_ptr<FlowGraphDesc> &)>(&modelbox::Flow::Init),\n           py::keep_alive<1, 2>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"init_by_name\",\n           static_cast<modelbox::Status (modelbox::Flow::*)(\n               const std::string &,\n               const std::unordered_map<std::string, std::string> &,\n               const std::string &)>(&modelbox::Flow::InitByName),\n           py::arg(\"name\"),\n           py::arg(\"args\") = std::unordered_map<std::string, std::string>(),\n           py::arg(\"flow_dir\") = modelbox::DEFAULT_FLOW_PATH,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"build\", &modelbox::Flow::Build,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"run\", &modelbox::Flow::Run,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"run_async\", &modelbox::Flow::RunAsync,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"wait\", &modelbox::Flow::Wait, py::arg(\"timemout\") = 0,\n           py::arg(\"retval\") = nullptr,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"stop\", &modelbox::Flow::Stop,\n           py::call_guard<py::gil_scoped_release>())\n      .def(\"create_external_data_map\", &modelbox::Flow::CreateExternalDataMap,\n           py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\n          \"create_stream_io\",\n          [](modelbox::UniqueFlow &self)\n              -> std::shared_ptr<PythonFlowStreamIO> {\n            auto io = self.CreateStreamIO();\n            if (io == nullptr) {\n              return nullptr;\n            }\n\n            return 
std::make_shared<PythonFlowStreamIO>(io);\n          },\n          py::keep_alive<0, 1>(), py::call_guard<py::gil_scoped_release>())\n      .def(\"start_run\", &modelbox::Flow::StartRun,\n           py::call_guard<py::gil_scoped_release>());\n}\n\nPYBIND11_MODULE(_modelbox, m) {\n  m.doc() = R\"pbdoc(\n        modelbox module\n    )pbdoc\";\n\n  SetUpLog(m);\n  SetUpFlow(m);\n  ModelboxPyApiSetUpStatus(m);\n  ModelboxPyApiSetUpConfiguration(m);\n  ModelboxPyApiSetUpBuffer(m);\n  ModelboxPyApiSetUpBufferList(m);\n  ModelboxPyApiSetUpGeneric(m);\n  ModelboxPyApiSetUpEngine(m);\n  ModelboxPyApiSetUpDataHandler(m);\n  ModelboxPyApiSetUpFlowGraphDesc(m);\n  ModelboxPyApiSetUpFlowNodeDesc(m);\n  ModelboxPyApiSetUpFlowPortDesc(m);\n  ModelboxPyApiSetUpFlowStreamIO(m);\n  ModelboxPyApiSetUpModel(m);\n\n#ifdef VERSION_INFO\n  m.attr(\"__version__\") = VERSION_INFO;\n#else\n  m.attr(\"__version__\") = \"dev\";\n#endif\n}\n}  // namespace modelbox"
  },
  {
    "path": "src/python/modelbox/__init__.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# import all module from shared library avoid stutter\nfrom modelbox._modelbox import *"
  },
  {
    "path": "src/python/setup.py.in",
    "content": "\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom setuptools import setup, find_packages\n\nsetup(name='modelbox',\n      description='modelbox',\n      url='http://code-cbu.huawei.com/ModelArts/Infer/BaseImages/modelbox',\n      version='@MODELBOX_VERSION_STRING@',\n      author='@MODELBOX_AUTHOR@',\n      author_email='@MODELBOX_AUTHOR_EMAIL@',\n      license='BSD 3-Clause License',\n      packages=find_packages(),\n      include_package_data=True,\n      zip_safe=False,\n      py_modules = [\n          ],\n      scripts = [\n          ],\n      entry_points = {\n          'console_scripts': [\n              ],\n          },\n      install_requires = [\n          ],\n      python_requires='>=@PYTHON_VERSION_STRING@',\n     )\n\n"
  },
  {
    "path": "src/python/test/__init__.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport modelbox\nimport datetime\n\n__log = modelbox.Log()\n\n\ndef LogCallback(level, file, lineno, func, msg):\n    print(\"[{time}][{level}][{file:>20}:{lineno:>4}] {msg}\".format(\n        time=datetime.datetime.now(), level=level,\n        file=file, lineno=lineno, msg=msg\n    ))\n\n\ndef RegLog():\n    __log.reg(LogCallback)\n    __log.set_log_level(modelbox.Log.Level.INFO)\n\n\ndef SetLogLevel(level):\n    __log.set_log_level(level)\n\nRegLog()\n"
  },
  {
    "path": "src/python/test/op/op_args/modelbox.op_args.in",
    "content": "[base]\nname = \"python_args\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - args\"\nentry = \"op_args@ArgsFlowunit\"\ntype = \"python\"\n\n[input]\n[input.input1]\nname = \"input\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "src/python/test/op/op_args/op_args.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport os\n\nclass ArgsFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.test_arg1 = config.get_string(\"test_arg1\")\n        self.test_arg2 = config.get_string(\"test_arg2\")\n\n        modelbox.info(\"get test_arg1\", self.test_arg1)\n        modelbox.info(\"get test_arg2\", self.test_arg2)\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_ctx):\n        inputs = data_ctx.input(\"input\")\n        outputs = data_ctx.output(\"output\")\n\n        for buffer in inputs:\n            input_str = buffer.as_object()\n            modelbox.info(\"get input\", input_str)\n            result = input_str + \", \" + self.test_arg1 + \", \" + self.test_arg2\n            modelbox.info(\"generate result\", result)\n            out_buffer = modelbox.Buffer(self.get_bind_device(), result)\n            outputs.push_back(out_buffer)\n\n        modelbox.info(\"ArgsFlowunit process\")\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n"
  },
  {
    "path": "src/python/test/op/op_brightness/modelbox.op_brightness.in",
    "content": "[base]\nname = \"python_brightness\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - brightness image\"\nentry = \"op_brightness@BrightnessFlowunit\"\ntype = \"python\"\n\nstream = false\ncondition  = false\ncollapse = false\ncollapse_all = false\nexpand = false\n\n[config]\nbrightness = 0.1\n\n\n[input]\n[input.input1]\nname = \"brightness_in\"\ntype = \"uint8\"\n\n[output]\n[output.output1]\nname = \"brightness_out\"\ntype = \"uint8\""
  },
  {
    "path": "src/python/test/op/op_brightness/op_brightness.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image, ImageEnhance\n\nclass BrightnessFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n\n    def open(self, config):\n        self.__brightness = config.get_float(\"brightness\", 0.0)\n        if self.__brightness == 0.0:\n            return modelbox.Status.StatusCode.STATUS_FAULT\n\n        return modelbox.Status()\n\n    def process(self, data_ctx):\n        in_bl = data_ctx.input(\"brightness_in\")\n        out_bl = data_ctx.output(\"brightness_out\")\n\n        for buffer in in_bl:\n            np_image = np.array(buffer, copy= False)\n            image = Image.fromarray(np_image)\n\n            brightness_image = ImageEnhance.Brightness(image).enhance(self.__brightness)\n\n            brightness_array = np.array(brightness_image)\n            add_buffer = self.create_buffer(brightness_array)\n            add_buffer.copy_meta(buffer)\n            \n            add_buffer.set(\"brightness\", self.__brightness)\n\n            out_bl.push_back(add_buffer)\n\n        return modelbox.Status()\n\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_post(self, data_ctx):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/python/test/op/op_buffer/modelbox.op_buffer.in",
    "content": "[base]\nname = \"python_buffer\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - test buffer\"\nentry = \"op_buffer@BufferTestFlowunit\"\ntype = \"python\"\n\nstream = false\ncondition  = false\ncollapse = false\ncollapse_all = false\nexpand = false\n\n[config]\nbuffer_config = 0.1\n\n\n[input]\n[input.input1]\nname = \"buffer_in\"\ntype = \"uint8\"\n\n[output]\n[output.output1]\nname = \"buffer_out\"\ntype = \"uint8\""
  },
  {
    "path": "src/python/test/op/op_buffer/op_buffer.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image, ImageEnhance\n\nclass BufferTestFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.__brightness = config.get_float(\"buffer_config\", 0.0)\n        if self.__brightness != 0.2:\n            return modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT)\n\n        return modelbox.Status()\n\n    def process(self, data_ctx):\n        in_bl = data_ctx.input(\"buffer_in\")\n        out_bl = data_ctx.output(\"buffer_out\")\n\n        data_ctx.set_private(\"float_test\", 0.5)\n        data_ctx.set_private(\"string_test\", \"TEST\")\n        data_ctx.set_private(\"int_test\", 100)\n        data_ctx.set_private(\"bool_test\", False)\n\n        data_ctx.set_private(\"list_int_test\", [1, 1, 1])\n        data_ctx.set_private(\"list_float_test\", [0.1, 0.2, 0.3])\n        data_ctx.set_private(\"list_bool_test\", [False, False, True])\n        data_ctx.set_private(\"list_string_test\", [\"TEST1\", \"TEST2\", \"TEST3\"])\n\n        data_ctx.set_private(\"list2_int_test\", [[1, 2], [3, 4]])\n        data_ctx.set_private(\"list2_float_test\", [[1.1, 2.2], [3.3, 4.4]])\n        data_ctx.set_private(\"list2_bool_test\", [[True, False], [False, True]])\n        data_ctx.set_private(\"list2_string_test\", [[\"hello\", \"world\"], [\"good\", \"bad\"]])\n\n        data_ctx.set_private(\"dict\", {\"1\":1, \"2\":2})\n\n        np_test = np.array([[1, 2 ,3], [11, 12, 13]])\n        data_ctx.set_private(\"np_test\", np_test)\n\n        empty_np = np.array([])\n        empty_buffer = self.create_buffer(empty_np)\n        first_buffer = in_bl.front()\n        last_buffer = in_bl.back()\n        for buffer in in_bl:\n            np_image = np.array(buffer, copy= False)\n            image = Image.fromarray(np_image)\n\n            brightness_array = np.array(image)\n            add_buffer = 
modelbox.Buffer(self.get_bind_device(), brightness_array)\n            add_buffer.copy_meta(buffer)\n            add_buffer.set(\"float_test\", data_ctx.get_private(\"float_test\"))\n            add_buffer.set(\"string_test\", data_ctx.get_private(\"string_test\"))\n            add_buffer.set(\"int_test\", data_ctx.get_private(\"int_test\"))\n            add_buffer.set(\"bool_test\", data_ctx.get_private(\"bool_test\"))\n\n            add_buffer.set(\"list_int_test\", data_ctx.get_private(\"list_int_test\"))\n            add_buffer.set(\"list_float_test\", data_ctx.get_private(\"list_float_test\"))\n            add_buffer.set(\"list_bool_test\", data_ctx.get_private(\"list_bool_test\"))\n            add_buffer.set(\"list_string_test\", data_ctx.get_private(\"list_string_test\"))\n\n            add_buffer.set(\"list2_int_test\", data_ctx.get_private(\"list2_int_test\"))\n            add_buffer.set(\"list2_float_test\", data_ctx.get_private(\"list2_float_test\"))\n            add_buffer.set(\"list2_bool_test\", data_ctx.get_private(\"list2_bool_test\"))\n            add_buffer.set(\"list2_string_test\", data_ctx.get_private(\"list2_string_test\"))\n\n            add_buffer.set(\"np_test\", data_ctx.get_private(\"np_test\"))\n\n            add_buffer.set(\"map_test\", {\"test\" : 1})\n            out_bl.push_back(add_buffer)\n\n        return modelbox.Status()\n\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_post(self, data_ctx):\n        return modelbox.Status()\n"
  },
  {
    "path": "src/python/test/op/op_image/modelbox.op_image.in",
    "content": "[base]\nname = \"python_image\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - read image\"\nentry = \"op_image@ImageFlowunit\"\ntype = \"python\"\n\nstream = false\ncondition  = false\ncollapse = false\ncollapse_all = false\nexpand = false\n\n[config]\npath = \"@CMAKE_SOURCE_DIR@/src/python/test/data/\"\n\n[input]\n\n[output]\n[output.output1]\nname = \"image_out/out_1\"\ntype = \"uint8\""
  },
  {
    "path": "src/python/test/op/op_image/op_image.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport numpy as np\nimport threading\nimport time\nimport os\nimport cv2\nfrom PIL import Image  \n\n\nclass SendExternThread (threading.Thread):   #继承父类threading.Thread\n    def __init__(self, fu):\n        threading.Thread.__init__(self)\n        self.__fu = fu\n\n    def run(self):\n        images_files = []\n        dir_or_file = os.listdir(self.__fu.path_config)\n        for path in dir_or_file:\n            ab_path = os.path.join(self.__fu.path_config, path)\n            if os.path.isfile(ab_path) and ab_path.endswith(\".jpg\"):\n                images_files.append(ab_path)\n\n\n        while True:\n            for image_file in images_files:\n                img = cv2.imread(image_file)\n                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n                extern_data = self.__fu.create_external_data()\n                buffer_list = extern_data.create_buffer_list()\n\n                im_array = np.asarray(img_rgb[:,:])\n                \n                buffer_list.push_back(im_array)\n                extern_data.send(buffer_list)\n\n                time.sleep(0.2)\n                break\n            break\n        self.__fu = None\n\n\nclass ImageFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.path_config = config.get_string(\"path\")\n        if not os.path.isdir(self.path_config):\n            return modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT)\n\n        batch = config.get_int(\"batch_size\", 1)\n        if batch != 10:\n            return modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT, \"invalid batch\")\n\n        self.__thread1 = SendExternThread(self)\n        self.__thread1.start()\n\n        return modelbox.Status()\n\n    def process(self, data_ctx):\n        extern_bl = data_ctx.external()\n        out_bl = data_ctx.output(\"image_out/out_1\")\n\n        for buffer in 
extern_bl:\n            out_bl.push_back(buffer)\n\n        return modelbox.Status()\n\n    def close(self):\n        self.__thread1.join()\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_post(self, data_ctx):\n        return modelbox.Status()\n\n"
  },
  {
    "path": "src/python/test/op/op_resize/modelbox.op_resize.in",
    "content": "[base]\nname = \"python_resize\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - resize image\"\nentry = \"op_resize@ResizeFlowunit\"\ntype = \"python\"\n\nstream = false\ncondition  = false\ncollapse = false\ncollapse_all = false\nexpand = false\n\n[config]\nheight = 360\nwidth = 480\n\n[input]\n[input.input1]\nname = \"resize_in\"\ntype = \"uint8\"\n\n[output]\n[output.output1]\nname = \"resize_out\"\ntype = \"uint8\""
  },
  {
    "path": "src/python/test/op/op_resize/op_resize.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image  \n\nclass ResizeFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.height_config = config.get_int(\"height\")\n        self.width_config = config.get_int(\"width\")\n\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def process(self, data_ctx):\n        in_bl = data_ctx.input(\"resize_in\")\n        out_bl = data_ctx.output(\"resize_out\")\n\n        for buffer in in_bl:\n            np_image = np.array(buffer, copy= False)\n            resize_image = Image.fromarray(np_image).resize((self.width_config, self.height_config))\n            out_bl.push_back(np.array(resize_image))\n\n        modelbox.info(\"ResizeFlowunit process\")\n        return modelbox.Status.StatusCode.STATUS_SUCCESS\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_post(self, data_ctx):\n        return modelbox.Status()\n\n"
  },
  {
    "path": "src/python/test/op/op_show/modelbox.op_show.in",
    "content": "[base]\nname = \"python_show\"\ndevice = \"cpu\"\nversion = \"1.0.0\"\ndescription = \"test python flowunit - show image\"\nentry = \"op_show@ShowFlowunit\"\ntype = \"python\"\n\nstream = false\ncondition  = false\ncollapse = false\ncollapse_all = false\nexpand = false\n\n[config]\nout_path = \"@CMAKE_SOURCE_DIR@/build/test/test-working-dir/out/\"\ncheck_path = \"@CMAKE_SOURCE_DIR@/src/python/test/data/python_test_show_out.png\"\n\n[input]\n[input.input1]\nname = \"show_in\"\ntype = \"uint8\"\n\n[output]"
  },
  {
    "path": "src/python/test/op/op_show/op_show.py",
    "content": "import _flowunit as modelbox\nimport sys\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image\nfrom PIL import ImageChops\n\nclass ShowFlowunit(modelbox.FlowUnit):\n    def __init__(self):\n        super().__init__()\n\n    def open(self, config):\n        self.__out_path_config = config.get_string(\"out_path\")\n        if not os.path.exists(self.__out_path_config):\n            os.mkdir(self.__out_path_config)\n\n        self.__is_save_config = config.get_bool(\"is_save\")\n        if self.__is_save_config == True:\n            self.__out_path_config = config.get_string(\"out_path\", \"./\")\n            if not os.path.exists(self.__out_path_config):\n                os.mkdir(self.__out_path_config)\n\n            self.__out_file = self.__out_path_config + '/python_test_show_out.png'\n            if os.path.exists(self.__out_file):\n                os.remove(self.__out_file)\n\n        self.__check_path = config.get_string(\"check_path\")\n        if not os.path.exists(self.__check_path):\n            return modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT, \"invalid check file path, it is not exist\")\n\n        return modelbox.Status()\n\n    def process(self, data_ctx):\n        in_bl = data_ctx.input(\"show_in\")\n\n        for buffer in in_bl:\n            np_image = np.array(buffer, copy= False)\n\n            if np_image.shape[0] != 360:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid height\")\n\n            if np_image.shape[1] != 480:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid width\")\n\n            if np_image.shape[2] != 3:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid channels\")\n\n            brightness = buffer.get(\"brightness\")\n            if brightness != 0.1:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid 
brightness\")\n\n            image = Image.fromarray(np_image)\n\n            # if self.__is_save_config == True:\n            #    image.save(self.__out_file)\n\n            with Image.open(self.__check_path) as check_image:\n                try:\n                    diff = ImageChops.difference(image, check_image)\n                    if diff.getbbox() is not None:\n                        return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid check image\")\n                except ValueError as e:\n                    return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid check image size\")\n\n            # check_image.close()\n\n        modelbox.info(\"ShowFlowunit process\")\n        return modelbox.Status(modelbox.Status.StatusCode.STATUS_STOP)\n\n    def close(self):\n        return modelbox.Status()\n\n    def data_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_post(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_pre(self, data_ctx):\n        return modelbox.Status()\n\n    def data_group_post(self, data_ctx):\n        return modelbox.Status()\n\n"
  },
  {
    "path": "src/python/test/test_api_mode.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom time import sleep\nimport unittest\nimport modelbox\nimport numpy as np\nimport os\nimport threading\nfrom test import test_config\nfrom PIL import Image\nfrom PIL import ImageChops\nimport cv2\n\ndef callback_func(ctx):\n    input = ctx.input(\"in1\")\n    input2 = ctx.input(\"in2\")\n    output = ctx.output(\"out1\")\n    output2 = ctx.output(\"out2\")\n    buffer1 = input[0]\n    buffer2 = input2[0]\n    array1 = np.array(buffer1)\n    array2 = np.array(buffer2)\n    output.push_back(array1 + array2)\n\n    output2.push_back(\"test\")\n\n    return modelbox.Status.StatusCode.STATUS_SUCCESS\n\nclass TestAPIMode(unittest.TestCase):\n    def setUp(self):\n        self.graph_desc = modelbox.FlowGraphDesc()\n        self.graph_desc.set_queue_size(32)\n        self.graph_desc.set_batch_size(8)\n        self.graph_desc.set_skip_default_drivers(True)\n        self.graph_desc.set_drivers_dir([test_config.TEST_DRIVER_DIR])\n\n    def tearDown(self):\n        pass\n\n    def test_add_node(self):\n        source_url = test_config.TEST_ASSETS + \"/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4\"\n        \n        input = self.graph_desc.add_input(\"input1\")\n        video_demuxer = self.graph_desc.add_node(\"video_demuxer\", \"cpu\", input)\n        self.graph_desc.add_output(\"output1\", video_demuxer)\n        \n        flow = 
modelbox.Flow()\n        flow.init(self.graph_desc)\n        flow.start_run()\n\n        stream_io = flow.create_stream_io()\n        stream_io.send(\"input1\", source_url)\n\n        buffer = stream_io.recv(\"output1\")\n        self.assertEqual(buffer.get_bytes(), 1285)\n        data = np.array(buffer)\n        self.assertEqual(data.shape, (1285,))\n        self.assertEqual(data.dtype, np.uint8)\n\n    def test_add_function(self):\n        input1 = self.graph_desc.add_input(\"input1\")\n        input2 = self.graph_desc.add_input(\"input2\")\n        func_node = self.graph_desc.add_function(callback_func, [\"in1\", \"in2\"], [\"out1\", \"out2\"], {\"in1\": input1[0], \"in2\": input2[0]})\n        self.graph_desc.add_output(\"output1\", func_node[0])\n        self.graph_desc.add_output(\"output2\", func_node[1])\n\n        flow = modelbox.Flow()\n        flow.init(self.graph_desc)\n        flow.start_run()\n\n        data = np.array([1, 1])\n        stream_io = flow.create_stream_io()\n        stream_io.send(\"input1\", data)\n        stream_io.send(\"input2\", data)\n        buffer = stream_io.recv(\"output1\")\n        out_data = np.array(buffer)\n        self.assertEqual(out_data[0], 2)\n        self.assertEqual(out_data[1], 2)\n        \n        buffer2 = stream_io.recv(\"output2\")\n        out_data2 = str(buffer2)\n        self.assertEqual(out_data2, \"test\")\n\nif __name__ == '__main__':\n    unittest.main()"
  },
  {
    "path": "src/python/test/test_buffer.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport modelbox\nimport numpy as np\nimport os\nimport threading\nfrom test import test_config\nfrom PIL import Image\nfrom PIL import ImageChops\nimport cv2\n\n\nclass TestBuffer(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_flow_for_buffer(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_op_config.toml\"\n        driver_dir = test_config.TEST_DRIVER_DIR\n        with open(conf_file, \"w\") as out:\n            txt = r\"\"\"\n[driver]\ndir=[\"{}\", \"{}\"]\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    input1[type=input]   \n    python_buffer[type=flowunit, flowunit=python_buffer, device=cpu, deviceid=0, label=\"<buffer_in> | <buffer_out>\", buffer_config = 0.2]  \n    output1[type=output]   \n    input1 -> python_buffer:buffer_in\n    python_buffer:buffer_out -> output1                                                                                             \n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir, test_config.TEST_DATA_DIR + \"/python_op\")\n            out.write(txt)\n\n        conf = modelbox.Configuration()\n        flow = modelbox.Flow()\n        ret = flow.init(conf_file)\n        
os.remove(conf_file)\n        if ret == False:\n            modelbox.error(ret)\n        self.assertTrue(ret)\n        ret = flow.build()\n        self.assertTrue(ret)\n        ret = flow.run_async()\n        self.assertTrue(ret)\n\n        img = cv2.imread(test_config.TEST_SOURCE_DIR + \"/../src/python/test/data/liu-x-160.jpg\")\n        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n        with Image.open(test_config.TEST_SOURCE_DIR + \"/../src/python/test/data/liu-x-160.jpg\") as img:\n            img_np = np.array(img)\n\n        extern_data_map = flow.create_external_data_map()\n        buffer_list = extern_data_map.create_buffer_list()\n        buffer_list.push_back(img_np)\n        extern_data_map.send(\"input1\", buffer_list)\n        extern_data_map.shutdown()\n        \n        buffer_list_map = modelbox.ExtOutputBufferList()\n        ret = extern_data_map.recv(buffer_list_map)\n        self.assertTrue(ret)\n\n        result_buffer_list = buffer_list_map.get_buffer_list(\"output1\")\n\n        for i in range(result_buffer_list.size()):\n            buffer = result_buffer_list[i]\n            np_image = np.array(buffer, copy= False)\n            image = Image.fromarray(np_image)\n            with Image.open(test_config.TEST_SOURCE_DIR + \"/../src/python/test/data/liu-x-160.jpg\") as check_image:\n                try:\n                    check_image_np = np.array(check_image)\n                    diff = ImageChops.difference(image, Image.fromarray(check_image_np))\n                    self.assertEqual(diff.getbbox(), None)\n                except ValueError as e:\n                    flow.stop()\n                    self.assertTrue(False)\n\n            data_type = buffer.get(\"type\")\n            if data_type != modelbox.Buffer.ModelBoxDataType.UINT8:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid type\")\n\n            float_test = buffer.get(\"float_test\")\n            if float_test != 0.5:\n            
    return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid float test\")\n\n            string_test = buffer.get(\"string_test\")\n            if string_test != \"TEST\":\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid string test\")\n\n            int_test = buffer.get(\"int_test\")\n            if int_test != 100:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid int test\")\n\n            bool_test = buffer.get(\"bool_test\")\n            if bool_test != False:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid bool test\")\n\n            int_list = buffer.get(\"list_int_test\")\n            if int_list != [1, 1, 1]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid int list\")\n\n            float_list = buffer.get(\"list_float_test\")\n            if float_list != [0.1, 0.2, 0.3]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid float list\")\n\n            bool_list = buffer.get(\"list_bool_test\")\n            if bool_list != [False, False, True]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid bool list\")\n\n            string_list = buffer.get(\"list_string_test\")\n            if string_list != [\"TEST1\", \"TEST2\", \"TEST3\"]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid string list\")\n\n            int_list2 = buffer.get(\"list2_int_test\")\n            if int_list2 != [[1, 2], [3, 4]]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid 2D int list\")\n\n            float_list2 = buffer.get(\"list2_float_test\")\n            if float_list2 != [[1.1, 2.2], [3.3, 4.4]]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid 2D float 
list\")\n\n            bool_list2 = buffer.get(\"list2_bool_test\")\n            if bool_list2 != [[True, False], [False, True]]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid 2D bool list\")\n\n            string_list2 = buffer.get(\"list2_string_test\")\n            if string_list2 != [[\"hello\", \"world\"], [\"good\", \"bad\"]]:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid 2D string list\")\n\n            np_set_test = np.array([[1, 2 ,3], [11, 12, 13]])\n            np_get_test = buffer.get(\"np_test\")\n            if not (np_set_test == np_get_test).all():\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid np test\")\n\n            dict_test = buffer.get(\"map_test\")\n            if dict_test != {\"test\" : 1}:\n                return modelbox.Status(modelbox.Status.StatusCode.STATUS_SHUTDOWN, \"invalid map test\")\n\n        flow.stop()\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_config.py.in",
    "content": "\n# test working dir\nTEST_WORKING_DIR = \"@TEST_WORKING_DIR@\"\n\n# test lib dir\nTEST_LIB_DIR = \"@TEST_WORKING_LIB_DIR@\"\n\n# test bin dir\nTEST_BIN_DIR = \"@TEST_WORKING_BIN_DIR@\"\n\n# test data dir\nTEST_DATA_DIR = \"@TEST_WORKING_DATA_DIR@\"\n\n# test source code dir\nTEST_SOURCE_DIR = \"@TEST_SOURCE_DIR@\"\n\n# test driver dir\nTEST_DRIVER_DIR = \"@TEST_WORKING_DRIVERS_DIR@\"\n\n# test asserts file\nTEST_ASSETS = \"@TEST_ASSETS@\""
  },
  {
    "path": "src/python/test/test_configuration.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport sys\nimport threading\nimport modelbox\nimport inspect\n\n\nclass TestConfiguration(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_config_set_get(self):\n        c = modelbox.Configuration()\n        c.set(\"1\", 1)\n        c.set(\"2\", 1.2)\n        c.set(\"3\", 1.3)\n        c.set(\"4\", False)\n        c.set(\"5\", True)\n        c.set(\"6\", \"test\")\n\n        self.assertEqual(c.get_int(\"1\"), 1)\n        self.assertEqual(c.get_float(\"2\"), 1.2)\n        self.assertEqual(c.get_float(\"3\"), 1.3)\n        self.assertEqual(c.get_bool(\"4\"), False)\n        self.assertEqual(c.get_bool(\"5\"), True)\n        self.assertEqual(c.get_string(\"6\"), \"test\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_dynamic_graph.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport sys\nimport threading\nimport modelbox\nimport inspect\nfrom test import test_config\n\nclass TestDynamicGraph(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_dynamic_graph(self):\n        engine = modelbox.ModelBoxEngine()\n        self.assertNotEqual(engine, None)\n        config = modelbox.Configuration()\n        config.set(\"graph.queue_size\",\"32\")\n        config.set(\"graph.queue_size_external\",\"1000\")\n        config.set(\"graph.batch_size\",\"16\")\n        config.set(\"drivers.skip-default\", \"true\")\n        config.set(\"drivers.dir\", [test_config.TEST_DRIVER_DIR])\n        engine.init(config)\n        input_stream =  engine.create_input({\"input\"})\n        source_url = f'{test_config.TEST_ASSETS}/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4'\n        input_stream.setmeta(\"source_url\",source_url)\n        input_stream.close()\n\n        video_demuxer_output = engine.execute(\"video_demuxer\",{},input_stream)\n        frame_num = 0\n        for packet in video_demuxer_output:\n            frame_num = frame_num + 1\n        \n        engine.shutdown()\n        # engine.close()\n\nif __name__ == '__main__':\n    unittest.main()"
  },
  {
    "path": "src/python/test/test_flow.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport modelbox\nimport numpy as np\nimport os\nimport threading\nfrom test import test_config\nfrom PIL import Image\nfrom PIL import ImageChops\nimport cv2\n\n\nclass TestFlow(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_build_failed(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_config.toml\"\n        driver_dir = test_config.TEST_DRIVER_DIR\n        \n        txt = r\"\"\"\n[driver]\ndir=[\"{}\"]\nskip-default=true\n[log]\nlevel=\"ERROR\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    notexist[type=flowunit, flowunit=notexist, device=cpu]                                                                                                                \n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir)\n\n        conf = modelbox.Configuration()\n        flow = modelbox.Flow()\n        ret = flow.init(\"graph\", txt)\n        if ret == False:\n            self.assertTrue(ret)\n            \n        self.assertTrue(ret)\n        ret = flow.build()\n        if ret == False:\n            modelbox.error(ret)\n        self.assertFalse(ret)\n        \n    def test_flow_op(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_op_config.toml\"\n       
 driver_dir = test_config.TEST_DRIVER_DIR\n        with open(conf_file, \"w\") as out:\n            txt = r\"\"\"\n[driver]\ndir=[\"{}\", \"{}\"]\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    python_image[type=flowunit, flowunit=python_image, device=cpu, deviceid=0, label=\"<image_out/out_1>\", batch_size = 10]   \n    python_resize[type=flowunit, flowunit=python_resize, device=cpu, deviceid=0, label=\"<resize_in> | <resize_out>\"]   \n    python_brightness[type=flowunit, flowunit=python_brightness, device=cpu, deviceid=0, label=\"<brightness_in> | <brightness_out>\", brightness = 0.1]  \n    python_show[type=flowunit, flowunit=python_show, device=cpu, deviceid=0, label=\"<show_in>\", is_save = true]    \n    python_image:\"image_out/out_1\" -> python_resize:resize_in\n    python_resize:resize_out -> python_brightness:brightness_in\n    python_brightness:brightness_out -> python_show:show_in                                                                                              \n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir, test_config.TEST_DATA_DIR + \"/python_op\")\n            out.write(txt)\n\n        conf = modelbox.Configuration()\n        flow = modelbox.Flow()\n        ret = flow.init(conf_file)\n        os.remove(conf_file)\n        if ret == False:\n            modelbox.error(ret)\n        self.assertTrue(ret)\n        ret = flow.build()\n        self.assertTrue(ret)\n        ret = flow.run_async()\n        self.assertTrue(ret)\n        retval = modelbox.Status()\n        ret = flow.wait(0, retval)\n        self.assertEqual(retval, modelbox.Status.StatusCode.STATUS_STOP)\n\n    def test_flow_op_thread(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_op_config.toml\"\n        driver_dir = test_config.TEST_DRIVER_DIR\n        with open(conf_file, \"w\") as out:\n            txt = r\"\"\"\n[driver]\ndir=[\"{}\", 
\"{}\"]\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    python_image[type=flowunit, flowunit=python_image, device=cpu, deviceid=0, label=\"<image_out/out_1>\", batch_size = 10]   \n    python_resize[type=flowunit, flowunit=python_resize, device=cpu, deviceid=0, label=\"<resize_in> | <resize_out>\"]   \n    python_brightness[type=flowunit, flowunit=python_brightness, device=cpu, deviceid=0, label=\"<brightness_in> | <brightness_out>\", brightness = 0.1]  \n    python_show[type=flowunit, flowunit=python_show, device=cpu, deviceid=0, label=\"<show_in>\", is_save = false]    \n    python_image:\"image_out/out_1\" -> python_resize:resize_in\n    python_resize:resize_out -> python_brightness:brightness_in\n    python_brightness:brightness_out -> python_show:show_in                                                                                              \n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir, test_config.TEST_DATA_DIR + \"/python_op\")\n            out.write(txt)\n\n        conf = modelbox.Configuration()\n        flow1 = modelbox.Flow()\n        flow2 = modelbox.Flow()\n        t1 = threading.Thread(target=self.thread_func,\n                              args=(flow1, conf_file,))\n        t2 = threading.Thread(target=self.thread_func,\n                              args=(flow2, conf_file,))\n        t1.setDaemon(True)\n        t2.setDaemon(True)\n        \n        t1.start()\n        t2.start()\n\n        t1.join()\n        t2.join()\n        os.remove(conf_file)\n\n\n    def thread_func(self, flow, conf_file):\n        ret = flow.init(conf_file)\n        if ret == False:\n            modelbox.error(ret)\n        self.assertTrue(ret)\n        ret = flow.build()\n        self.assertTrue(ret)\n        ret = flow.run_async()\n        self.assertTrue(ret)\n        retval = modelbox.Status()\n        ret = flow.wait(0, retval)\n        
self.assertEqual(retval, modelbox.Status.StatusCode.STATUS_STOP)\n\n    def test_flow_op_ext(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_op_config.toml\"\n        driver_dir = test_config.TEST_DRIVER_DIR\n        with open(conf_file, \"w\") as out:\n            txt = r\"\"\"\n[driver]\ndir=[\"{}\", \"{}\"]\nskip-default=true\n[log]\nlevel=\"INFO\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    input1[type=input]   \n    python_resize[type=flowunit, flowunit=python_resize, device=cpu, deviceid=0, label=\"<resize_in> | <resize_out>\"]   \n    python_brightness[type=flowunit, flowunit=python_brightness, device=cpu, deviceid=0, label=\"<brightness_in> | <brightness_out>\", brightness = 0.1]  \n    output1[type=output]   \n    input1 -> python_resize:resize_in\n    python_resize:resize_out -> python_brightness:brightness_in\n    python_brightness:brightness_out -> output1                                                                                             \n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir, test_config.TEST_DATA_DIR + \"/python_op\")\n            out.write(txt)\n\n        conf = modelbox.Configuration()\n        flow = modelbox.Flow()\n        ret = flow.init(conf_file)\n        os.remove(conf_file)\n        if ret == False:\n            modelbox.error(ret)\n        self.assertTrue(ret)\n        ret = flow.build()\n        self.assertTrue(ret)\n        ret = flow.run_async()\n        self.assertTrue(ret)\n\n        img = cv2.imread(test_config.TEST_SOURCE_DIR + \"/../src/python/test/data/liu-x-160.jpg\")\n        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n        extern_data_map = flow.create_external_data_map()\n        buffer_list = extern_data_map.create_buffer_list()\n        im_array = np.asarray(img_rgb[:,:])      \n        buffer_list.push_back(im_array)\n        extern_data_map.send(\"input1\", buffer_list)\n        
extern_data_map.shutdown()\n\n        buffer_list_map = modelbox.ExtOutputBufferList()\n        ret = extern_data_map.recv(buffer_list_map)\n        self.assertTrue(ret)\n        \n        result_buffer_list = buffer_list_map.get_buffer_list(\"output1\")\n\n        for i in range(result_buffer_list.size()):\n            aa = result_buffer_list[i]\n            np_image = np.array(aa, copy= False)\n            image = Image.fromarray(np_image)\n            with Image.open(test_config.TEST_SOURCE_DIR + \"/../src/python/test/data/python_test_show_out.png\") as check_image:\n                try:\n                    diff = ImageChops.difference(image, check_image)\n                    self.assertEqual(diff.getbbox(), None)\n                except ValueError as e:\n                    self.assertTrue(False)\n        flow.stop()\n    \n    def test_flow_op_by_name(self):\n        conf_file = test_config.TEST_DATA_DIR + \"/py_op_config.toml\"\n        driver_dir = test_config.TEST_DRIVER_DIR\n        with open(conf_file, \"w\") as out:\n            txt = r\"\"\"\n[driver]\ndir=[\"{}\", \"{}\"]\nskip-default=true\n[flow]\nname=\"test_flow\"\n[log]\nlevel=\"INFO\"\n[args.test_arg1]\ndefault=\"test_arg1_value\"\n[args.test_arg2]\ndefault=\"test_arg2_value\"\n[graph]\ngraphconf = '''digraph demo {{                                                                            \n    input[type=input]   \n    python_args[type=flowunit, flowunit=python_args, device=cpu, test_arg1=\"$test_arg1\", test_arg2=\"$test_arg2\"]\n    output[type=output]\n    input -> python_args:input\n    python_args:output -> output\n}}'''\nformat = \"graphviz\"\n\"\"\".format(driver_dir, test_config.TEST_DATA_DIR + \"/python_op\")\n            out.write(txt)\n\n        conf = modelbox.Configuration()\n        flow = modelbox.Flow()\n        ret = flow.init_by_name(\"test_flow\", {\"test_arg1\": \"test_arg1_value2\"}, test_config.TEST_DATA_DIR)\n        if ret == False:\n            modelbox.error(ret)\n   
     os.remove(conf_file)\n        ret = flow.start_run()\n        self.assertTrue(ret)\n        \n        stream_io = flow.create_stream_io()\n        input_buffer = stream_io.create_buffer(\"input_buffer\")\n        stream_io.send(\"input\", \"input_buffer\")\n        stream_io.close_input()\n        result = stream_io.recv(\"output\")\n        modelbox.info(\"get output\", result.as_object().strip(chr(0)))\n        exp_output = \"input_buffer, test_arg1_value2, test_arg2_value\"\n        modelbox.info(\"exp output\", exp_output)\n        self.assertTrue(result.as_object() == exp_output)\n\n        flow.stop()\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_log.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport sys\nimport threading\nimport modelbox\nimport inspect\nimport os\n\n\nclass TestLog(unittest.TestCase):\n    def setUp(self):\n        self._lock = threading.Lock()\n        self._count = 0\n        self._log = modelbox.Log()\n        self._oldlogger = self._log.get_logger()\n        self._log.reg(self.Log)\n        self._log.set_log_level(modelbox.Log.Level.INFO)\n\n    def tearDown(self):\n        self._log.set_logger(self._oldlogger)\n        pass\n\n    def Log(self, level, file, lineno, func, msg):\n        with self._lock:\n            self._count += 1\n\n        frame = inspect.currentframe()\n        frame = frame.f_back\n        info = inspect.getframeinfo(frame)\n\n        self._msg = msg\n        self.assertEqual(file, os.path.basename(info.filename))\n        self.assertTrue(lineno - info.lineno >= -2 and lineno - info.lineno <= 2)\n        self.assertEqual(func, info.function)\n\n    def test_LogPrint(self):\n        msg = \"Hello, world\"\n        modelbox.info(msg)\n        self.assertEqual(msg, self._msg)\n\n    def test_LogPrintExt(self):\n        msg = \"Hello, world\"\n        frame = inspect.currentframe()\n        info = inspect.getframeinfo(frame)\n        self._log.print_ext(modelbox.Log.Level.INFO, os.path.basename(info.filename),\n                            info.lineno + 2, 
info.function, msg)\n        self.assertEqual(msg, self._msg)\n\n    def test_LogSetLevel(self):\n        not_set_msg = \"NOT SET\"\n        self._msg = not_set_msg\n        msg = \"Hello, world\"\n        self._log.set_log_level(modelbox.Log.Level.ERROR)\n        modelbox.info(msg)\n        self.assertNotEqual(msg, self._msg)\n        self.assertEqual(not_set_msg, self._msg)\n\n    def test_LogDefaultNoOutput(self):\n        msg = \"Hello, world\"\n        modelbox.info(msg)\n\n    def threadtest(self, index, loop):\n        for i in range(loop):\n            modelbox.info(\"loop\" +\n                        str(index) + \": \" + str(i))\n\n    def test_LogMultiThread(self):\n        msg = \"Hello, world\"\n        l = []\n        num = 100\n        loop = 100\n\n        for i in range(100):\n            t = threading.Thread(target=self.threadtest, args=(i, loop))\n            t.start()\n            l.append(t)\n\n        for t in l:\n            t.join()\n\n        self.assertEqual(loop * num, self._count)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_model.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport modelbox\nimport numpy as np\n\nfrom test import test_config\n\n\nclass TestPyModel(unittest.TestCase):\n    def setUp(self):\n        self.model = modelbox.Model(test_config.TEST_DATA_DIR + \"/python_op\", \"python_brightness\", 1, \"cpu\", \"0\")\n        self.model.add_path(test_config.TEST_DRIVER_DIR)\n        ret = self.model.start()\n        self.assertEqual(ret.code(), modelbox.Status.StatusCode.STATUS_SUCCESS)\n\n    def tearDown(self):\n        self.model.stop()\n\n    def test_model_infer(self):\n        for i in range(0, 10):\n            data = np.zeros((32, 32, 3), dtype=np.uint8)\n            result = self.model.infer([data])\n            self.assertEqual(len(result), 1)\n            self.assertEqual(np.array(result[0]).shape, (32, 32, 3))\n\n    def test_model_infer_batch(self):\n        data = []\n        batch_size = 10\n        for i in range(0, batch_size):\n            data.append(np.zeros((32, 32, 3), dtype=np.uint8))\n        result = self.model.infer_batch([data])\n        self.assertEqual(len(result), 1)\n        port0_result = result[0]\n        self.assertEqual(len(port0_result), batch_size)\n        for i in range(0, batch_size):\n            self.assertEqual(np.array(port0_result[0]).shape, (32, 32, 3))\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_modelbox.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport modelbox\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_session.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport modelbox\nimport numpy as np\n\n\nclass TestSessionContext(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_session_set_get(self):\n        session_context = modelbox.SessionContext()\n        session_context.set_private(\"int\", 1)\n        session_context.set_private(\"double\", 11.2)\n        session_context.set_private(\"bool_true\", True)\n        session_context.set_private(\"bool_false\", False)\n        session_context.set_private(\"str\", \"string test\")\n\n        session_context.set_private(\"list1_int\", [1, 2, 3, 4])\n        session_context.set_private(\"list1_double\", [1.1, 2.2, 3.3, 4.4])\n        session_context.set_private(\"list1_bool\", [True, False, False, True])\n        session_context.set_private(\"list1_str\", [\"hello\", \"world\", \"!\"])\n\n        session_context.set_private(\"list2_int\", [[1, 2], [3, 4]])\n        session_context.set_private(\"list2_double\", [[1.1, 2.2], [3.3, 4.4]])\n        session_context.set_private(\"list2_bool\", [[True, False], [False, True]])\n        session_context.set_private(\"list2_str\", [[\"hello\", \"world\"], [\"good\", \"bad\"]])\n\n        session_context.set_private(\"dict\", {\"1\":1, \"2\":2})\n\n        np_test = np.random.random((2, 3))\n        
session_context.set_private(\"np_test\", np_test)\n\n        self.assertEqual(session_context.get_private(\"int\"), 1)\n        self.assertEqual(session_context.get_private(\"double\"), 11.2)\n        self.assertEqual(session_context.get_private(\"bool_true\"), True)\n        self.assertEqual(session_context.get_private(\"bool_false\"), False)\n        self.assertEqual(session_context.get_private(\"str\"), \"string test\")\n\n        self.assertEqual(session_context.get_private(\"list1_int\"), [1, 2, 3, 4])\n        self.assertEqual(session_context.get_private(\"list1_double\"), [1.1, 2.2, 3.3, 4.4])\n        self.assertEqual(session_context.get_private(\"list1_bool\"), [True, False, False, True])\n        self.assertEqual(session_context.get_private(\"list1_str\"), [\"hello\", \"world\", \"!\"])\n\n        self.assertEqual(session_context.get_private(\"list2_int\"), [[1, 2], [3, 4]])\n        self.assertEqual(session_context.get_private(\"list2_double\"), [[1.1, 2.2], [3.3, 4.4]])\n        self.assertEqual(session_context.get_private(\"list2_bool\"), [[True, False], [False, True]])\n        self.assertEqual(session_context.get_private(\"list2_str\"), [[\"hello\", \"world\"], [\"good\", \"bad\"]])\n\n        self.assertEqual(session_context.get_private(\"dict\"), {\"1\":1, \"2\":2})\n\n        self.assertTrue((session_context.get_private(\"np_test\") == np_test).all())\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "src/python/test/test_status.py",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport sys\nimport threading\nimport modelbox\nimport inspect\n\n\nclass TestStatus(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_status_cond(self):\n        s1 = modelbox.Status()\n        s2 = modelbox.Status()\n        self.assertEqual(s1, s2)\n\n        s1 = modelbox.Status(modelbox.Status.StatusCode.STATUS_SUCCESS)\n        s2 = modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT)\n        self.assertNotEqual(s1, s2)\n\n        self.assertTrue(s1)\n        self.assertFalse(s2)\n\n    def test_status_message(self):\n        m = \"py log message\"\n        s = modelbox.Status(modelbox.Status.StatusCode.STATUS_FAULT, m)\n        expect_msg = \"code: \" + s.str_code() + \", errmsg: \" + m\n        self.assertEqual(s.__str__(), expect_msg)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nadd_definitions(-DBUILD_TEST)\nset(TEST_MAIN_SOURCE ${CMAKE_CURRENT_LIST_DIR}/test_main.cc)\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/test_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/test_config.h @ONLY)\n\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_INCLUDE})\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_SOURCES_DIR})\nlist(APPEND TEST_INCLUDE ${MODELBOX_PROG_INCLUDE} )\nlist(APPEND TEST_INCLUDE ${MODELBOX_SERVER_INCLUDE})\nlist(APPEND TEST_INCLUDE ${LIBMODELBOX_BASE_INCLUDE})\nlist(APPEND TEST_INCLUDE ${TOML_INCLUDE_DIR})\nlist(APPEND TEST_INCLUDE ${CMAKE_CURRENT_BINARY_DIR})\nlist(APPEND TEST_INCLUDE ${MODELBOX_TOP_DIR})\nlist(APPEND TEST_INCLUDE ${MODELBOX_MANAGER_INCLUDE})\nlist(REMOVE_DUPLICATES TEST_INCLUDE)\n\nif(DUKTAPE_FOUND)\n    add_definitions(-DENABLE_JS_PLUGIN)\nendif()\n\nset(TEST_SOURCE \n    ${MODELBOX_SERVER_SOURCES} \n\t${MODELBOX_TOOL_SOURCES}\n    ${MODELBOX_SERVING_SOURCES}\n)\n\nset(TEST_LINK_LIBRARIES\n    ${MODELBOX_SERVER_LINK_LIBRARIES}\n\t${MODELBOX_TOOL_LINK_LIBRARIES}\n\t${LIBMODELBOX_LINK_SOURCES} \n\t${MODELBOX_MANAGER_LINK_LIBRARIES}\n)\n\ninclude_directories(${gtest_SOURCE_DIR}/include ${gtest_SOURCE_DIR})\ninclude_directories(${gmock_SOURCE_DIR}/include 
${gmock_SOURCE_DIR})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${NLOHMANN_INCLUDE_DIR})\n\nset(CMAKE_CXX_FLAGS_OLD ${CMAKE_CXX_FLAGS})\nif (CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\")\n\tset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC -fno-gnu-unique\")\nelse()\n\tset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC\")\nendif()\nadd_subdirectory(mock)\nif(NOT DISABLE_MODELBOX_TEST)\n\tadd_subdirectory(unit)\n\tadd_subdirectory(drivers)\n\tadd_subdirectory(function)\n\tadd_subdirectory(manager)\nendif()\nset(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_OLD})\n\nif(NOT DISABLE_MODELBOX_TEST)\nlist(REMOVE_DUPLICATES MODELBOX_UNIT_TEST_TARGETS)\nadd_custom_target(build-test\n\tDEPENDS ${MODELBOX_UNIT_TEST_TARGETS}\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run Build Test...\"\n)\n\nif (TEST_COVERAGE)\nadd_custom_target(coverage\n\tCOMMAND make unittest\n\tCOMMAND lcov -d ${CMAKE_BINARY_DIR}/src -d ${CMAKE_BINARY_DIR}/test -c -o ${CMAKE_BINARY_DIR}/coverage.info\n\tCOMMAND lcov -r ${CMAKE_BINARY_DIR}/coverage.info  '/usr/*' 'thirdparty/download/*' 'build/*' -o ${CMAKE_BINARY_DIR}/coverage-strip.info\n\tCOMMAND genhtml ${CMAKE_BINARY_DIR}/coverage-strip.info -o ${CMAKE_BINARY_DIR}/coverage/\n\tCOMMAND rm ${CMAKE_BINARY_DIR}/coverage-strip.info\n\tDEPENDS build-test\n\tWORKING_DIRECTORY ${CMAKE_BINARY_DIR}\n\tBYPRODUCTS ${CMAKE_BINARY_DIR}/coverage.info ${CMAKE_BINARY_DIR}/coverage\n\tCOMMENT \"Run Coverate Test...\"\n)\nendif()\n\nlist(REMOVE_DUPLICATES MODELBOX_UNIT_TEST_RUN_TARGETS)\nadd_custom_target(unittest\n\tDEPENDS ${MODELBOX_UNIT_TEST_RUN_TARGETS}\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run Unit Test...\"\n)\nendif()\n\n# replace all test config files\nlist(LENGTH MODELBOX_UNIT_TEST_CONFIG_IN CONFIG_FILE_NUMBER)\nif ( ${CONFIG_FILE_NUMBER} GREATER 0)\n\tmath(EXPR CONFIG_FILE_NUMBER \"${CONFIG_FILE_NUMBER}-1\")\n\tforeach(i RANGE 0 
${CONFIG_FILE_NUMBER}-1 1)\n\t\tlist(GET MODELBOX_UNIT_TEST_CONFIG_IN ${i} CONFIG_IN)\n\t\tlist(GET MODELBOX_UNIT_TEST_CONFIG_OUT ${i} CONFIG_OUT)\n\t\tconfigure_file(${CONFIG_IN} ${CONFIG_OUT} @ONLY)\n\tendforeach(i)\nendif()\n\nset(MODELBOX_UNIT_TEST_CONFIG_IN \"\" CACHE INTERNAL \"\")\nset(MODELBOX_UNIT_TEST_CONFIG_OUT \"\" CACHE INTERNAL \"\")\nset(MODELBOX_UNIT_TEST_TARGETS \"\" CACHE INTERNAL \"\")\nset(MODELBOX_UNIT_TEST_RUN_TARGETS \"\" CACHE INTERNAL \"\")\n"
  },
  {
    "path": "test/assets/ascend_padding_yuv",
    "content": "\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u00
10\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0013\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0013\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0012\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0012\u0012\u0011\u0011\u0010\u0011\u0012\u0013\u0014\u0013\u0013\u0013\u0013\u0014\u0013\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010
\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u001fB)\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u001fC'\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\"C!\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u00125:\u0014\u0011\u0010\u0013:OOOOOOOG3\u001d\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011+Q@\u0015\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u00118Q8\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u00111P0\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0014FK\u001b\u0011\u0011\u001aJQLLLLLLQQI\"\u0012\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011,QM\"\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0018GQ8\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u00112P1\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0014GK\u001b\u0010\u0011\u001aJK\u001b\u0016\u0016\u0016\u0016\u0017 
8QJ\u001a\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011+QQ6\u0012\u0011\u0011\u0011\u0010\u0011\u0012-PQ8\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u00112P1\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0011\u0011\u0010\u0015GK\u001c\u0010\u0011\u001bJK\u0015\u0011\u0011\u0011\u0011\u0011\u0011\u0014?Q3\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0011\u0011\u0010\u0011\u0011\u0011\u0011,QQH\u0017\u0010\u0011\u0011\u0011\u0011\u0014@QQ8\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0012\u0012\u0012\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0012\u00102P1\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0012\u0012\u0012\u0011\u0011\u0011\u0010\u0010\u0010\u0015GK\u001c\u0010\u0011\u001bJJ\u0016\u0011\u0011\u0010\u0010\u0010\u0011\u0010-P6\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0012\u0012\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0011\u0011+QQP(\u0011\u0011\u0011\u0011\u0010\u001fMQQ8\u0011\u0011\u0011\u0011\u0010\u0011\u001d8A?-\u0018\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0019+?A7\u001e3P1\u0011\u0011\u0011\u0010\u0011\u0015&;A<$\u0013\u0011\u0011\u0011\u0010\u0015GK\u001c\u0010\u0010\u001bJK\u0015\u0010\u0010\u0010\u0010\u0010\u0011\u00112Q6\u0011\u0011\u0010\u0011\u0011\u001a0@@3\u001a\u0012\u0011\u0011\u0011\u0010\u00141 \u0011\u0011\u0011\u0010\u0010\u00161 
\u0011\u0011\u0011\u0011\u0011+PNQ;\u0014\u0011\u0010\u0010\u00120QNP8\u0011\u0010\u0011\u0010\u0013.MQRQPI\"\u0012\u0011\u0011\u0011\u0011\u0011\"HQQQQNNP1\u0011\u0011\u0011\u0010\u0019AORRQOC\u001c\u0011\u0011\u0010\u0015GK\u001c\u0010\u0010\u001aJK\u0015\u0011\u0011\u0011\u0011\u0011\u0011\u001bEO&\u0011\u0010\u0011\u0012+JPQQQM2\u0014\u0011\u0011\u0011\u001fNF\u001b\u0011\u0011\u0010\u00125Q8\u0012\u0011\u0011\u0010\u0010,QAKL\u001e\u0011\u0011\u0010\u0016FL9P8\u0011\u0011\u0010\u00140PK4 #=OL#\u0012\u0011\u0011\u0011#JPC'\u001f4LQP1\u0011\u0011\u0011\u0019DQJ.\u001f%FQC\u0018\u0011\u0011\u0015GK\u001c\u0010\u0010\u001bJN411111=LQ@\u0014\u0011\u0010\u0012+NP?%!8NP7\u0014\u0011\u0011\u0015=QB\u0017\u0011\u0011'PJ \u0012\u0011\u0011\u0010\u0011,P:=Q2\u0012\u0011\u0011%NF/O8\u0011\u0011\u0011+OK!\u0012\u0010\u0011\u0013-OF\u0019\u0011\u0011\u0018FO/\u0015\u0011\u0011\u0011 JP1\u0012\u0011\u00126QE\u001d\u0011\u0010\u0011\u0018BP+\u0012\u0011\u0015GK\u001c\u0010\u0010\u001bKQQQQQQQQRK\u001e\u0010\u0010\u0011\u0019KP4\u0014\u0011\u0011\u0012)NO&\u0011\u0011\u0011\u001eIP3\u0013\"JO-\u0011\u0010\u0010\u0010\u0010\u0011,P7&OC\u0016\u0011\u0013:Q,(P8\u0011\u0011\u0014=Q*\u0011\u0011\u0011\u0011\u0011\u0014?P)\u0012\u0011)O@\u0013\u0011\u0010\u0011\u0011\u00113Q1\u0011\u0011\u0019HO-\"\"!!!4QA\u0012\u0010\u0015HK\u001c\u0010\u0010\u001bKN522223BOP8\u0013\u0011\u0011,P;\u0014\u0011\u0011\u0011\u0011\u00122P9\u0013\u0011\u0011\u0011#LM0DQ9\u0014\u0010\u0010\u0010\u0010\u0010\u0011,P7\u0017DP#\u0011\u001cML\u001d)O7\u0011\u0010\u0017LK\u001b\u0010\u0011\u0011\u0011\u0011\u0011-QA\u0012\u0013;Q+\u0012\u0011\u0010\u0011\u0011\u00102P1\u0011\u0011'OQPQPPPQQQ?\u0012\u0010\u0015HK\u001c\u0010\u0011\u001bJK\u0015\u0010\u0010\u0010\u0010\u0011\u0012\u001fLO&\u0011\u0014=P'\u0011\u0011\u0011\u0011\u0011\u0011\u001dND\u0016\u0011\u0010\u0011\u0012(OQQB\u0017\u0011\u0010\u0010\u0010\u0010\u0010\u0011,P7\u00132P5\u0012.P>\u0013(O8\u0011\u0010\u0017LI\u0018\u0011\u0010\u0011\u0011\u001
1\u0011$O@\u0012\u0014@P!\u0011\u0011\u0011\u0011\u0010\u00112P1\u0011\u00110RLEEDEDEE@#\u0011\u0011\u0015GK\u001c\u0010\u0010\u001aJK\u0016\u0010\u0011\u0010\u0010\u0010\u0011\u00126P6\u0011\u0018FM\u001a\u0011\u0010\u0011\u0011\u0011\u0011\u001cND\u0017\u0010\u0011\u0011\u0011\u0014=QO\"\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011,P7\u0012\u001eKJ\u001fAO'\u0011)P8\u0011\u0011\u0017LJ\u001a\u0010\u0011\u0010\u0010\u0011\u0011*P@\u0011\u0013>P)\u0012\u0011\u0011\u0011\u0011\u00102P1\u0011\u0011*O@\u0013\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0014GK\u001c\u0010\u0010\u001bJK\u0016\u0011\u0011\u0011\u0011\u0011\u0010\u0011.Q7\u0011\u0014@O&\u0011\u0011\u0011\u0011\u0011\u0011\u001cNE\u0016\u0010\u0010\u0011\u0012)ORQ>\u0015\u0011\u0011\u0011\u0011\u0011\u0010\u0011,P7\u0012\u0013?P9NH\u001a\u0011)P8\u0011\u0011\u0014BP&\u0011\u0011\u0011\u0010\u0011\u0013;P3\u0011\u0011,P<\u0013\u0010\u0011\u0010\u0011\u00103Q1\u0011\u0011\u001bJJ\u001a\u0010\u0011\u0011\u0011\u0011\u0012\u0017\u0014\u0011\u0011\u0015GK\u001c\u0010\u0010\u001aJK\u0016\u0010\u0010\u0010\u0010\u0010\u0011\u0011.P6\u0011\u00120P6\u0012\u0010\u0011\u0010\u0010\u0011*P=\u0014\u0010\u0010\u0011\u001eGP=KO/\u0012\u0011\u0011\u0010\u0011\u0010\u0011,P7\u0012\u0012)PQQ4\u0012\u0011)P8\u0011\u0011\u0011-PD\u0019\u0011\u0011\u0011\u0011#KM\u001e\u0010\u0011\u001dJK\"\u0011\u0011\u0011\u0011\u0019DP1\u0011\u0011\u0014<Q3\u0015\u0011\u0011\u0011\u0012+M5\u0012\u0010\u0014GK\u001c\u0010\u0010\u001bKK\u0016\u0010\u0010\u0010\u0010\u0010\u0011\u0017DP/\u0011\u0011\u001cNI \u0011\u0011\u0011\u0011\u001bDO+\u0012\u0011\u0010\u0018DQ<\u0014(NN$\u0011\u0010\u0011\u0011\u0010\u0011,P7\u0012\u0011\u0019IRN\u001e\u0011\u0011)P8\u0011\u0011\u0011\u001bEQB \u0018\u001a.KO-\u0012\u0010\u0011\u0012,OK-\u001a\u0019%DQP1\u0011\u0011\u0011 
JP<\u001d\u0019\u001b7OM%\u0012\u0011\u0015GK\u001c\u0010\u0010\u001aKK\u001d\u0019\u0019\u0019\u0019\u0019$AQJ\u0018\u0011\u0011\u00136PJ+\u0019\u0019\"GQA\u0017\u0011\u0011\u00126QH\u001b\u0011\u00130QI\u001e\u0011\u0011\u0011\u0010\u0011+P6\u0012\u0011\u0012:QB\u0014\u0011\u0011'P7\u0011\u0011\u0010\u0011\u001aBPOMNQO,\u0012\u0011\u0011\u0011\u0011\u0012+NQNNQQOP0\u0011\u0011\u0011\u0011 HQNMNQK$\u0012\u0011\u0010\u0015FJ\u001b\u0011\u0011\u001aJQMMMMMNQQK)\u0011\u0011\u0011\u0011\u00134OQNNPP=\u0017\u0011\u0011\u0012 NJ\u001f\u0012\u0011\u0011\u0015<P7\u0012\u0011\u0010\u0011\u0011\u001b; \u0011\u0011\u0011\u001d;!\u0011\u0011\u0011\u0018:\"\u0011\u0010\u0010\u0010\u0010\u0013,FKH>#\u0012\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u001d9IJG+#;\u001d\u0011\u0011\u0010\u0010\u0011\u0018/IJH5\u001a\u0010\u0010\u0010\u0010\u0012.3\u0013\u0011\u0010\u00123JJJJJJJE.\u0019\u0012\u0010\u0010\u0010\u0010\u0010\u0013 >JKD&\u0014\u0011\u0010\u0011\u0011\u0018;%\u0011\u0010\u0010\u0011\u0011\u001a:&\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0011\u0011\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0010\u0011\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0012\u0013\u0012\u0013\u0013\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0013\u0013\u0013\u0012\u0012\u0011\u0012\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0013\u0013\u0013\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0012\u0012\u0011\u0011\u0011\u0011\u0012\u0012\u0012\u0012\u0012\u0012\u0012\u0013\u0012\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0012\u0012\u0012\u0013\u0013\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0012\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0012\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0012\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u001
1\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\
u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0
010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0012\u0010\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0012\u0012\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0012\u0011\u0011\u0011\u0011\u0011\u0012\u0013\u0014\u0013\u0013\u0013\u0013\u0013\u0013\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011)E\u0012\u0012\u0011\u0010\u0010\u0010\u0010\u0010\u0010,=\u0012\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u00113/\u0011\u0011\u0010\u0011\u001
0\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0012]o\u0015\u0011\u0011\u0015lX%\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011Ft\u0017\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011ab\u0012\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0012UR\u0012\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0017\"\u0011\u0011 0\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010G1\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u001db\u0011\u0010\u0011\u0011\u0010\u0011\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0011\u0011\u0010\u0011US\u0011\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0017\"\u0010\u0010 \u001e\u0015\u0015\u0015\u0015\u0017,c!\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010GZ\u0012\u0011\u0011\u0011\u0011\u0011\u0011Kb\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0011\u0010\u0010\u0011US\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0017\"\u0010\u0010 
\u0019\u0010\u0010\u0010\u0010\u0011\u0011\u0015rV\u0011\u0010\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0012\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010G\u001a\u0011\u0011\u0011\u0011\u0011\u0016tc\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0012\u0012\u0011\u0011US\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0012\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0017\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0011\u0011\u0012J^\u0012\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010G;\u0011\u0010\u0010\u0010\u0011+c\u0012\u0010\u0011\u0011\u0012\u0012&czwH\u001b\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u001dHw{d&US\u0011\u0010\u0010\u0010\u0011\u0016:nzr5\u0013\u0011\u0010\u0010\u0010\u0017\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0010\u0011\u0011O^\u0012\u0011\u0010\u0010\u0012 Pyz\\\u001f\u0012\u0011\u0011\u0011\u0012\u0018V+\u0011\u0010\u0011\u0011\u0011\u0019T0\u0011\u0011\u0011\u0010\u0010Go\u0015\u0010\u0011\u0010\u0012Qc\u0011\u0011\u0011\u0011\u0013L0\u0012\u0010\u0011\u0010\u0010\u00101S\u0011\u0011\u0010\u0010\u001dx}\"\u0010\u0011\u0010\u0017\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0010\u0011\u001e:\u0011\u0011\u0011\u0012CW\u0014\u0011\u0011\u00110\u001f\u0011\u0011\u0011\u0013[d\u0011\u0010\u0011\u0010\u0010Gq)\u0011\u0011\u0010\u0019]b\u0011\u0011\u0011\u0016PZ-1n2\u0012\u0011\u0011\u00122|<,YS\u0011\u0012\u0011\u001e|L-9}\u001c\u0011\u0010\u0017\"\u0010\u0010 
VSSSSTmt\u0015\u0011\u0011\u0012Dt6.b_\u0015\u0011\u0011\u0016my\u0019\u0011\u0011=.\u0011\u0011\u0011\u0010\u0010GdmS\u0012\u0011\u00115|Gb\u0011\u0011\u0011E-\u0012\u0011\u0011\u0013H\u001e\u0011\u0011\u001fJ\u0016\u0012\u0012\u0012-T\u0011\u0011\u0013a$\u0012\u0011\u0011\u001cyF\u0012\u0010\u0018\"\u0010\u0010 &\u0011\u0011\u0011\u001eY\u0014\u0011\u0011\u0013?9\u0011\u0012\u0011%X\u00140I\u0012\u0011\u0011\u0010\u0010\u0010Ga9{\u0017\u0011\u0014lI@c\u0011\u0011\u0014pD\u0011\u0011\u0011\u0011\u0012\u0013tA\u0011\u0011As\u0013\u0011\u0010\u0011\u0011\u0011US\u0011\u0011\u001d?,-,-.Mt\u0012\u0011\u0018\"\u0010\u0011 YUUUUW|c\u0014\u0011\u0012Fh\u0014\u0011\u0011\u0011\u0011\u0012Pe\u0013\u0011\u0011\u00125K|f\u0015\u0011\u0011\u0010\u0011\u0010\u0010Ga\u0018|4\u0012&$?b\u0011\u0011\u001d\"\u0011\u0011\u0011\u0011\u0011\u0011It\u0012\u0013lI\u0011\u0011\u0011\u0011\u0011\u0011US\u0012\u0011=q\u0012\u0011\u0017\"\u0010\u0010!\u0019\u0011\u0011\u0010\u0010\u0011\u0012*8\u0011\u0015o<\u0011\u0010\u0011\u0011\u0010\u0011'\u0019\u0010\u0011\u0011\u0012>{\u0019\u0011\u0011\u0010\u0011\u0012\u0010\u0010Ga\u0013T]\u0012Kn\u0014?b\u0011\u0011\u001e\u001c\u0011\u0010\u0010\u0011\u0011\u00103s\u0011\u0013v0\u0011\u0011\u0011\u0011\u0010\u0010US\u0011\u0011P}}}}}}}x4\u0012\u0011\u0018\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0010\u0011\u0012[^\u0011\u001a \u0011\u0011\u0011\u0010\u0010\u0010&\u0019\u0011\u0010\u0011\u0011\u0014i.\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010Ga\u0013'&w?\u0012?b\u0011\u0011\u001d\u001e\u0011\u0011\u0011\u0010\u0011\u0010Bt\u0012\u0013rB\u0011\u0011\u0010\u0011\u0012\u0011TS\u0011\u0011Dr\u0013\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0018\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0010\u0011\u0011K_\u0011\u0017u9\u0011\u0011\u0011\u0011\u0011\u0011'\u0018\u0011\u0011\u0010\u0012Aq\u0017\u0011\u0011\u0011\u0010\u0011\u0010\u0010Ga\u0012\u0015p` 
\u0011?b\u0011\u0011\u0015w:\u0012\u0011\u0011\u0010\u0011\u0013kW\u0011\u0011Jk\u0012\u0011\u0011\u0011\u0011\u0011US\u0011\u0012\" \u0011\u0011\u0011\u0010\u0011\u0012\u001b\u0015\u0011\u0011\u0017\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0010\u0011\u0011K^\u0011\u0011O_\u0012\u0012\u0011\u0011\u0010\u0011Gm\u0015\u0010\u0011\u0011)hM\u0012\u0011\u0011\u0010\u0010\u0010\u0010Ga\u0012\u0011AZ\u0012\u0011?b\u0011\u0011\u0011L}\u001e\u0011\u0011\u0011\u00122+\u0011\u0011&2\u0011\u0011\u0010\u0012\u001f}S\u0011\u0011\u0014jV\u0017\u0011\u0011\u0011\u0012D^\u0011\u0011\u0018\"\u0010\u0010 \u0019\u0010\u0010\u0010\u0010\u0011\u0012\u001a{O\u0011\u0011&*\u0012\u0011\u0011\u0011!{D\u0011\u0010\u0011\u001a{l\u0015?5\u0012\u0012\u0011\u0011\u0010\u0010Ga\u0012\u0011\u001f*\u0010\u0011?b\u0011\u0010\u0011!x(\u0018\u001aNI\u0013\u0010\u0010\u0012HL\u001a\u00178}T\u0012\u0010\u0012,l\"\u0018\u001e^8\u0011\u0010\u0018\"\u0010\u0010  \u0017\u0017\u0017\u0017\u001a6x\u001e\u0011\u0011\u0013]C\u0019\u00180v\u001a\u0011\u0011\u0012\\\u001f\u0011\u0013O(\u0011\u0011\u0011\u0011\u0011F_\u0012\u0011\u0012hy\u0015\u0011\u0011?a\u0012\u0011\u0011\u0011 yG\u0012\u0011\u0011\u0011\u0011\u0013GR\u0012\u0011\u0010\u0011-5\u0013\u0012\u0011\u0018!\u0011\u0011 
C\u0011\u0010\u0010\u0010\u0014[o\u0019\u0011\u0011\u0012/,\u0011\u0011\u0011\u0015ld\u0012\u0012\u0011\u0011\u0010!n-\u0010\u0011\u0011'p/\u0011\u0011\u0011\u001cm1\u0011\u0011\u0011\u0011\u0012\u0015Ir4\u0012\u0012\u0012\u0010\u0011\u0012\u0011\u0012$jH.p&\u0011\u0011\u0010\u0011\u0011\u001aQ]\u001d\u0011\u0011\u0012\u0011\u0012L\\\u0012\u0011\u0011\u0013[~O\u001e\u0010\u0010\u0010\u0010\u0010\u0010\u0013,q~:\u0013\u0011\u0011\u0011\u0011\u001cm9\u0011\u0011\u0011\u0011\u0011\u001el=\u0012\u0011\u0010\u0011\u0011\u0011\u0012\u0011\u0011\u0010\u0011\u0011\u0012\u0011\u0011\u0011\u0010\u0011\u0012\u0012\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0012\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0013\u0012\u0011\u0011\u0012\u0012\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0012\u0012\u0012\u0012\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0012\u0012\u0013\u0012\u0012\u0012\u0012\u0012\u0012\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0012\u0012\u0012\u0012\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0010\u0010\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u00
10\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010
\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u
0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0010\u0010\u0012\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0012\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0012\u0011\u0011\u0012\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0012\u0011\u0011\u0011\u0011\u0012\u0011\u0012\u0011\u0011\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0018. 
\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0019.\u001d\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u001b/\u001a\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011'*\u0012\u0011\u0011\u0012),++++++-$\u0017\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u001e*)\u0012\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011$*$\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0011\u0010\u0010!*#\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0012*,\u0016\u0010\u0010\u0015-*......+*,\u001a\u0011\u0012\u0011\u0010\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u001e*,\u001a\u0011\u0010\u0011\u0011\u0010\u0010\u0011\u0015,*$\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0011\u0010\u0011!*\"\u0010\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0012),\u0016\u0010\u0010\u0015-*\u0014\u0012\u0012\u0012\u0012\u0012\u0018'++\u0016\u0011\u0011\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0010\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u001e**#\u0012\u0010\u0011\u0011\u0011\u0010\u0011 
*)$\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0011!+#\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0012),\u0016\u0010\u0011\u0015,)\u0013\u0011\u0011\u0011\u0011\u0011\u0011\u0012'*\"\u0011\u0011\u0010\u0011\u0011\u0010\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0011\u0010\u0011\u0012\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u001e))*\u0013\u0011\u0011\u0011\u0010\u0011\u0012)*)$\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0012\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0012\u0012\u0011!+#\u0010\u0010\u0010\u0010\u0011\u0010\u0012\u0011\u0012\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0012),\u0016\u0010\u0011\u0015,*\u0013\u0011\u0011\u0011\u0011\u0010\u0011\u0011!+#\u0011\u0010\u0011\u0010\u0011\u0011\u0012\u0011\u0012\u0012\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u001e))+\u001d\u0011\u0011\u0011\u0011\u0011\u0018+**$\u0011\u0010\u0010\u0010\u0010\u0010\u0017'-,\u001f\u0014\u0010\u0010\u0010\u0010\u0010\u0010\u0011\u0011\u0014\u001f,-(\u0017!+#\u0011\u0011\u0010\u0011\u0011\u0011\u001c*-+\u001b\u0012\u0011\u0011\u0010\u0011\u0012),\u0016\u0010\u0010\u0015,*\u0012\u0010\u0010\u0010\u0010\u0010\u0011\u0011!+#\u0011\u0010\u0011\u0010\u0011\u0015\",-&\u0015\u0011\u0010\u0010\u0010\u0011\u0012%\u0018\u0011\u0011\u0010\u0011\u0011\u0014%\u0019\u0011\u0010\u0011\u0011\u0011\u001e*+*(\u0012\u0011\u0011\u0010\u0011\"***$\u0011\u0010\u0011\u0011\u0012!,****,\u0019\u0011\u0011\u0011\u0011\u0011\u0012\u001a,+***+*+#\u0011\u0010\u0011\u0011\u0014++*)*++\u0016\u0011\u0011\u0010\u0013),\u0016\u0010\u0010\u0015,*\u0013\u0011\u0011\u0011\u0011\u0011\u0011\u0015++\u001c\u0011\u0011\u0011\u0011\u001e-****+$\u0013\u0011\u0011\u0012\u001a++\u00
15\u0011\u0011\u0011\u0012$*'\u0011\u0011\u0011\u0011\u0011\u001e*+++\u0018\u0011\u0010\u0010\u0013*,'+$\u0011\u0011\u0011\u0013\"+,%\u0018\u001a*++\u001a\u0010\u0011\u0011\u0011\u001a,+*\u001d\u0018$+**#\u0011\u0010\u0010\u0015**+!\u0018\u001c,*+\u0014\u0011\u0010\u0013),\u0016\u0010\u0010\u0015-*%$$$$$(,**\u0012\u0011\u0011\u0011\u001e++*\u001b\u0019&++%\u0011\u0010\u0011\u0013'**\u0014\u0011\u0010\u001d++\u0019\u0011\u0011\u0010\u0011\u0011\u001e*('+\"\u0012\u0011\u0011\u001b,)!+$\u0011\u0010\u0011\u001e+,\u0018\u0012\u0011\u0011\u0011 +,\u0015\u0011\u0011\u0015++ \u0012\u0011\u0011\u0011\u0019,*#\u0011\u0011\u0011&*+\u0016\u0011\u0010\u0011\u0014*+\u001f\u0011\u0010\u0013),\u0016\u0010\u0011\u0015,**))))****\u0017\u0011\u0011\u0011\u0015,+$\u0012\u0011\u0011\u0011\u001d,,\u001d\u0011\u0011\u0010\u0017++#\u0012\u0019,+ \u0011\u0011\u0011\u0011\u0011\u0011\u001e*&\u001c+)\u0013\u0011\u0012)+ \u001e,$\u0011\u0011\u0011)+\u001e\u0011\u0012\u0010\u0011\u0011\u0012**\u001e\u0011\u0011\u001d+)\u0011\u0011\u0011\u0011\u0011\u0012\"+#\u0011\u0011\u0014*+\u001e\u0019\u0019\u0019\u0019\u0019!+'\u0012\u0011\u0013),\u0016\u0010\u0010\u0016,*\"\"\"\"\"\"-,+&\u0012\u0011\u0011\u001f+%\u0012\u0011\u0011\u0011\u0011\u0011\"+%\u0011\u0011\u0011\u0011\u001b,,!+*&\u0012\u0011\u0011\u0011\u0010\u0010\u0011\u001e*&\u0013*+\u001a\u0012\u0017+,\u0016\u001e+#\u0010\u0011\u0014*+\u0016\u0010\u0010\u0010\u0010\u0011\u0011\u001f*'\u0011\u0011)+ \u0011\u0011\u0011\u0010\u0011\u0010!*#\u0012\u0010\u001d+***++***)(\u0011\u0010\u0013),\u0016\u0010\u0010\u0015,)\u0012\u0010\u0010\u0010\u0010\u0010\u0011\u0017++\u001c\u0011\u0012(+\u001d\u0011\u0011\u0011\u0010\u0011\u0011\u0016*,\u0013\u0010\u0011\u0011\u0010\u001d+**+\u0013\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u001e*&\u0012\"*%\u0011 +(\u0012\u001e+#\u0010\u0011\u0014*-\u0014\u0010\u0010\u0010\u0010\u0010\u0010\u001a,'\u0011\u0012*+\u0019\u0012\u0010\u0010\u0010\u0010\u0010 
*#\u0011\u0011!*+))**))**\u001b\u0011\u0011\u0012),\u0016\u0010\u0010\u0015,)\u0012\u0010\u0010\u0010\u0010\u0010\u0011\u0011$*$\u0011\u0014+*\u0015\u0011\u0011\u0010\u0011\u0010\u0011\u0016*+\u0013\u0010\u0010\u0011\u0010\u0012&*+\u0018\u0011\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u001e*&\u0011\u0017+,\u0017)+\u001e\u0011\u001e+#\u0010\u0011\u0014*,\u0015\u0010\u0010\u0010\u0010\u0011\u0011\u001e+(\u0011\u0011)*\u001e\u0011\u0011\u0011\u0010\u0011\u0010!+#\u0010\u0011\u001e+(\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0012\u0012\u0011\u0013),\u0016\u0010\u0010\u0015,)\u0012\u0010\u0010\u0010\u0010\u0011\u0011\u0010!+#\u0011\u0012*+\u001d\u0011\u0011\u0011\u0010\u0010\u0011\u0016*+\u0013\u0011\u0011\u0011\u0011\u001e,**)\u0013\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u001e*&\u0011\u0012)+%++\u0015\u0011\u001e+#\u0010\u0011\u0013++\u001b\u0010\u0010\u0010\u0010\u0011\u0012(*#\u0010\u0011 +'\u0011\u0011\u0011\u0010\u0011\u0010!+#\u0011\u0011\u0016+,\u0016\u0010\u0010\u0010\u0011\u0011\u0011\u0014\u0012\u0011\u0011\u0012),\u0016\u0010\u0010\u0015,)\u0012\u0010\u0010\u0010\u0010\u0010\u0011\u0011!+#\u0011\u0011!+$\u0011\u0010\u0011\u0010\u0010\u0011 
+'\u0012\u0011\u0011\u0010\u0018++'++!\u0011\u0010\u0010\u0010\u0010\u0010\u0011\u001e*&\u0012\u0011\u001f+**$\u0011\u0011\u001e+#\u0011\u0011\u0011!+*\u0014\u0010\u0011\u0010\u0012\u001a++\u0018\u0011\u0011\u0017++\u001a\u0011\u0011\u0010\u0011\u0015*+#\u0011\u0011\u0011'+\"\u0013\u0011\u0010\u0011\u0011\u001f-'\u0011\u0011\u0012),\u0016\u0011\u0010\u0015,)\u0012\u0010\u0010\u0010\u0010\u0010\u0011\u0013)+!\u0011\u0011\u0017++\u0018\u0011\u0010\u0011\u0011\u0015)+\u001e\u0011\u0011\u0011\u0014+*(\u0012\u001d+,\u001a\u0011\u0010\u0010\u0010\u0011\u0011\u001e*&\u0011\u0011\u0015+*+\u0018\u0011\u0010\u001e+$\u0011\u0012\u0011\u0015+**\u0017\u0013\u0014\",,\u001f\u0011\u0011\u0011\u0011\u001f++!\u0014\u0013\u001c+**#\u0011\u0011\u0011\u0019,+(\u0016\u0013\u0015$+,\u001c\u0011\u0010\u0013),\u0016\u0011\u0010\u0015-*\u0015\u0013\u0013\u0013\u0013\u0013\u001b+*+\u0015\u0011\u0011\u0012%*,\u001e\u0013\u0013\u001a,+*\u0014\u0011\u0011\u0011#*,\u0015\u0011\u0012!+,\u0018\u0012\u0011\u0011\u0011\u0011\u001f+&\u0011\u0011\u0011'*+\u0012\u0010\u0011\u001e,$\u0011\u0011\u0010\u0011\u0015**,-,*+ \u0011\u0011\u0011\u0011\u0011\u0011\u001f,+--+*++#\u0011\u0012\u0011\u0010\u0018-*,-,*-\u001b\u0011\u0011\u0011\u0013*-\u0016\u0011\u0010\u0015-*-----,**,\u001f\u0011\u0011\u0011\u0012\u0011$,*--**)\u0014\u0011\u0011\u0011\u001b,,\u0018\u0011\u0011\u0011\u0012(*'\u0011\u0011\u0010\u0010\u0011\u0016+\u0019\u0011\u0010\u0011\u0018+\u001a\u0011\u0010\u0011\u0014+\u001a\u0011\u0011\u0011\u0011\u0011\u0012 ,+,+\u001b\u0010\u0011\u0011\u0011\u0010\u0010\u0010\u0011\u0017*++* 
\u001a,\u0017\u0011\u0011\u0011\u0011\u0011\u0014#+++&\u0015\u0011\u0011\u0011\u0011\u0011\"'\u0011\u0011\u0011\u0011%++******#\u0014\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0018*+*+\u001c\u0012\u0011\u0011\u0011\u0011\u0014+\u001c\u0011\u0011\u0010\u0010\u0011\u0015*\u001e\u0012\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0012\u0011\u0011\u0011\u0011\u0011\u0012\u0010\u0010\u0010\u0010\u0011\u0011\u0011\u0012\u0011\u0012\u0011\u0011\u0011\u0012\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0012\u0012\u0011\u0011\u0011\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0011\u0012\u0011\u0012\u0010\u0010\u0011\u0011\u0011\u0011\u0011\u0011\u0012\u0011\u0011\u0011\u0011\u0011\u0011\u0010\u0012\u0011\u0011\u0012\u0011\u0010\u0011\u0010\u0011\u0011\u0011\u0011\u0011\u0010\u0011\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\
u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0010\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000yvzun~q~b
__`q\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qaڀgj^{c؀xazzyfdӀ\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000q[z}\\j]{b؀xbـ^\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p]jo\\jr\\\\fw^[`\\{`[^y{b؀xbـz^u][c~wf̀nk\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qi]`ljta}nf{_zt^]tx_{b؀x[\\\\\\Ztz]{n`ဂapx^}\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qmevyarkcx^~lo]t\\]]][~zb؀xcڀs`ဂkp`{~]\\y\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qmvdjqsjbz^~js^ri~~~~{b؀xaـ_~hsaz`\\}\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0
000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qm]\\~sjohǀx`߀wc׀|^|^}e{b؀xbڀ{]~wdՀz]hiqa\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000qmidsjc^`\\}m]`\\]s[a\\q{b׀x\\aaa[ok^a\\wxagm\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000}|~|~~|~||||~|\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000wuplwvmic\\~~hb{zD5@0@0A1g`\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aYE6QFYN=+vtE7tqD4vtvturQEK<\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZ7$wt|y9)YO=+vtF7trF7@.~~\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZ8&YPaX;)XNg`:(7%NAro<,7$B2:*wu?.7$?.trvtF7trF7us>-ok;)8%G:}|qmNBaXWM\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZUJ<*C4aXXNhcA1}|b[OBxw=,wtjd;)~:)mivt>.vtF7ts7%9'8'9'6#mhwt:(zx^W@0D5c\\ur>-|{\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000
0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZ[QMAnitqD4kfXNH8rn=+~ZQbZ=,kf7%<*;*<*7%wtF7tqF8icA2WMc\\C3wt~~;)8%vu\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZ[RokK=VKe]lgYNE8ur@0~YNke<+~b[UJ~~vtF7trF7?.~TIhbB4wt?0:(}|\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000aZ[R;)8&~~lgYN`WQFvrC2rnI;yx=*zy=,|zJ=vtF7trF7xv:(~plH9wu<*~THTJf]D7\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000b[]SRFK?lhYOI;?/B3;*}{_W;(D58%;*ga:'F6:'c\\vtH8ur:(F7F7D47%aYZQ=+D6:(tqrpC3NCYO\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000}}}|~~|{||~~}||{|{|{~~||\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0
000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000}{~{wyqqqpz\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000xstvo~r}q}}}sr\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000yn}~nvo~r}rp\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000xovxovyont|pnqp~pnp}~r|r~p{onr|txv\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000xvpqvvzr~xt~p~ypn{}p~r}oooon{~oxpqy}p\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000ywt{}qzvr|ovyo{noppo~r}rzqvyq~on}\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000yw|suy{vr}ptzpxu~r}rpuzr~qn\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000ywoozvxu|p}rppr~r}r~o|r~puuzq\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000ywvszvrqpoxoqooznqnx~r|oqqqoxvpqo}|qtv\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000
0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0
000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000"
  },
  {
    "path": "test/assets/auth/auth_info.toml",
    "content": "[base]\n# for example: https://iam.cn-north-7.myhuaweicloud.com\niam_host=\"\"\n# agent info \ndomain_id=\"\"\nproject_id=\"\"\nak=\"\"\nsk=\"\"\n\n\n"
  },
  {
    "path": "test/assets/mindspore_inference/tensor_add.mindir",
    "content": "\n\u00050.1.0\u0012\tMindSpore*\u00051.1.0:\u0003\n\u0002\n\u001a8_7_5_construct_wrapper:x_\n\u001a8_7_5_construct_wrapper:y_\u0012\u001b8_7_5_construct_wrapper:1:2\u001a\"8_7_5_construct_wrapper:[CNode]0:1\"\u0003Add*\u001fb\u000e\b\u0002\b\u0002\u0010\u0001:\u0006shape1z\rshape:shape1,*0\n\foutput_namesZ\u0006outputz\u0015scalar:List[value1,],\u0001\b*4\n\u000binput_namesZ\u0001xZ\u0001yz\u001cscalar:List[value1,value2,],\u0001\b:\u000fDefault/Add-op3\u0012\u00178_7_5_construct_wrapper*$\n\u001a8_7_5_construct_wrapper:x_\u0012\u0006\b\u0002\b\u0002\u0010\u0001*$\n\u001a8_7_5_construct_wrapper:y_\u0012\u0006\b\u0002\b\u0002\u0010\u00012%\n\u001b8_7_5_construct_wrapper:1:2\u0012\u0006\b\u0002\b\u0002\u0010\u0001"
  },
  {
    "path": "test/assets/obs/obs_download.toml",
    "content": "[base]\n# for example: obs.cn-north-7.ulanqab.huawei.com\nhost=\"\"\n# obs bucket name\nbucket=\"\"\nak=\"\"\nsk=\"\"\n# subpath and file which want to be downloaded, for example: test/video.mp4\nfile_key=\"\"\n# local path to store the downloaded file,  for example: /home/xx/obs_download, default is /tmp/obs_download\nlocal_path=\"\"\n"
  },
  {
    "path": "test/assets/obs/obs_upload.toml",
    "content": "[base]\n# for example: obs.cn-north-7.ulanqab.huawei.com\nhost=\"\"\n# obs bucket name\nbucket=\"\"\nak=\"\"\nsk=\"\"\n# subpath in the bucket which to store the file\nsub_path = \"\"\n"
  },
  {
    "path": "test/assets/resize_cpu/virtual_python_test.toml",
    "content": "[base]\nname = \"httpserver_python\"\ndevice = \"cpu\"\nversion = \"1.1.1\"\ndescription = \"a python httpserver flowunit\"\nentry = \"xxx.py\"\ntype = \"python\"\n\n[input]\n[input.input1]\nname = \"image\"\n[input.input2]\nname = \"anchor\"\n\n[output]\n[output.output1]\nname = \"output\""
  },
  {
    "path": "test/assets/tensorflow/1.13.1/tensorflow_save_model/variables/variables.data-00000-of-00001",
    "content": "%M?Z>"
  },
  {
    "path": "test/assets/tensorflow/1.15.0/tensorflow_pb/frozen_model_en.pb",
    "content": "&;^GÈT\"~\u000eK\u0015\u0002n\u0015\u0001@h\u00186,kp\bwd\u0003Ġִ+\u0004\b\u001e4픋F_eBY\u000b'=pIV;!XSyS\bTb{Wmn\u0019\u001a\u0012\u0007_+\u0011ʺ^Y%p\u001c9Ch\u001fi*Y\nhUFкo0]W:\u0005\n\u0007Ԥ+\u00198.-j^GIfC\bsr*=\u0004SBec!\u0011P-(!'|\u000bOc\u0015\u0002\u0013%^Gb\f*\u0002rx\u0002\\ӱ\u0004\u0016tDA\u0002B T06Y\u000f-Xl\u00107\u0015\\A\u001b&WG|:p#GfuwO\u0011\u0010ٖxE\u000f<X0\u00023^\u00018GTEAC\u0015LX\u0007\u0005\u0019I߯nG\u0014<Ǝ2\u0006"
  },
  {
    "path": "test/assets/tensorflow/1.15.0/tensorflow_save_model/variables/variables.data-00000-of-00001",
    "content": "m&M?\u0013>"
  },
  {
    "path": "test/assets/tensorflow/2.6.0-dev20210809/tensorflow_save_model/variables/variables.data-00000-of-00001",
    "content": "%M?Z>"
  },
  {
    "path": "test/assets/test_inference/virtual_model_test.toml",
    "content": "[base]\nname = \"inference\"\ndevice = \"cpu\"\nversion = \"1.1.2\"\ndescription = \"a gpu inference flowunit\"\nentry = \"xxx.pb\"\ntype = \"inference\"\n\n[input]\n[input.input1]\nname = \"image\"\ntype = \"float\"\n\n[input.input2]\nname = \"anchor\"\ntype = \"float\"\n\n[output]\n[output.output1]\nname = \"output\"\ntype = \"float\""
  },
  {
    "path": "test/assets/test_model/test_dynamic.onnx",
    "content": "\b\u0007\u0012\u0007pytorch\u001a\u00061.13.0:\u0001\n\u001a\n\u0003in1\n\u0003in2\u0012\u0003out\u001a\u0004/Add\"\u0003Add\u0012\ttorch_jitZ'\n\u0003in1\u0012 \n\u001e\b\u0001\u0012\u001a\n\f\u0012\nbatch_size\n\u0002\b\u0003\n\u0002\b\u0010\n\u0002\b\u0010Z'\n\u0003in2\u0012 \n\u001e\b\u0001\u0012\u001a\n\f\u0012\nbatch_size\n\u0002\b\u0003\n\u0002\b\u0010\n\u0002\b\u0010b'\n\u0003out\u0012 \n\u001e\b\u0001\u0012\u001a\n\f\u0012\nbatch_size\n\u0002\b\u0003\n\u0002\b\u0010\n\u0002\b\u0010B\u0002\u0010\u000e"
  },
  {
    "path": "test/drivers/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.3)\n \nfile(GLOB_RECURSE UNIT_TEST_SOURCE *.cpp *.cc *.c)\n \ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${MOCK_DRIVER_CTRL_INCLUDE})\ninclude_directories(${TEST_INCLUDE})\ninclude_directories(${MODELBOX_COMMON_DRIVER_UTIL_INCLUDE})\n\nlist(REMOVE_DUPLICATES DRIVER_UNIT_TEST_INCLUDE)\ninclude_directories(${DRIVER_UNIT_TEST_INCLUDE})\n\nlist(REMOVE_DUPLICATES DRIVER_UNIT_TEST_SOURCE)\nadd_executable(drivers-unit EXCLUDE_FROM_ALL\n\t${UNIT_TEST_SOURCE}\n\t${TEST_MAIN_SOURCE}\n\t${DRIVER_UNIT_TEST_SOURCE}\n)\nset(DRIVER_UNIT_TEST_INCLUDE \"\" CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_SOURCE \"\" CACHE INTERNAL \"\")\n\nadd_custom_target(all-drivers)\nadd_custom_command(TARGET all-drivers PRE_BUILD\n\tCOMMAND rm -fr ${TEST_WORKING_DRIVERS_DIR}/*\n)\n\nforeach (ITR ${DRIVER_UNIT_TEST_TARGET} ${list_var})\n\tadd_dependencies(all-drivers ${ITR})\n\tadd_custom_command(TARGET all-drivers POST_BUILD\n\tCOMMAND cp $<TARGET_FILE:${ITR}> ${TEST_WORKING_DRIVERS_DIR}/\n\t)\nendforeach(ITR) \nset(DRIVER_UNIT_TEST_TARGET \"\" CACHE INTERNAL \"\")\n\nadd_custom_command(TARGET all-drivers POST_BUILD\n\tCOMMAND ldconfig ${TEST_WORKING_DRIVERS_DIR} -n\n)\n\nforeach (ITR ${DRIVER_UNIT_TEST_LINK_LIBRARIES} 
${list_var})\n\ttarget_link_libraries(drivers-unit ${ITR})\nendforeach(ITR) \nset(DRIVER_UNIT_TEST_LINK_LIBRARIES \"\" CACHE INTERNAL \"\")\n\ntarget_link_libraries(drivers-unit pthread)\ntarget_link_libraries(drivers-unit rt)\ntarget_link_libraries(drivers-unit dl)\ntarget_link_libraries(drivers-unit gtest_main)\ntarget_link_libraries(drivers-unit gmock_main)\ntarget_link_libraries(drivers-unit ${LIBMODELBOX_SHARED})\ntarget_link_libraries(drivers-unit ${MOCKFLOW_LIB})\ntarget_link_libraries(drivers-unit pthread)\nadd_dependencies(drivers-unit all-drivers)\n\nadd_custom_target(unittest-drivers\n\tCOMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/drivers-unit\n\tDEPENDS  drivers-unit\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run drivers Test...\"\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS drivers-unit)\nset(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\nlist(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS unittest-drivers)\nset(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\n\nset(MODELBOX_DRIVERS_TEST_TARGET drivers-unit CACHE INTERNAL \"\")\n\n "
  },
  {
    "path": "test/drivers/common/mock_cert.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"mock_cert.h\"\n\n#include \"modelbox/base/crypto.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nStatus GenerateCert(std::string* enPass, std::string* ekRootKey,\n                    const std::string& private_key,\n                    const std::string& public_key) {\n  std::string pass_str = \"password\";\n  std::string openssl_cmd =\n      \"openssl req -x509 -newkey rsa:4096 -passout pass:\" + pass_str +\n      \" -keyout \";\n  openssl_cmd += modelbox::PathCanonicalize(private_key) + \" -out \" +\n                 modelbox::PathCanonicalize(public_key);\n  openssl_cmd +=\n      \" -days 36500 -subj '/C=CN/ST=SZ/L=SZ/O=HW/OU=OU/CN=localhost'\";\n\n  std::vector<char> pass(pass_str.begin(), pass_str.end());\n  auto ret = PassEncrypt(pass, true, ekRootKey, enPass);\n  if (!ret) {\n    return ret;\n  }\n\n  if (system(openssl_cmd.c_str()) != 0) {\n    std::string errmsg = \"run command failed, \";\n    errmsg += modelbox::StrError(errno);\n    return {STATUS_FAULT, errmsg};\n  }\n\n  return STATUS_OK;\n}\n\nStatus GenerateCert(const std::string& private_key,\n                    const std::string& public_key) {\n  std::string openssl_cmd = \"openssl req -nodes -x509 -newkey rsa:4096 \";\n  openssl_cmd += \"-keyout \" + modelbox::PathCanonicalize(private_key) 
+ \" -out \" +\n                 modelbox::PathCanonicalize(public_key);\n  openssl_cmd +=\n      \" -days 36500 -subj '/C=CN/ST=SZ/L=SZ/O=HW/OU=OU/CN=localhost'\";\n\n  if (system(openssl_cmd.c_str()) != 0) {\n    std::string errmsg = \"run command failed, \";\n    errmsg += modelbox::StrError(errno);\n    return {STATUS_FAULT, errmsg};\n  }\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/drivers/common/mock_cert.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DRIVER_TEST_CERT_MOCK_H_\n#define MODELBOX_DRIVER_TEST_CERT_MOCK_H_\n\n#include <string>\n\n#include \"modelbox/base/status.h\"\n#include \"driver_flow_test.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\n\n/**\n * @brief generate cert for mock\n */\nStatus GenerateCert(std::string* enPass, std::string* ekRootKey,\n                    const std::string& private_key,\n                    const std::string& public_key);\n\n/**\n * @brief generate cert for mock\n */\nStatus GenerateCert(const std::string& private_key,\n                    const std::string& public_key);\n};  // namespace modelbox\n\n#endif  // MODELBOX_DRIVER_TEST_CERT_MOCK_H_"
  },
  {
    "path": "test/drivers/common/tensorflow_inference/tensorflow_inference_mock.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"tensorflow_inference_mock.h\"\n\n#include <dlfcn.h>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace tensorflow_inference {\n\nstatic void Register_Test_0_1_Batch_Flowunit(\n    std::shared_ptr<modelbox::MockDriverCtl> &ctl) {\n  modelbox::MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"test_0_1_batch\");\n  desc_flowunit.SetDescription(\"The test input batch data, 0 inputs 1 output\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1_batch.so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  auto mock_flowunit = std::make_shared<modelbox::MockFlowUnit>();\n  auto mock_flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>();\n  mock_flowunit_desc->SetFlowUnitName(\"test_0_1_batch\");\n  mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n  mock_flowunit_desc->SetFlowType(modelbox::STREAM);\n  mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n  std::weak_ptr<modelbox::MockFlowUnit> mock_flowunit_wp;\n  mock_flowunit_wp = mock_flowunit;\n\n  EXPECT_CALL(*mock_flowunit, 
Open(_))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n            auto spt = mock_flowunit_wp.lock();\n            auto ext_data = spt->CreateExternalData();\n            if (!ext_data) {\n              MBLOG_ERROR << \"can not get external data.\";\n            }\n\n            auto buffer_list = ext_data->CreateBufferList();\n            buffer_list->Build({10 * sizeof(int)});\n            auto *data = (int *)buffer_list->MutableData();\n            for (size_t i = 0; i < 10; i++) {\n              data[i] = i;\n            }\n\n            auto status = ext_data->Send(buffer_list);\n            if (!status) {\n              MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n            }\n\n            status = ext_data->Close();\n            if (!status) {\n              MBLOG_ERROR << \"external data close failed:\" << status;\n            }\n\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPre(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_0_1_batch \"\n                       << \"DataPre\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPost(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_0_1_batch \"\n                       << \"DataPost\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit,\n              Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::DataContext> &op_ctx) {\n            auto output_buf_1 = op_ctx->Output(\"Out_1\");\n            std::vector<size_t> shape_vector(10, 8 * sizeof(float));\n            
modelbox::ModelBoxDataType type = modelbox::MODELBOX_FLOAT;\n            output_buf_1->Build(shape_vector);\n            output_buf_1->Set(\"type\", type);\n            std::vector<size_t> shape{8};\n            output_buf_1->Set(\"shape\", shape);\n            auto *dev_data = (float *)(output_buf_1->MutableData());\n            float num;\n            for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n              num = 1.0;\n              for (size_t j = 0; j < 8; ++j) {\n                dev_data[i * 8 + j] = num;\n                num += 1.0;\n              }\n            }\n\n            MBLOG_DEBUG << output_buf_1->GetBytes();\n            MBLOG_DEBUG << \"test_0_1 gen data, 0\" << output_buf_1->Size();\n\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n    return modelbox::STATUS_OK;\n  }));\n  desc_flowunit.SetMockFlowUnit(mock_flowunit);\n  ctl->AddMockDriverFlowUnit(\"test_0_1_batch\", \"cpu\", desc_flowunit,\n                             std::string(TEST_DRIVER_DIR));\n};\n\nstatic void Register_Test_1_0_Batch_Flowunit(\n    std::shared_ptr<modelbox::MockDriverCtl> &ctl) {\n  modelbox::MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"test_1_0_batch\");\n  desc_flowunit.SetDescription(\"The test output batch data, 1 input 0 outputs\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_1_0_batch.so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  auto mock_flowunit = std::make_shared<modelbox::MockFlowUnit>();\n  auto mock_flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>();\n  mock_flowunit_desc->SetFlowUnitName(\"test_1_0_batch\");\n  mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n  mock_flowunit_desc->SetFlowType(modelbox::STREAM);\n  
mock_flowunit_desc->SetMaxBatchSize(10);\n  mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n  std::weak_ptr<modelbox::MockFlowUnit> mock_flowunit_wp;\n  mock_flowunit_wp = mock_flowunit;\n\n  EXPECT_CALL(*mock_flowunit, Open(_))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPre(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_1_0_batch \"\n                       << \"DataPre\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPost(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_1_0_batch \"\n                       << \"DataPost\";\n            return modelbox::STATUS_STOP;\n          }));\n\n  EXPECT_CALL(*mock_flowunit,\n              Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::DataContext> &op_ctx) {\n            std::shared_ptr<modelbox::BufferList> input_bufs =\n                op_ctx->Input(\"In_1\");\n            EXPECT_EQ(input_bufs->Size(), 10);\n            std::vector<size_t> shape_vector{8};\n            std::vector<size_t> input_shape;\n            auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n            EXPECT_TRUE(result);\n            EXPECT_EQ(input_shape, shape_vector);\n\n            for (size_t i = 0; i < input_bufs->Size(); ++i) {\n              const auto *input_data =\n                  static_cast<const float *>(input_bufs->ConstBufferData(i));\n              MBLOG_DEBUG << \"index: \" << i;\n              for (size_t j = 0; j < input_shape[0]; ++j) {\n                MBLOG_DEBUG << input_data[j];\n         
     }\n\n              EXPECT_NEAR(input_data[0], 1.05097, 1e-5);\n              EXPECT_NEAR(input_data[1], 1.30058, 1e-5);\n              EXPECT_NEAR(input_data[2], 1.55019, 1e-5);\n              EXPECT_NEAR(input_data[3], 1.7998, 1e-5);\n              EXPECT_NEAR(input_data[4], 2.0494, 1e-5);\n              EXPECT_NEAR(input_data[5], 2.29901, 1e-5);\n              EXPECT_NEAR(input_data[6], 2.54862, 1e-5);\n              EXPECT_NEAR(input_data[7], 2.79823, 1e-5);\n            }\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n    return modelbox::STATUS_OK;\n  }));\n  desc_flowunit.SetMockFlowUnit(mock_flowunit);\n  ctl->AddMockDriverFlowUnit(\"test_1_0_batch\", \"cpu\", desc_flowunit,\n                             std::string(TEST_DRIVER_DIR));\n};\n\nstatic void Register_Test_0_1_Flowunit(\n    std::shared_ptr<modelbox::MockDriverCtl> &ctl) {\n  modelbox::MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"test_0_1\");\n  desc_flowunit.SetDescription(\"The test input data, 0 inputs 1 output\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_0_1.so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  auto mock_flowunit = std::make_shared<modelbox::MockFlowUnit>();\n  auto mock_flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>();\n  mock_flowunit_desc->SetFlowUnitName(\"test_0_1\");\n  mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"Out_1\"));\n  mock_flowunit_desc->SetFlowType(modelbox::STREAM);\n  mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n  std::weak_ptr<modelbox::MockFlowUnit> mock_flowunit_wp;\n  mock_flowunit_wp = mock_flowunit;\n\n  EXPECT_CALL(*mock_flowunit, Open(_))\n      .WillRepeatedly(testing::Invoke(\n          [=](const 
std::shared_ptr<modelbox::Configuration> &flow_option) {\n            auto spt = mock_flowunit_wp.lock();\n            auto ext_data = spt->CreateExternalData();\n            if (!ext_data) {\n              MBLOG_ERROR << \"can not get external data.\";\n            }\n\n            auto buffer_list = ext_data->CreateBufferList();\n            buffer_list->Build({10 * sizeof(int)});\n            auto *data = (int *)buffer_list->MutableData();\n            for (size_t i = 0; i < 10; i++) {\n              data[i] = i;\n            }\n\n            auto status = ext_data->Send(buffer_list);\n            if (!status) {\n              MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n            }\n\n            status = ext_data->Close();\n            if (!status) {\n              MBLOG_ERROR << \"external data close failed:\" << status;\n            }\n\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPre(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_0_1 \"\n                       << \"DataPre\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPost(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_0_1 \"\n                       << \"DataPost\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit,\n              Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::DataContext> &op_ctx) {\n            auto output_buf_1 = op_ctx->Output(\"Out_1\");\n            std::vector<size_t> shape_vector(1, 8 * sizeof(float));\n            modelbox::ModelBoxDataType type = modelbox::MODELBOX_FLOAT;\n            
output_buf_1->Build(shape_vector);\n            output_buf_1->Set(\"type\", type);\n            std::vector<size_t> shape{8};\n            output_buf_1->Set(\"shape\", shape);\n            auto *dev_data = (float *)(output_buf_1->MutableData());\n            float num = 1.0;\n            for (size_t i = 0; i < output_buf_1->Size(); ++i) {\n              for (size_t j = 0; j < 8; ++j) {\n                dev_data[i * 8 + j] = num;\n                num += 1.0;\n              }\n            }\n\n            MBLOG_DEBUG << output_buf_1->GetBytes();\n            MBLOG_DEBUG << \"test_0_1 gen data, 0\" << output_buf_1->Size();\n\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n    return modelbox::STATUS_OK;\n  }));\n  desc_flowunit.SetMockFlowUnit(mock_flowunit);\n  ctl->AddMockDriverFlowUnit(\"test_0_1\", \"cpu\", desc_flowunit,\n                             std::string(TEST_DRIVER_DIR));\n};\n\nstatic void Register_Test_1_0_Flowunit(\n    std::shared_ptr<modelbox::MockDriverCtl> &ctl) {\n  modelbox::MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"test_1_0\");\n  desc_flowunit.SetDescription(\"The test output data, 1 input 0 outputs\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-test_1_0.so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  auto mock_flowunit = std::make_shared<modelbox::MockFlowUnit>();\n  auto mock_flowunit_desc = std::make_shared<modelbox::FlowUnitDesc>();\n  mock_flowunit_desc->SetFlowUnitName(\"test_1_0\");\n  mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n  mock_flowunit_desc->SetFlowType(modelbox::STREAM);\n  mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n  std::weak_ptr<modelbox::MockFlowUnit> mock_flowunit_wp;\n  
mock_flowunit_wp = mock_flowunit;\n\n  EXPECT_CALL(*mock_flowunit, Open(_))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPre(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_1_0 \"\n                       << \"DataPre\";\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, DataPost(_))\n      .WillRepeatedly(testing::Invoke(\n          [&](const std::shared_ptr<modelbox::DataContext> &data_ctx) {\n            MBLOG_INFO << \"test_1_0 \"\n                       << \"DataPost\";\n            return modelbox::STATUS_STOP;\n          }));\n\n  EXPECT_CALL(*mock_flowunit,\n              Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n      .WillRepeatedly(testing::Invoke(\n          [=](const std::shared_ptr<modelbox::DataContext> &op_ctx) {\n            std::shared_ptr<modelbox::BufferList> input_bufs =\n                op_ctx->Input(\"In_1\");\n            EXPECT_EQ(input_bufs->Size(), 1);\n            std::vector<size_t> shape_vector{8};\n            std::vector<size_t> input_shape;\n            auto result = input_bufs->At(0)->Get(\"shape\", input_shape);\n            EXPECT_TRUE(result);\n            EXPECT_EQ(input_shape, shape_vector);\n\n            for (size_t i = 0; i < input_bufs->Size(); ++i) {\n              const auto *input_data =\n                  static_cast<const float *>(input_bufs->ConstBufferData(i));\n              MBLOG_DEBUG << \"index: \" << i;\n              for (size_t j = 0; j < input_shape[0]; ++j) {\n                MBLOG_DEBUG << input_data[j];\n              }\n\n              EXPECT_NEAR(input_data[0], 1.05097, 1e-5);\n              EXPECT_NEAR(input_data[1], 1.30058, 1e-5);\n              EXPECT_NEAR(input_data[2], 
1.55019, 1e-5);\n              EXPECT_NEAR(input_data[3], 1.7998, 1e-5);\n              EXPECT_NEAR(input_data[4], 2.0494, 1e-5);\n              EXPECT_NEAR(input_data[5], 2.29901, 1e-5);\n              EXPECT_NEAR(input_data[6], 2.54862, 1e-5);\n              EXPECT_NEAR(input_data[7], 2.79823, 1e-5);\n            }\n            return modelbox::STATUS_OK;\n          }));\n\n  EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n    return modelbox::STATUS_OK;\n  }));\n  desc_flowunit.SetMockFlowUnit(mock_flowunit);\n  ctl->AddMockDriverFlowUnit(\"test_1_0\", \"cpu\", desc_flowunit,\n                             std::string(TEST_DRIVER_DIR));\n};\n\nmodelbox::Status AddMockFlowUnit(\n    std::shared_ptr<modelbox::DriverFlowTest> &flow) {\n  auto ctl = flow->GetMockFlowCtl();\n  Register_Test_0_1_Batch_Flowunit(ctl);\n  Register_Test_1_0_Batch_Flowunit(ctl);\n  Register_Test_0_1_Flowunit(ctl);\n  Register_Test_1_0_Flowunit(ctl);\n  return modelbox::STATUS_SUCCESS;\n}\n\nmodelbox::Status ReplaceVersion(const std::string &src, const std::string &dest,\n                                const std::string &version) {\n  if (access(dest.c_str(), F_OK) == 0) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::ifstream src_file(src, std::ios::binary);\n  std::ofstream dst_file(dest, std::ios::binary | std::ios::trunc);\n\n  if (src_file.fail() || dst_file.fail()) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string line;\n  std::string tf_version = \"TF_VERSION\";\n\n  while (std::getline(src_file, line)) {\n    auto pos = line.find(tf_version);\n    if (pos != std::string::npos) {\n      line.replace(pos, tf_version.size(), version);\n    }\n    dst_file << line << \"\\n\";\n  }\n\n  src_file.close();\n  if (dst_file.fail()) {\n    dst_file.close();\n    remove(dest.c_str());\n    return modelbox::STATUS_FAULT;\n  }\n  dst_file.close();\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string GetTFVersion() {\n  std::string ans;\n  void *handler = 
dlopen(MODELBOX_TF_SO_PATH, RTLD_LOCAL | RTLD_DEEPBIND);\n  if (handler == nullptr) {\n    MBLOG_ERROR << \"dlopen error: \" << dlerror();\n    return ans;\n  }\n\n  Defer { dlclose(handler); };\n  typedef const char *(*TF_Version)();\n  TF_Version func = nullptr;\n\n  func = (TF_Version)dlsym(handler, \"TF_Version\");\n  if (func == nullptr) {\n    MBLOG_ERROR << \"dlsym TF_Version failed, \" << dlerror();\n    return ans;\n  }\n\n  ans = std::string(func());\n  return ans;\n}\n\n}  // namespace tensorflow_inference"
  },
  {
    "path": "test/drivers/common/tensorflow_inference/tensorflow_inference_mock.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DRIVER_TEST_TENSORFLOW_INFERENCE_MOCK_H_\n#define MODELBOX_DRIVER_TEST_TENSORFLOW_INFERENCE_MOCK_H_\n\n#include \"driver_flow_test.h\"\n#include \"modelbox/base/status.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace tensorflow_inference {\n\nstatic std::set<std::string> SUPPORT_TF_VERSION = {\"1.13.1\", \"1.15.0\",\n                                                   \"2.6.0-dev20210809\"};\n\nmodelbox::Status AddMockFlowUnit(\n    std::shared_ptr<modelbox::DriverFlowTest> &flow);\n\nmodelbox::Status ReplaceVersion(const std::string &src, const std::string &dest,\n                                const std::string &version);\n\nstd::string GetTFVersion();\n};  // namespace tensorflow_inference\n\n#endif  // MODELBOX_DRIVER_TEST_TENSORFLOW_INFERENCE_MOCK_H_"
  },
  {
    "path": "test/drivers/common/video_decoder/video_decoder_mock.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"video_decoder_mock.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace videodecoder {\n\nstatic modelbox::Status StartFlowUnitOpenFunc(\n    const std::shared_ptr<modelbox::Configuration>& flow_option,\n    const std::shared_ptr<modelbox::MockFlowUnit>& mock_flowunit) {\n  for (uint32_t i = 0; i < 2; i++) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n      return modelbox::STATUS_FAULT;\n    }\n\n    auto source_url = std::string();\n    if (i == 0) {\n      source_url = std::string(TEST_ASSETS) +\n                   \"/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4\";\n    } else {\n      source_url = std::string(TEST_ASSETS) +\n                   \"/video/avc1_5s_480x320_24fps_yuv420_8bit.mp4\";\n    }\n\n    auto output_buf = ext_data->CreateBufferList();\n    output_buf->BuildFromHost({source_url.size()}, (void*)source_url.data(),\n                              source_url.size());\n    if (i == 0) {\n      // Test demuxer url in output meta\n      auto data_meta = std::make_shared<modelbox::DataMeta>();\n      data_meta->SetMeta(\"source_url\",\n                         std::make_shared<std::string>(source_url));\n      
ext_data->SetOutputMeta(data_meta);\n    } else {\n      // Test demuxer url in output buffer\n    }\n\n    auto status = ext_data->Send(output_buf);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n      return modelbox::STATUS_FAULT;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n      return modelbox::STATUS_FAULT;\n    }\n  }\n\n  return modelbox::STATUS_SUCCESS;\n}\n\nstatic void AddStartFlowUnit(std::shared_ptr<modelbox::MockFlow>& flow) {\n  auto mock_desc =\n      modelbox::GenerateFlowunitDesc(\"start_unit\", {}, {\"stream_meta\"});\n  mock_desc->SetFlowType(modelbox::STREAM);\n  mock_desc->SetStreamSameCount(true);\n  auto process_func =\n      [=](const std::shared_ptr<modelbox::DataContext>& data_ctx,\n          const std::shared_ptr<modelbox::MockFlowUnit>& mock_flowunit) {\n        auto output_buffers = data_ctx->Output(\"stream_meta\");\n        auto input_buffers = data_ctx->External();\n        for (const auto& buffer : *input_buffers) {\n          output_buffers->PushBack(buffer);\n        }\n        return modelbox::STATUS_OK;\n      };\n  auto mock_functions = std::make_shared<modelbox::MockFunctionCollection>();\n  mock_functions->RegisterOpenFunc(StartFlowUnitOpenFunc);\n  mock_functions->RegisterProcessFunc(process_func);\n  flow->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                        TEST_DRIVER_DIR);\n}\n\nstatic void CheckVideoFrame(\n    const std::shared_ptr<modelbox::Buffer>& frame_buffer,\n    const std::shared_ptr<int64_t>& index_counter) {\n  int64_t index = 0;\n  int32_t width = 0;\n  int32_t height = 0;\n  int32_t rate_num = 0;\n  int32_t rate_den = 0;\n  int64_t duration = 0;\n  bool eos = false;\n  int64_t timestamp;\n  frame_buffer->Get(\"index\", index);\n  frame_buffer->Get(\"width\", width);\n  frame_buffer->Get(\"height\", height);\n  
frame_buffer->Get(\"rate_num\", rate_num);\n  frame_buffer->Get(\"rate_den\", rate_den);\n  frame_buffer->Get(\"duration\", duration);\n  frame_buffer->Get(\"eos\", eos);\n  frame_buffer->Get(\"timestamp\", timestamp);\n\n  EXPECT_EQ(index, *index_counter);\n  *index_counter = *index_counter + 1;\n  EXPECT_EQ(width, 480);\n  EXPECT_EQ(height, 320);\n  EXPECT_EQ(rate_num, 24);\n  EXPECT_EQ(rate_den, 1);\n  if (index < 119) {\n    EXPECT_FALSE(eos);\n  } else {\n    EXPECT_TRUE(eos);\n  }\n\n  EXPECT_EQ(duration, 5);\n  if (index == 0) {\n    EXPECT_EQ(timestamp, 0);\n  } else if (index == 119) {\n    EXPECT_EQ(timestamp, 4958);\n  }\n}\n\nstatic void AddReadFrameFlowUnit(std::shared_ptr<modelbox::MockFlow>& flow,\n                                 bool is_stream) {\n  auto mock_desc =\n      modelbox::GenerateFlowunitDesc(\"read_frame\", {\"frame_info\"}, {});\n  mock_desc->SetFlowType(modelbox::STREAM);\n  auto data_pre_func =\n      [&](const std::shared_ptr<modelbox::DataContext>& data_ctx,\n          const std::shared_ptr<modelbox::MockFlowUnit>& mock_flowunit) {\n        MBLOG_INFO << \"read_frame DataPre\";\n        auto index_counter = std::make_shared<int64_t>(0);\n        data_ctx->SetPrivate(\"index\", index_counter);\n        return modelbox::STATUS_OK;\n      };\n  auto process_func =\n      [=](const std::shared_ptr<modelbox::DataContext>& op_ctx,\n          const std::shared_ptr<modelbox::MockFlowUnit>& mock_flowunit) {\n        auto index_counter =\n            std::static_pointer_cast<int64_t>(op_ctx->GetPrivate(\"index\"));\n\n        auto frame_buffer_list = op_ctx->Input(\"frame_info\");\n        EXPECT_NE(frame_buffer_list, nullptr);\n        if (is_stream) {\n          return modelbox::STATUS_OK;\n        }\n        for (size_t i = 0; i < frame_buffer_list->Size(); ++i) {\n          auto frame_buffer = frame_buffer_list->At(i);\n          if (frame_buffer->GetBytes() == 0) {\n            continue;\n          }\n\n          
CheckVideoFrame(frame_buffer, index_counter);\n        }\n\n        return modelbox::STATUS_OK;\n      };\n\n  auto mock_functions = std::make_shared<modelbox::MockFunctionCollection>();\n  mock_functions->RegisterDataPreFunc(data_pre_func);\n  mock_functions->RegisterProcessFunc(process_func);\n  flow->AddFlowUnitDesc(mock_desc, mock_functions->GenerateCreateFunc(),\n                        TEST_DRIVER_DIR);\n}\n\nmodelbox::Status AddMockFlowUnit(std::shared_ptr<modelbox::MockFlow>& flow,\n                                 bool is_stream) {\n  AddStartFlowUnit(flow);\n  AddReadFrameFlowUnit(flow, is_stream);\n  return modelbox::STATUS_SUCCESS;\n}\n\nstd::string GetTomlConfig(const std::string& device,\n                          const std::string& pix_fmt) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  std::string toml_content =\n      R\"(\n      [driver]\n      skip-default = true\n      dir=[\")\" +\n      test_lib_dir + \"\\\",\\\"\" + test_data_dir + \"\\\"]\\n    \" +\n      R\"([graph]\n      graphconf = '''digraph demo {\n            start_unit[type=flowunit, flowunit=start_unit, device=cpu, deviceid=0, label=\"<stream_meta>\"]\n            videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n            videodecoder[type=flowunit, flowunit=video_decoder, device=)\" +\n      device +\n      R\"(, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=)\" +\n      pix_fmt + R\"(]\n            read_frame[type=flowunit, flowunit=read_frame, device=cpu, deviceid=0, label=\"<frame_info>\"]\n            start_unit:stream_meta -> videodemuxer:in_video_url\n            videodemuxer:out_video_packet -> videodecoder:in_video_packet\n            videodecoder:out_video_frame -> read_frame:frame_info\n          }'''\n      format = \"graphviz\"\n    )\";\n  return toml_content;\n}\n}  // namespace videodecoder"
  },
  {
    "path": "test/drivers/common/video_decoder/video_decoder_mock.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DRIVER_TEST_VIDEO_DECODER_MOCK_H_\n#define MODELBOX_DRIVER_TEST_VIDEO_DECODER_MOCK_H_\n\n#include <string>\n\n#include \"modelbox/base/status.h\"\n#include \"driver_flow_test.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace videodecoder {\nmodelbox::Status AddMockFlowUnit(std::shared_ptr<modelbox::MockFlow>& flow,\n                               bool is_stream = false);\n\nstd::string GetTomlConfig(const std::string& device,\n                          const std::string& pix_fmt);\n};  // namespace videodecoder\n\n#endif  // MODELBOX_DRIVER_TEST_VIDEO_DECODER_MOCK_H_"
  },
  {
    "path": "test/drivers/driver_flow_test.cc",
    "content": "\n#include \"driver_flow_test.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nDriverFlowTest::DriverFlowTest() : flow_(std::make_shared<Flow>()) {}\n\nDriverFlowTest::~DriverFlowTest() {\n  flow_ = nullptr;\n  ctl_ = nullptr;\n}\n\nvoid DriverFlowTest::Clear() { flow_ = nullptr; }\n\nStatus DriverFlowTest::InitFlow(const std::string &name,\n                                const std::string &graph) {\n  ctl_ = GetMockFlowCtl();\n\n  modelbox::DriverDesc desc;\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl_->AddMockDriverDevice(\"cpu\", desc, std::string(TEST_DRIVER_DIR));\n\n  desc.SetClass(\"DRIVER-GRAPHCONF\");\n  desc.SetType(\"GRAPHVIZ\");\n  desc.SetName(\"GRAPHCONF-GRAPHVIZ\");\n  desc.SetDescription(\"graph config parse graphviz\");\n  desc.SetVersion(\"0.1.0\");\n  std::string file_path_graph =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-graphconf-graphviz.so\";\n  desc.SetFilePath(file_path_graph);\n\n  ctl_->AddMockDriverGraphConf(\"graphviz\", \"\", desc,\n                               std::string(TEST_DRIVER_DIR));\n  auto status = flow_->Init(name, graph);\n  return status;\n}\n\nStatus DriverFlowTest::BuildAndRun(const std::string &name,\n                                   const std::string &graph, int timeout) {\n  auto ret = InitFlow(name, graph);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->Build();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->RunAsync();\n  if (!ret) {\n    return ret;\n  }\n\n  if (timeout < 0) {\n    return ret;\n  }\n\n  Status retval;\n  flow_->Wait(timeout, &retval);\n  
return retval;\n}\n\nstd::shared_ptr<MockDriverCtl> DriverFlowTest::GetMockFlowCtl() {\n  if (ctl_ == nullptr) {\n    ctl_ = std::make_shared<MockDriverCtl>();\n  }\n  return ctl_;\n}\n\nstd::shared_ptr<Flow> DriverFlowTest::GetFlow() { return flow_; }\n\n}  // namespace modelbox"
  },
  {
    "path": "test/drivers/driver_flow_test.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DRIVER_TEST_H_\n#define MODELBOX_DRIVER_TEST_H_\n\n#include <modelbox/base/log.h>\n#include <modelbox/flow.h>\n\n#include <fstream>\n\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nclass DriverFlowTest {\n public:\n  DriverFlowTest();\n  virtual ~DriverFlowTest();\n\n  void Clear();\n\n  Status BuildAndRun(const std::string &name, const std::string &graph,\n                     int timeout = 15 * 1000);\n\n  std::shared_ptr<MockDriverCtl> GetMockFlowCtl();\n  std::shared_ptr<Flow> GetFlow();\n\n private:\n  Status InitFlow(const std::string &name, const std::string &graph);\n\n  std::shared_ptr<Flow> flow_;\n  std::shared_ptr<MockDriverCtl> ctl_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DRIVER_TEST_H_\n"
  },
  {
    "path": "test/function/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB FUNCTION_TEST_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${MOCK_DRIVER_CTRL_INCLUDE})\ninclude_directories(${TEST_INCLUDE})\ninclude_directories(${DRIVER_UNIT_TEST_INCLUDE})\n\nset(FUNCTION_TEST_SOURCE ${FUNCTION_TEST_SOURCE} CACHE INTERNAL \"\")\n\nadd_subdirectory(car_detection)\n\nadd_executable(function EXCLUDE_FROM_ALL\n\t${FUNCTION_TEST_SOURCE}\n\t${TEST_MAIN_SOURCE}\n)\n\nset(MODELBOX_UNIT_LINK_LIBRARY ${OpenCV_LIBS})\nadd_dependencies(function ${MODELBOX_DRIVERS_TEST_TARGET})\ntarget_link_libraries(function pthread)\ntarget_link_libraries(function rt)\ntarget_link_libraries(function gtest_main)\ntarget_link_libraries(function gmock_main)\ntarget_link_libraries(function ${LIBMODELBOX_SHARED})\ntarget_link_libraries(function ${MOCK_DRIVER_CTRL_LIB})\ntarget_link_libraries(function ${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(function ${MODELBOX_UNIT_LINK_LIBRARY})\n\nadd_custom_target(functiontest\n\tCOMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/function\n\tDEPENDS function\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run Function Test...\"\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS function)\nset(MODELBOX_UNIT_TEST_TARGETS 
${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\nlist(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS functiontest)\nset(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\n\nforeach (ITR ${DRIVER_DEMO_TEST_TARGET} ${list_var})\n\tadd_dependencies(function ${ITR})\n\tadd_custom_command(TARGET function PRE_BUILD\n\tCOMMAND cp $<TARGET_FILE:${ITR}> ${TEST_DEMO_DRIVERS_DIR}/\n\t)\nendforeach(ITR) \nset(DRIVER_DEMO_TEST_TARGET \"\" CACHE INTERNAL \"\")"
  },
  {
    "path": "test/function/api_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <opencv2/opencv.hpp>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/data_handler.h\"\n#include \"modelbox/external_data_simple.h\"\n#include \"modelbox/flow.h\"\n#include \"modelbox/flow_graph_desc.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\nclass FlowGraphTest : public testing::Test {\n protected:\n  void SetUp() override {\n    graph_desc_ = std::make_shared<FlowGraphDesc>();\n    graph_desc_->SetQueueSize(32);\n    graph_desc_->SetBatchSize(8);\n    graph_desc_->SetSkipDefaultDrivers(true);\n    graph_desc_->SetDriversDir({TEST_DRIVER_DIR});\n  }\n\n  std::shared_ptr<FlowGraphDesc> graph_desc_;\n};\n\nTEST_F(FlowGraphTest, AddNodeTest) {\n  auto source_url =\n      std::string(TEST_ASSETS) + \"/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4\";\n\n  auto input = graph_desc_->AddInput(\"input1\");\n  auto video_demuxer = graph_desc_->AddNode(\"video_demuxer\", \"cpu\", input);\n  graph_desc_->AddOutput(\"output1\", video_demuxer);\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(graph_desc_);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  flow->StartRun();\n  auto data_map = flow->CreateExternalDataMap();\n  auto data_simple = std::make_shared<ExternalDataSimple>(data_map);\n  data_simple->PushData(\"input1\", 
source_url.data(), source_url.size());\n\n  std::shared_ptr<void> data = nullptr;\n  size_t data_len = 0;\n  auto status = data_simple->GetResult(\"output1\", data, data_len, 1000);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_GT(data_len, 1000);\n}\n\nTEST_F(FlowGraphTest, AddFuncTest) {\n  auto input1 = graph_desc_->AddInput(\"input1\");\n  auto process_func =\n      [](const std::shared_ptr<DataContext> &data_context) -> Status {\n    auto input = data_context->Input(\"in_1\");\n    const auto *in_data = (const uint8_t *)(input->ConstBufferData(0));\n\n    auto output = data_context->Output(\"out_1\");\n    auto buffer = input->At(0);\n    output->Build({buffer->GetBytes()});\n    auto *data_ptr = (uint8_t *)(output->MutableBufferData(0));\n    for (uint8_t i = 0; i < 10; ++i) {\n      data_ptr[i] = in_data[i] + 1;\n    }\n    output->At(0)->Set(\"test_meta\", \"test_meta\");\n    return STATUS_SUCCESS;\n  };\n  auto func_node =\n      graph_desc_->AddFunction(process_func, {\"in_1\"}, {\"out_1\"}, input1);\n  graph_desc_->AddOutput(\"output1\", func_node);\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(graph_desc_);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  flow->StartRun();\n  auto stream_io = flow->CreateStreamIO();\n  auto buffer = stream_io->CreateBuffer();\n  buffer->Build(10);\n  auto *buffer_data = (uint8_t *)(buffer->MutableData());\n  for (uint8_t i = 0; i < 10; ++i) {\n    buffer_data[i] = i;\n  }\n  stream_io->Send(\"input1\", buffer);\n\n  std::shared_ptr<Buffer> out_buffer;\n  stream_io->Recv(\"output1\", out_buffer);\n  std::string meta;\n  out_buffer->Get(\"test_meta\", meta);\n  EXPECT_EQ(meta, \"test_meta\");\n  const auto *data = (const uint8_t *)(out_buffer->ConstData());\n  for (uint8_t i = 0; i < 10; ++i) {\n    EXPECT_EQ(data[i], i + 1);\n  }\n}\n\nclass CustomFlowUnit : public FlowUnit {\n public:\n  Status Process(std::shared_ptr<DataContext> data_ctx) override {\n    auto in = data_ctx->Input(\"in1\");\n    
EXPECT_EQ(in->Size(), 1);\n    auto in_buffer = in->Front();\n    auto out = data_ctx->Output(\"out1\");\n    out->Build({1});\n    (*((uint8_t *)out->MutableData())) =\n        (*((const uint8_t *)in_buffer->ConstData()));\n    return STATUS_OK;\n  }\n};\n\nclass CustomFlowUnitBuilder : public FlowUnitBuilder {\n public:\n  void Probe(std::shared_ptr<FlowUnitDesc> &desc) override {\n    desc->SetFlowUnitType(\"cpu\");\n    desc->SetFlowUnitName(\"custom_flowunit\");\n    desc->AddFlowUnitInput({\"in1\"});\n    desc->AddFlowUnitOutput({\"out1\"});\n  }\n\n  std::shared_ptr<FlowUnit> Build() override {\n    return std::make_shared<CustomFlowUnit>();\n  }\n};\n\nTEST_F(FlowGraphTest, RegisterFlowUnitTest) {\n  auto input1 = graph_desc_->AddInput(\"input1\");\n  auto node1 = graph_desc_->AddNode(\"custom_flowunit\", \"cpu\", input1);\n  graph_desc_->AddOutput(\"output1\", node1);\n\n  auto flow = std::make_shared<Flow>();\n  flow->RegisterFlowUnit(std::make_shared<CustomFlowUnitBuilder>());\n  auto ret = flow->Init(graph_desc_);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  ret = flow->StartRun();\n  ASSERT_EQ(ret, STATUS_OK);\n\n  auto stream_io = flow->CreateStreamIO();\n  auto buffer = stream_io->CreateBuffer();\n  buffer->Build(1);\n  auto *buffer_ptr = (uint8_t *)(buffer->MutableData());\n  (*buffer_ptr) = 123;\n  stream_io->Send(\"input1\", buffer);\n\n  auto out_buffer = stream_io->Recv(\"output1\", 0);\n  ASSERT_NE(out_buffer, nullptr);\n  const auto *ptr = (const uint8_t *)out_buffer->ConstData();\n  EXPECT_EQ((*ptr), 123);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/function/car_detection/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(NOT CUDA_FOUND)\n  return()\nendif()\n\nfile(GLOB_RECURSE TEST_SOURCE *.cpp *.cc *.c)\n\nlist(APPEND FUNCTION_TEST_SOURCE ${TEST_SOURCE})\nset(FUNCTION_TEST_SOURCE ${FUNCTION_TEST_SOURCE} CACHE INTERNAL \"\")"
  },
  {
    "path": "test/function/car_detection/car_flow.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"car_flow.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nCarFlow::CarFlow() : flow_(std::make_shared<Flow>()) {}\n\nCarFlow::~CarFlow() {\n  flow_ = nullptr;\n  ctl_ = nullptr;\n}\n\nvoid CarFlow::Clear() { flow_ = nullptr; }\n\nStatus CarFlow::Init(const std::string& graphFilePath) {\n  ctl_ = GetMockFlowCtl();\n\n  modelbox::DriverDesc desc;\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl_->AddMockDriverDevice(\"cpu\", desc, std::string(TEST_DRIVER_DIR));\n\n  desc.SetClass(\"DRIVER-GRAPHCONF\");\n  desc.SetType(\"GRAPHVIZ\");\n  desc.SetName(\"GRAPHCONF-GRAPHVIZ\");\n  desc.SetDescription(\"graph config parse graphviz\");\n  desc.SetVersion(\"0.1.0\");\n  std::string file_path_graph =\n      std::string(TEST_DRIVER_DIR) + \"/libmodelbox-graphconf-graphviz.so\";\n  desc.SetFilePath(file_path_graph);\n\n  ctl_->AddMockDriverGraphConf(\"graphviz\", \"\", desc,\n                               
std::string(TEST_DRIVER_DIR));\n  auto status = flow_->Init(graphFilePath);\n  return status;\n}\n\nStatus CarFlow::Build() { return flow_->Build(); }\n\nvoid CarFlow::Run() { flow_->RunAsync(); }\n\nvoid CarFlow::Wait(const uint64_t millisecond) { flow_->Wait(millisecond); }\n\nvoid CarFlow::Destroy() {}\n\nstd::shared_ptr<MockDriverCtl> CarFlow::GetMockFlowCtl() {\n  if (ctl_ == nullptr) {\n    ctl_ = std::make_shared<MockDriverCtl>();\n  }\n  return ctl_;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/function/car_detection/car_flow.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_CAR_FLOW_TEST_H_\n#define MODELBOX_CAR_FLOW_TEST_H_\n\n#include <modelbox/base/log.h>\n#include <modelbox/flow.h>\n\n#include <fstream>\n\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nclass CarFlow {\n public:\n  CarFlow();\n  virtual ~CarFlow();\n\n  Status Init(const std::string &graphFilePath);\n  Status Build();\n  void Run();\n  void Wait(const uint64_t millisecond);\n  void Clear();\n  void Destroy();\n\n  std::shared_ptr<MockDriverCtl> GetMockFlowCtl();\n\n private:\n  std::shared_ptr<Flow> flow_;\n  std::shared_ptr<MockDriverCtl> ctl_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DRIVER_TEST_H_\n"
  },
  {
    "path": "test/function/car_detection/car_flow_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"car_flow.h\"\n\n#include <securec.h>\n\n#include <functional>\n#include <future>\n#include <random>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/buffer.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\n\nclass CarFlowTest : public testing::Test {\n public:\n  CarFlowTest() : car_flow_(std::make_shared<CarFlow>()) {}\n\n  std::shared_ptr<CarFlow> GetCarFlow();\n\n protected:\n  void SetUp() override {\n    auto ret = AddMockFlowUnit();\n    EXPECT_EQ(ret, STATUS_OK);\n  }\n\n  void TearDown() override { car_flow_->Clear(); };\n\n private:\n  Status AddMockFlowUnit();\n  std::shared_ptr<CarFlow> car_flow_;\n};\n\nstd::shared_ptr<CarFlow> CarFlowTest::GetCarFlow() { return car_flow_; }\n\nStatus CarFlowTest::AddMockFlowUnit() {\n  auto ctl_ = car_flow_->GetMockFlowCtl();\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"start_unit\");\n    desc_flowunit.SetDescription(\"start unit in test\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        
std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-start_unit.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"start_unit\");\n    mock_flowunit_desc->AddFlowUnitOutput(\n        modelbox::FlowUnitOutput(\"stream_meta\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              auto spt = mock_flowunit_wp.lock();\n              for (uint32_t i = 0; i < 1; i++) {\n                auto ext_data = spt->CreateExternalData();\n                if (!ext_data) {\n                  MBLOG_ERROR << \"can not get external data.\";\n                }\n                auto source_url = std::string();\n                if (i == 0) {\n                  source_url = std::string(TEST_ASSETS) +\n                               \"/car_detection/test_video.mp4\";\n                } else {\n                  source_url = std::string(TEST_ASSETS) +\n                               \"/car_detection/test_video.mp4\";\n                }\n\n                auto output_buf = ext_data->CreateBufferList();\n                modelbox::TensorList output_tensor_list(output_buf);\n                output_tensor_list.BuildFromHost<unsigned char>(\n                    {1, {source_url.size() + 1}}, (void*)source_url.data(),\n                    source_url.size() + 1);\n\n                auto status = ext_data->Send(output_buf);\n                if (!status) {\n                  MBLOG_ERROR << \"external data send buffer list failed:\"\n                              << status;\n                }\n\n                status = ext_data->Close();\n                if 
(!status) {\n                  MBLOG_ERROR << \"external data close failed:\" << status;\n                }\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto output_buf = data_ctx->Output(\"stream_meta\");\n              std::vector<size_t> shape(1, 1);\n              output_buf->Build(shape);\n\n              auto external = data_ctx->External();\n              auto source_url = std::make_shared<std::string>(\n                  (char*)(*external)[0]->ConstData());\n\n              auto data_meta = std::make_shared<DataMeta>();\n              data_meta->SetMeta(\"source_url\", source_url);\n\n              data_ctx->SetOutputMeta(\"stream_meta\", data_meta);\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"start_unit\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    
desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"color_transpose\");\n    desc_flowunit.SetDescription(\"the test in 1 out 0\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-color_transpose.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"color_transpose\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"in_image\"));\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"out_image\"));\n    mock_flowunit_desc->SetFlowType(NORMAL);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"color_transpose \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"color_transpose \"\n                         << \"DataPost\";\n              return modelbox::STATUS_STOP;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const 
std::shared_ptr<DataContext>& op_ctx) {\n              MBLOG_INFO << \"test color_transpose process\";\n              auto input_buf = op_ctx->Input(\"in_image\");\n              auto output_buf = op_ctx->Output(\"out_image\");\n\n              std::vector<size_t> shape_vector;\n              for (size_t i = 0; i < input_buf->Size(); ++i) {\n                shape_vector.push_back(input_buf->At(i)->GetBytes());\n              }\n\n              output_buf->Build(shape_vector);\n              int32_t width = 0;\n              int32_t height = 0;\n              int32_t channel = 0;\n              std::string pix_fmt;\n              modelbox::ModelBoxDataType type = MODELBOX_TYPE_INVALID;\n              input_buf->At(0)->Get(\"width\", width);\n              input_buf->At(0)->Get(\"height\", height);\n              input_buf->At(0)->Get(\"channel\", channel);\n              input_buf->At(0)->Get(\"pix_fmt\", pix_fmt);\n              input_buf->At(0)->Get(\"type\", type);\n              size_t elem_size = width * height;\n\n              const auto* input_data =\n                  static_cast<const u_char*>(input_buf->ConstData());\n              auto* output_data =\n                  static_cast<u_char*>(output_buf->MutableData());\n              for (size_t i = 0; i < (size_t)channel; ++i) {\n                for (size_t j = 0; j < elem_size; ++j) {\n                  output_data[i * elem_size + j] = input_data[j * channel + i];\n                }\n              }\n\n              output_buf->Set(\"width\", width);\n              output_buf->Set(\"height\", height);\n              output_buf->Set(\"channel\", channel);\n              output_buf->Set(\"pix_fmt\", pix_fmt);\n              output_buf->Set(\"type\", type);\n\n              MBLOG_DEBUG << \"color_transpose process data finish\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n  
  }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"color_transpose\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"free\");\n    desc_flowunit.SetDescription(\"free in test\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cpu-free.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"free\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"in_data\"));\n    mock_flowunit_desc->SetFlowType(NORMAL);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    static std::atomic<int64_t> run_count(0);\n    static std::atomic<bool> is_print(false);\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPost\";\n              return 
modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto input_buf = data_ctx->Input(\"in_data\");\n\n              static auto begin_time = GetTickCount();\n              static std::atomic<uint64_t> print_time{GetTickCount()};\n\n              run_count += input_buf->Size();\n\n              auto end_time = GetTickCount();\n              if (end_time - print_time > 1000) {\n                auto expected = false;\n                if (is_print.compare_exchange_weak(expected, true)) {\n                  MBLOG_INFO << \"Average throughput: \"\n                             << (run_count * 1000) / (end_time - begin_time)\n                             << \"/s\";\n                  is_print = false;\n                  print_time = GetTickCount();\n                }\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"free\", \"cpu\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  {\n    MockFlowUnitDriverDesc desc_flowunit;\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cuda\");\n    desc_flowunit.SetName(\"free\");\n    desc_flowunit.SetDescription(\"free in test\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_DRIVER_DIR) + \"/libmodelbox-unit-cuda-free.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    
mock_flowunit_desc->SetFlowUnitName(\"free\");\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"in_data\"));\n    mock_flowunit_desc->SetFlowType(NORMAL);\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n\n    static std::atomic<int64_t> run_count(0);\n    static std::atomic<bool> is_print(false);\n\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration>& flow_option) {\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPre(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPre\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, DataPost(_))\n        .WillRepeatedly(\n            testing::Invoke([&](const std::shared_ptr<DataContext>& data_ctx) {\n              MBLOG_INFO << \"stream_meta  \"\n                         << \"DataPost\";\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit,\n                Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<DataContext>& data_ctx) {\n              auto input_buf = data_ctx->Input(\"in_data\");\n\n              static auto begin_time = GetTickCount();\n              static std::atomic<uint64_t> print_time{GetTickCount()};\n\n              run_count += input_buf->Size();\n\n              auto end_time = GetTickCount();\n              if (end_time - print_time > 1000) {\n                auto expected = false;\n                if (is_print.compare_exchange_weak(expected, true)) {\n                  MBLOG_INFO << \"Average throughput: \"\n            
                 << (run_count * 1000) / (end_time - begin_time)\n                             << \"/s\";\n                  is_print = false;\n                  print_time = GetTickCount();\n                }\n              }\n\n              return modelbox::STATUS_OK;\n            }));\n\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      return modelbox::STATUS_OK;\n    }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl_->AddMockDriverFlowUnit(\"free\", \"cuda\", desc_flowunit,\n                                std::string(TEST_DRIVER_DIR));\n  }\n\n  return STATUS_SUCCESS;\n}\n\nTEST_F(CarFlowTest, DISABLED_CarDetection) {\n  MBLOG_INFO << \"car detection get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n  std::string toml_content = R\"(\n    [log]\n    level = \"DEBUG\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_data_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n\n          video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, label=\"<out_video_url>\", source_url=\"test_video.mp4\"]                                           \n          videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n          videodecoder[type=flowunit, flowunit=video_decoder, device=cuda, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size = 16]\n          cv_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", width=800, height=480, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n          color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", queue_size = 16]\n          
normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<in_data> | <out_data>\", normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16]\n          day_inference[type=flowunit, flowunit=day_inference, device=cuda, deviceid=0, label=\"<data> | <layer15_conv> | <layer22_conv>\", queue_size = 16, batch_size = 1]\n          yolobox[type=flowunit, flowunit=yolobox, device=cpu, deviceid=0, label=\"<layer15_conv> | <layer22_conv> | <Out_1>\", queue_size = 16, batch_size = 1]\n          draw_bbox[type=flowunit, flowunit=draw_bbox, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\", queue_size = 16]\n          videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size=16, default_dest_url=\"rtsp://localhost/test\", encoder=\"mpeg4\"]\n\n          video_input:out_video_url -> videodemuxer:in_video_url\n          videodemuxer:out_video_packet -> videodecoder:in_video_packet\n          videodecoder:out_video_frame -> cv_resize:in_image\n          cv_resize:out_image -> color_transpose: in_image\n          color_transpose: out_image -> normalize: in_data\n          normalize: out_data -> day_inference:data\n          day_inference:layer15_conv -> yolobox: layer15_conv\n          day_inference:layer22_conv -> yolobox: layer22_conv\n          yolobox: Out_1 -> draw_bbox: in_region\n          videodecoder:out_video_frame -> draw_bbox: in_image\n          draw_bbox:out_image -> videoencoder: in_video_frame\n        }'''\n    format = \"graphviz\"\n  )\";\n  std::string config_file_path = std::string(TEST_DATA_DIR) + \"/test.toml\";\n  struct stat buffer;\n  if (stat(config_file_path.c_str(), &buffer) == 0) {\n    remove(config_file_path.c_str());\n  }\n  std::ofstream ofs(config_file_path);\n  EXPECT_TRUE(ofs.is_open());\n  ofs.write(toml_content.data(), toml_content.size());\n  ofs.flush();\n  ofs.close();\n  Defer {\n    auto rmret = 
remove(config_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  };\n\n  auto car_flow = GetCarFlow();\n  auto ret = car_flow->Init(config_file_path);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = car_flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  car_flow->Run();\n  car_flow->Wait(10000 * 1000);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/function/demo_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <sys/stat.h>\n\n#include <atomic>\n#include <cstdio>\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/flow.h\"\n#include \"modelbox/graph.h\"\n#include \"modelbox/node.h\"\n#include \"engine/scheduler/flow_scheduler.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/mock/minimodelbox/mockflow.h\"\n\nnamespace modelbox {\nclass DemoTest : public testing::Test {\n public:\n  DemoTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nstatic bool SkipJudge(const std::string &local_file_path) {\n  struct stat statbuf;\n  if (stat(local_file_path.c_str(), &statbuf) == -1) {\n    MBLOG_ERROR << \"failed to load \" << local_file_path;\n    return true;\n  }\n  return false;\n}\n\nstatic std::string GetModelPath(const std::string &toml_path) {\n  auto conf_builder = std::make_shared<ConfigurationBuilder>();\n  std::shared_ptr<Configuration> toml_config = conf_builder->Build(toml_path);\n  auto model_path = toml_config->GetString(\"base.entry\");\n  return model_path;\n}\n\nstatic void TestRunGraph(const std::string &toml_content) {\n  std::string config_file_path 
=\n      std::string(TEST_DATA_DIR) + \"/demo_test.toml\";\n  struct stat buffer;\n  if (stat(config_file_path.c_str(), &buffer) == 0) {\n    remove(config_file_path.c_str());\n  }\n  std::ofstream demo_test_toml(config_file_path);\n  EXPECT_TRUE(demo_test_toml.is_open());\n  demo_test_toml.write(toml_content.data(), toml_content.size());\n  demo_test_toml.flush();\n  demo_test_toml.close();\n  Defer {\n    auto rmret = remove(config_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  };\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(config_file_path);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n  flow->Wait(0);\n}\n\nTEST_F(DemoTest, LPDetection) {\n  std::string video_file =\n      std::string(TEST_DEMO_VIDEO_DIR) + std::string(\"/test_video.mp4\");\n  if (SkipJudge(video_file) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string car_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                         std::string(\"/car_inference.toml\");\n  std::string car_model_path = GetModelPath(car_toml);\n  if (SkipJudge(car_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string lp_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                        std::string(\"/lp_inference.toml\");\n  std::string lp_model_path = GetModelPath(lp_toml);\n  if (SkipJudge(lp_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  MBLOG_INFO << \"lp detection get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_demo_dir = TEST_DEMO_DRIVERS_DIR;\n  std::string toml_content = R\"(\n    [log]\n    level = \"INFO\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_demo_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = \"\"\"digraph demo_test {\n                video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, 
label=\"<out_video_url>\", source_url=\"/opt/modelbox/demo/video/test_video.mp4\"]                                           \n                videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n                videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size = 16, batch_size=5]\n                frame_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", image_width=800, image_height=480, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n                car_color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", batch_size = 5, queue_size = 16]\n                car_normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<input> | <output>\", normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16, batch_size = 5]\n                car_inference[type=flowunit, flowunit=car_inference, device=cuda, deviceid=0, label=\"<data> | <layer15-conv> | <layer22-conv>\", queue_size = 16, batch_size = 5]\n                car_yolobox[type=flowunit, flowunit=car_yolobox, device=cpu, deviceid=0, label=\"<layer15-conv> | <layer22-conv> | <Out_1>\", image_width=1920, image_height=1080, queue_size = 16, batch_size = 5]\n                expand_bbox_img[type=flowunit, flowunit=expand_bbox_img, device=cpu, deviceid=0, label=\"<In_img> | <In_bbox> | <Out_img> | <Out_bbox>\"]\n\n                car_condition[type=flowunit, flowunit=car_condition, device=cpu, deviceid=0, label=\"<In_img> | <In_bbox> | <Out_true> | <Out_false>\", batch_size = 1, queue_size = 16]\n                split_img_bbox[type=flowunit, flowunit=split_img_bbox, device=cpu, deviceid=0, label=\"<In_true> | <Out_img> | <Out_bbox>\", batch_size = 5, queue_size = 16]            \n       
         \n                car_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <Out_1>\", image_width=416, image_height=416, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n                lp_color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", batch_size = 5, queue_size = 16]\n                lp_normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<input> | <output>\", normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16, batch_size = 5]\n                lp_inference[type=flowunit, flowunit=lp_inference, device=cuda, deviceid=0, label=\"<data> | <layer11-conv>\", queue_size = 16, batch_size = 5]\n                lp_yolobox[type=flowunit, flowunit=lp_yolobox, device=cpu, deviceid=0, label=\"<layer11-conv> | <car_bboxes> | <Out_1>\", image_width=1920, image_height=1080, queue_size = 16, batch_size = 5]\n                collapse_bbox[type=flowunit, flowunit=collapse_bbox, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n                draw_bbox[type=flowunit, flowunit=draw_bbox, device=cpu, deviceid=0, label=\"<in_image> | <in_region> | <out_image>\", queue_size = 16, batch_size = 5]\n                videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size=16, default_dest_url=\"rtsp://172.22.115.16/test\", encoder=\"mpeg4\"]\n                \n                video_input:out_video_url -> videodemuxer:in_video_url\n                videodemuxer:out_video_packet -> videodecoder:in_video_packet\n                videodecoder:out_video_frame -> frame_resize:in_image\n                frame_resize: out_image -> car_color_transpose: in_image\n                car_color_transpose: out_image -> car_normalize: in_data\n                car_normalize: out_data -> car_inference:data\n                car_inference: \"layer15-conv\" -> 
car_yolobox: \"layer15-conv\"\n                car_inference: \"layer22-conv\" -> car_yolobox: \"layer22-conv\"\n                car_yolobox: Out_1 -> expand_bbox_img: In_bbox\n                videodecoder:out_video_frame -> expand_bbox_img: In_img\n                expand_bbox_img: Out_img -> car_condition: In_img\n                expand_bbox_img: Out_bbox -> car_condition: In_bbox\n\n                car_condition: Out_true -> split_img_bbox: In_true\n                car_condition: Out_false -> collapse_bbox: input\n\n                split_img_bbox: Out_img -> car_resize: in_image\n                split_img_bbox: Out_bbox -> lp_yolobox: car_bboxes\n\n                car_resize: out_image  -> lp_color_transpose: in_image\n                lp_color_transpose: out_image  ->  lp_normalize: in_data\n                lp_normalize: out_data -> lp_inference: data\n                lp_inference: \"layer11-conv\" -> lp_yolobox: \"layer11-conv\"\n                lp_yolobox: Out_1 -> collapse_bbox: input\n                collapse_bbox: output -> draw_bbox: in_region\n                videodecoder:out_video_frame -> draw_bbox: in_image \n                draw_bbox: out_image -> videoencoder: in_video_frame   \n                }\"\"\"\n    format = \"graphviz\"\n  )\";\n\n  // run graph\n  TestRunGraph(toml_content);\n}\n\nTEST_F(DemoTest, CarDetection) {\n  std::string video_file =\n      std::string(TEST_DEMO_VIDEO_DIR) + std::string(\"/test_video.mp4\");\n  if (SkipJudge(video_file) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string car_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                         std::string(\"/car_inference.toml\");\n  std::string car_model_path = GetModelPath(car_toml);\n  if (SkipJudge(car_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  MBLOG_INFO << \"car detection get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_demo_dir = TEST_DEMO_DRIVERS_DIR;\n  std::string toml_content = R\"(\n    
[log]\n    level = \"INFO\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_demo_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = \"\"\"digraph demo_test {\n                video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, label=\"<out_video_url>\", source_url=\"/opt/modelbox/demo/video/test_video.mp4\"]                                           \n                videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n                videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size = 16, batch_size=5]\n                frame_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <Out_1>\", image_width=800, image_height=480, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n                car_color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", batch_size = 5, queue_size = 16]\n                car_normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<in_data> | <out_data>\", normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16, batch_size = 5]\n                car_inference[type=flowunit, flowunit=car_inference, device=cuda, deviceid=0, label=\"<data> | <layer15-conv> | <layer22-conv>\", queue_size = 16, batch_size = 5]\n                car_yolobox[type=flowunit, flowunit=car_yolobox, device=cpu, deviceid=0, label=\"<layer15-conv> | <layer22-conv> | <Out_1>\", image_width=1920, image_height=1080, queue_size = 16, batch_size = 5]\n                draw_bbox[type=flowunit, flowunit=draw_bbox, device=cpu, deviceid=0, label=\"<in_image> | <in_region> | <out_image>\", queue_size = 16, batch_size = 5]\n             
   videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size=16, default_dest_url=\"rtsp://localhost/test\", encoder=\"mpeg4\"]\n                \n                video_input:out_video_url -> videodemuxer:in_video_url\n                videodemuxer:out_video_packet -> videodecoder:in_video_packet\n                videodecoder:out_video_frame -> frame_resize:in_image\n                frame_resize: out_image -> car_color_transpose: in_image\n                car_color_transpose: out_image -> car_normalize: in_data\n                car_normalize: out_data -> car_inference:data\n                car_inference: \"layer15-conv\" -> car_yolobox: \"layer15-conv\"\n                car_inference: \"layer22-conv\" -> car_yolobox: \"layer22-conv\"\n                car_yolobox: Out_1 -> draw_bbox: in_region\n                videodecoder:out_video_frame -> draw_bbox: in_image\n                draw_bbox: out_image -> videoencoder: in_video_frame   \n                }\"\"\"\n    format = \"graphviz\"\n  )\";\n\n  TestRunGraph(toml_content);\n}\n\nTEST_F(DemoTest, YOLOv3) {\n  std::string video_file =\n      std::string(TEST_DEMO_VIDEO_DIR) + std::string(\"/test_video.mp4\");\n  if (SkipJudge(video_file) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string yolov3_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                            std::string(\"/yolov3_inference.toml\");\n  std::string yolov3_model_path = GetModelPath(yolov3_toml);\n  if (SkipJudge(yolov3_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  MBLOG_INFO << \"YOLOv3(coco) get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_demo_dir = TEST_DEMO_DRIVERS_DIR;\n  std::string toml_content = R\"(\n    [log]\n    level = \"INFO\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_demo_dir + \"\\\"]\\n    \" +\n                             
R\"([graph]\n    graphconf = \"\"\"digraph demo_test {\n                video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, source_url=\"/opt/modelbox/demo/video/test_video.mp4\"]                                                                                 \n                videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0]\n                videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, pix_fmt=rgb, queue_size = 16, batch_size=5]\n                frame_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, image_width=608, image_height=608, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n                color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, batch = 5, queue_size = 16]\n                normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16, batch_size = 5]\n                yolov3_inference[type=flowunit, flowunit=yolov3_inference, device=cuda, deviceid=0, queue_size = 16, batch_size = 5]\n                YOLOv3_post[type=flowunit, flowunit=YOLOv3_post, device=cpu, deviceid=0, image_width=1920, image_height=1080, queue_size = 16, batch_size = 5]\n                draw_bbox[type=flowunit, flowunit=draw_bbox, device=cpu, deviceid=0, queue_size = 16, batch_size = 5]\n                videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, queue_size=16, default_dest_url=\"rtsp://localhost/test\", encoder=\"mpeg4\"]\n\n                video_input:out_video_url -> videodemuxer:in_video_url\n                videodemuxer:out_video_packet -> videodecoder:in_video_packet\n                videodecoder:out_video_frame -> frame_resize:in_image\n                frame_resize: out_image -> color_transpose: in_image\n                color_transpose: out_image -> normalize: in_data\n                normalize: out_data -> 
yolov3_inference:data\n                yolov3_inference: \"layer82-conv\" -> YOLOv3_post: \"layer82-conv\"\n                yolov3_inference: \"layer94-conv\" -> YOLOv3_post: \"layer94-conv\"\n                yolov3_inference: \"layer106-conv\" -> YOLOv3_post: \"layer106-conv\"\n                YOLOv3_post: Out_1 -> draw_bbox: in_region\n                videodecoder:out_video_frame -> draw_bbox: in_image\n                draw_bbox: out_image -> videoencoder: in_video_frame      \n                }\"\"\"\n    format = \"graphviz\"\n  )\";\n\n  TestRunGraph(toml_content);\n}\n\nTEST_F(DemoTest, FaceDetection) {\n  std::string video_file =\n      std::string(TEST_DEMO_VIDEO_DIR) + std::string(\"/face_test.mp4\");\n  if (SkipJudge(video_file) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string face_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                          std::string(\"/face_inference.toml\");\n  std::string face_model_path = GetModelPath(face_toml);\n  if (SkipJudge(face_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  MBLOG_INFO << \"face detection get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_demo_dir = TEST_DEMO_DRIVERS_DIR;\n  std::string toml_content = R\"(\n    [log]\n    level = \"INFO\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_demo_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n            video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, label=\"<out_video_url>\", source_url=\"/opt/modelbox/demo/video/face_test.mp4\"]\n            videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n            videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size=16, batch_size=5]\n     
       face_preprocess[type=flowunit, flowunit=face_preprocess, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\", width=640, height=352, resize_type=1, method=\"inter_nearest\", batch_size=5, queue_size=16]\n            face_color_transpose[type=flowunit, flowunit=face_color_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", batch_size=5, queue_size=16]\n            face_inference[type=flowunit, flowunit=face_inference, device=cuda, deviceid=0, label=\"<blob1> | <sigmoid_blob1> | <conv_blob60> | <conv_blob62> | <conv_blob64>\", queue_size=16, batch_size=5]\n            face_center[type=flowunit, flowunit=face_center, device=cpu, deviceid=0, label=\"<sigmoid_blob1> | <conv_blob60> | <conv_blob62> | <conv_blob64> | <Out_1> | <Out_2>\", image_width=2560, image_height=1440, input_width=640, input_height=352, queue_size=16, batch_size=5]\n\n            face_alignment[type=flowunit, flowunit=face_alignment, device=cpu, deviceid=0, label=\"<In_img> | <In_kps> | <Aligned_img>\", net_width=224, net_height=224, batch_size=5, queue_size=16]\n            face_expand[type=flowunit, flowunit=face_expand, device=cpu, deviceid=0, label=\"<In_img> | <Out_img>\"]\n            face_condition[type=flowunit, flowunit=face_condition, device=cpu, deviceid=0, label=\"<In_img> | <Out_true> | <Out_false>\", queue_size=16, batch_size=1]\n            expression_inference[type=flowunit, flowunit=expression_inference, device=cuda, deviceid=0, label=\"<blob1> | <fc_blob1>\", queue_size=16, batch_size=5]\n            expression_process[type=flowunit, flowunit=face_mobilev2, device=cpu, deviceid=0, label=\"<fc_blob1> | <Out_1>\", queue_size=16, batch_size=5]\n            face_collapse[type=flowunit, flowunit=face_collapse, device=cpu, deviceid=0, label=\"<In_label> | <Out_label>\"]\n            face_draw[type=flowunit, flowunit=face_draw, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <In_3> | <Out_1>\", method=max, queue_size=16, batch_size=5]\n            
videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size=16, default_dest_url=\"rtsp://172.22.115.16/youxujia_test\", encoder=\"mpeg4\"]\n\n            video_input:out_video_url -> videodemuxer:in_video_url\n            videodemuxer:out_video_packet -> videodecoder:in_video_packet\n            videodecoder:out_video_frame -> face_preprocess:In_1\n            face_preprocess: Out_1 -> face_color_transpose: in_image\n            face_color_transpose: out_image -> face_inference:blob1\n            face_inference: sigmoid_blob1 -> face_center: sigmoid_blob1\n            face_inference: conv_blob60 -> face_center: conv_blob60\n            face_inference: conv_blob62 -> face_center: conv_blob62\n            face_inference: conv_blob64 -> face_center: conv_blob64\n            face_center: Out_1 -> face_draw: In_1\n            face_center:Out_2 -> face_alignment:In_kps\n\n            videodecoder:out_video_frame -> face_alignment:In_img\n            face_alignment:Aligned_img -> face_expand:In_img\n            face_expand:Out_img -> face_condition : In_img\n            face_condition:Out_false -> face_collapse:In_label\n            face_condition:Out_true -> expression_inference : blob1\n            expression_inference:fc_blob1 -> expression_process:fc_blob1\n            expression_process:Out_1 -> face_collapse:In_label\n            face_collapse:Out_label -> face_draw:In_3\n            videodecoder:out_video_frame -> face_draw: In_2\n            face_draw: Out_1 -> videoencoder: in_video_frame\n            }'''\n    format = \"graphviz\"\n  )\";\n\n  TestRunGraph(toml_content);\n}\n\nTEST_F(DemoTest, PedestrianTracking) {\n  std::string video_file =\n      std::string(\"/opt/modelbox/demo/video/ppt_1080p.mp4\");\n  if (SkipJudge(video_file) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string pedestrian_toml =\n      std::string(TEST_DEMO_DRIVERS_DIR) +\n      std::string(\"/pedestrian_detect_inference.toml\");\n 
 std::string pedestrian_model_path = GetModelPath(pedestrian_toml);\n  if (SkipJudge(pedestrian_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  std::string reid_toml = std::string(TEST_DEMO_DRIVERS_DIR) +\n                          std::string(\"/pedestrian_reid_inference.toml\");\n  std::string reid_model_path = GetModelPath(reid_toml);\n  if (SkipJudge(reid_model_path) == true) {\n    GTEST_SKIP();\n  };\n\n  MBLOG_INFO << \"pedestrian tracking get in.\" << std::endl;\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  const std::string test_demo_dir = TEST_DEMO_DRIVERS_DIR;\n  std::string toml_content = R\"(\n    [log]\n    level = \"DEBUG\"\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_demo_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = \"\"\"digraph demo_test {\n              video_input[type=flowunit, flowunit=video_input, device=cpu, deviceid=0, label=\"<out_video_url>\", source_url=\"/opt/modelbox/demo/video/ppt_1080p.mp4\"]\n              videodemuxer[type=flowunit, flowunit=video_demuxer, device=cpu, deviceid=0, label=\"<in_video_url> | <out_video_packet>\"]\n              videodecoder[type=flowunit, flowunit=video_decoder, device=cpu, deviceid=0, label=\"<in_video_packet> | <out_video_frame>\", pix_fmt=rgb, queue_size = 16]\n              skip_frame[type=flowunit, flowunit=skip_frame, device=cpu, deviceid=0, label=\"<input_frame> | <output_frame>\", process_frame_per_second=5, queue_size = 16]\n              fullImg_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", width=800, height=480, method=\"inter_nearest\", batch_size=5, queue_size = 16]\n              fullImg_color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", queue_size = 16]\n              fullImg_normalize[type=flowunit, flowunit=normalize, device=cpu, 
deviceid=0, label=\"<in_data> | <out_data>\", normalize=\"0.003921568627451, 0.003921568627451, 0.003921568627451\", queue_size = 16, batch_size = 5]\n              pedestrian_detect_inference[type=flowunit, flowunit=pedestrian_detect_inference, device=cuda, deviceid=0, label=\"<data> | <layer82-conv> | <layer94-conv> | <layer106-conv>\", queue_size = 16, batch_size = 5]\n              pedestrian_yolov3_post[type=flowunit, flowunit=pedestrian_yolov3_post, device=cpu, deviceid=0, label=\"<layer82-conv> | <layer94-conv> | <layer106-conv> | <Out_1>\", queue_size = 16, batch_size = 5]\n\n              video_input:out_video_url -> videodemuxer:in_video_url\n              videodemuxer:out_video_packet -> videodecoder:in_video_packet\n              videodecoder:out_video_frame -> skip_frame:input_frame\n              skip_frame:output_frame -> fullImg_resize:in_image\n              fullImg_resize: out_image -> fullImg_color_transpose: in_image\n              fullImg_color_transpose: out_image -> fullImg_normalize: in_data\n              fullImg_normalize: out_data -> pedestrian_detect_inference:data\n              pedestrian_detect_inference: \"layer82-conv\" -> pedestrian_yolov3_post: \"layer82-conv\"\n              pedestrian_detect_inference: \"layer94-conv\" -> pedestrian_yolov3_post: \"layer94-conv\"\n              pedestrian_detect_inference: \"layer106-conv\" -> pedestrian_yolov3_post: \"layer106-conv\"\n\n              expand_bbox_img[type=flowunit, flowunit=pedestrian_expand_bbox_img, device=cpu, deviceid=0, label=\"<In_img> | <In_bbox> | <Out_img> | <Out_bbox>\"]\n              has_bbox_condition[type=flowunit, flowunit=has_bbox_condition, device=cpu, deviceid=0, label=\"<In_img> | <In_bbox> | <Out_true> | <Out_false>\", batch_size = 1, queue_size = 16]\n              split_img_bbox[type=flowunit, flowunit=split_img_bbox, device=cpu, deviceid=0, label=\"<In_true> | <Out_img> | <Out_bbox>\", batch_size = 8, queue_size = 16]\n              
cropImg_resize[type=flowunit, flowunit=resize, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", width=128, height=256, method=\"inter_nearest\", batch_size=8, queue_size = 16]\n              cropImg_color_transpose[type=flowunit, flowunit=packed_planar_transpose, device=cpu, deviceid=0, label=\"<in_image> | <out_image>\", queue_size = 16, batch_size = 1]\n              cropImg_mean[type=flowunit, flowunit=mean, device=cpu, deviceid=0, label=\"<in_data> | <out_data>\", mean=\"124, 116, 104\", queue_size = 16, batch_size = 8]\n              cropImg_normalize[type=flowunit, flowunit=normalize, device=cpu, deviceid=0, label=\"<in_data> | <out_data>\", normalize=\"0.229, 0.224, 0.225\", queue_size = 16, batch_size = 8]\n              reid_inference[type=flowunit, flowunit=pedestrian_reid_inference, device=cuda, deviceid=0, label=\"<input> | <output>\", queue_size = 16, batch_size = 8]\n              reid_postprocess[type=flowunit, flowunit=reid_postprocess, device=cpu, deviceid=0, label=\"<embedding> | <bboxes> | <Out_1>\", input_width=128, input_height=256, queue_size = 16, batch_size = 8]\n              collapse_bbox[type=flowunit, flowunit=collapse_person_bbox, device=cpu, deviceid=0, label=\"<input> | <output>\"]\n\n              pedestrian_yolov3_post: Out_1 ->  expand_bbox_img: In_bbox\n              skip_frame:output_frame -> expand_bbox_img: In_img\n              expand_bbox_img: Out_img -> has_bbox_condition: In_img\n              expand_bbox_img: Out_bbox -> has_bbox_condition: In_bbox\n              has_bbox_condition: Out_true -> split_img_bbox: In_true\n              has_bbox_condition: Out_false -> collapse_bbox: input\n              split_img_bbox: Out_img -> cropImg_resize: in_image\n              split_img_bbox: Out_bbox -> reid_postprocess: bboxes\n              cropImg_resize: out_image  -> cropImg_color_transpose: in_image\n              cropImg_color_transpose: out_image  ->  cropImg_mean: in_data\n              cropImg_mean: out_data -> 
cropImg_normalize: in_data\n              cropImg_normalize: out_data -> reid_inference: input\n              reid_inference: \"output\" -> reid_postprocess: embedding\n              reid_postprocess: Out_1 -> collapse_bbox: input\n\n              matching[type=flowunit, flowunit=matching, device=cpu, deviceid=0, label=\"<Input_1> | <Output>\", queue_size = 16, batch_size = 1]\n              draw_bbox[type=flowunit, flowunit=draw_bbox_mot, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\", queue_size = 16, batch_size = 5]\n              videoencoder[type=flowunit, flowunit=video_encoder, device=cpu, deviceid=0, label=\"<in_video_frame>\", queue_size=16, default_dest_url=\"rtsp://localhost/test\", encoder=\"mpeg4\"]\n\n              collapse_bbox: output -> matching: Input_1\n              matching: Output -> draw_bbox: In_1\n              skip_frame:output_frame -> draw_bbox: In_2\n              draw_bbox: Out_1 -> videoencoder: in_video_frame\n\n            }\"\"\"\n    format = \"graphviz\"\n  )\";\n\n  // run graph\n  TestRunGraph(toml_content);\n}\n}  // namespace modelbox"
  },
  {
    "path": "test/function/dynamic_graph_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"include/modelbox/data_handler.h\"\n#include \"include/modelbox/modelbox_engine.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\nclass DynamicGraphTest : public testing::Test {\n public:\n  DynamicGraphTest() = default;\n\n protected:\n  std::shared_ptr<ModelBoxEngine> modelbox_engine;\n  void SetUp() override{\n\n  };\n\n  void TearDown() override{\n\n  };\n};\n\nstd::shared_ptr<ModelBoxEngine> Createmodelbox_engine() {\n  auto modelbox_engine = std::make_shared<ModelBoxEngine>();\n  return modelbox_engine;\n}\n\n\nTEST_F(DynamicGraphTest, DataHandlerTest) {\n  auto data_handler0 = std::make_shared<DataHandler>(BUFFERLIST_NODE);\n  auto data_handler = std::make_shared<DataHandler>(BUFFERLIST_NODE);\n  std::map<std::string, std::shared_ptr<DataHandler>> data;\n  auto status = data_handler->SetDataHandler(data);\n  EXPECT_TRUE(status == STATUS_FAULT);\n}\n\n\nTEST_F(DynamicGraphTest, StreamTest) {\n  modelbox_engine = std::make_shared<ModelBoxEngine>();\n  auto builder = std::make_shared<ConfigurationBuilder>();\n  auto config = builder->Build();\n  config->SetProperty(\"graph.queue_size\", \"32\");\n  config->SetProperty(\"graph.queue_size_external\", \"1000\");\n  config->SetProperty(\"graph.batch_size\", \"16\");\n  
config->SetProperty(\"drivers.skip-default\", \"true\");\n  config->SetProperty(\"drivers.dir\", TEST_DRIVER_DIR);\n  modelbox_engine->Init(config);\n  auto input_stream = modelbox_engine->CreateInput({\"input\"});\n  auto source_url =\n      std::string(TEST_ASSETS) + \"/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4\";\n  input_stream->SetMeta(\"source_url\", source_url);\n  std::map<std::string, std::string> demuxer_config;\n\n  demuxer_config.emplace(\"device\", \"cpu\");\n  demuxer_config.emplace(\"deviceid\", \"0\");\n\n  auto video_demuxer_output =\n      modelbox_engine->Execute(\"video_demuxer\", demuxer_config, input_stream);\n  auto buffer = video_demuxer_output->GetData();\n  video_demuxer_output->Close();\n  modelbox_engine->ShutDown();\n  EXPECT_NE(buffer, nullptr);\n}\n\nTEST_F(DynamicGraphTest, VideoReEncodeTest) {\n  std::shared_ptr<ModelBoxEngine> modelbox_engine = Createmodelbox_engine();\n\n  auto builder = std::make_shared<ConfigurationBuilder>();\n  auto config = builder->Build();\n  config->SetProperty(\"graph.queue_size\", \"32\");\n  config->SetProperty(\"graph.queue_size_external\", \"1000\");\n  config->SetProperty(\"graph.batch_size\", \"16\");\n  config->SetProperty(\"drivers.skip-default\", \"true\");\n  config->SetProperty(\"drivers.dir\", TEST_DRIVER_DIR);\n  Status status = modelbox_engine->Init(config);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  if (status != STATUS_SUCCESS) {\n    MBLOG_ERROR << \"failed init modelbox_engine\";\n    return;\n  }\n\n  auto stream = modelbox_engine->CreateInput({\"input1\"});\n  std::string path =\n      std::string(TEST_ASSETS) + \"/video/jpeg_5s_480x320_24fps_yuv444_8bit.mp4\";\n  stream->SetMeta(\"source_url\", path);\n  stream->Close();\n\n  auto encoder_input_stream = modelbox_engine->CreateInput({\"input2\"});\n\n  std::map<std::string, std::string> demuxer_config = {{\"deviceid\", \"0\"},\n                                                       {\"device\", \"cpu\"}};\n\n  std::map<std::string, 
std::string> decoder_config;\n  {\n    decoder_config.emplace(\"device\", \"cpu\");\n    decoder_config.emplace(\"deviceid\", \"0\");\n    decoder_config.emplace(\"pix_fmt\", \"nv12\");\n  }\n  std::map<std::string, std::string> encoder_config;\n  {\n    encoder_config.emplace(\"device\", \"cpu\");\n    encoder_config.emplace(\"deviceid\", \"0\");\n    encoder_config.emplace(\"queue_size\", \"1\");\n    encoder_config.emplace(\"format\", \"mp4\");\n    encoder_config.emplace(\"default_dest_url\", \"/tmp/ters.mp4\");\n    encoder_config.emplace(\"encoder\", \"libx264\");\n  }\n\n  auto video_demuxer_output =\n      modelbox_engine->Execute(\"video_demuxer\", demuxer_config, stream);\n  auto video_decoder_output =\n      modelbox_engine->Execute(\"video_decoder\", decoder_config, video_demuxer_output);\n\n  modelbox_engine->Execute(\"video_encoder\", encoder_config, encoder_input_stream);\n  std::shared_ptr<DataHandler> buffer = nullptr;\n  int frame_num = 0;\n  while ((buffer = video_decoder_output->GetData()) != nullptr) {\n    encoder_input_stream->PushData(buffer, \"input2\");\n    frame_num++;\n  }\n  EXPECT_TRUE(frame_num > 1);\n  encoder_input_stream->Close();  \n  EXPECT_EQ(status, STATUS_SUCCESS);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/manager/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nfile(GLOB MANAGER_TEST_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${MOCK_DRIVER_CTRL_INCLUDE})\ninclude_directories(${TEST_INCLUDE})\ninclude_directories(${DRIVER_UNIT_TEST_INCLUDE})\n\nset(MANAGER_TEST_SOURCE ${MANAGER_TEST_SOURCE} CACHE INTERNAL \"\")\n\nadd_executable(manager-unit EXCLUDE_FROM_ALL\n\t${MANAGER_TEST_SOURCE}\n\t${MODELBOX_MANAGER_SOURCES}\n)\n\ntarget_link_libraries(manager-unit pthread)\ntarget_link_libraries(manager-unit rt)\ntarget_link_libraries(manager-unit gtest_main)\ntarget_link_libraries(manager-unit gmock_main)\ntarget_link_libraries(manager-unit ${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(manager-unit ${TLOG_STATIC_LIBRARIES})\n\nadd_custom_target(unittest-manager\n\tCOMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/manager-unit\n\tDEPENDS manager-unit\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run manager-unit Test...\"\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS manager)\nset(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\nlist(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS unittest-manager)\nset(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\n"
  },
  {
    "path": "test/manager/manager_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"manager.h\"\n\n#include <dlfcn.h>\n#include <stdio.h>\n\n#include <fstream>\n#include <future>\n#include <memory>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"manager_conf.h\"\n#include \"manager_monitor.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\n\nclass ManagerTest : public testing::Test {\n public:\n protected:\n  ManagerTest() = default;\n  void SetUp() override{\n\n  };\n  void TearDown() override{\n\n  };\n};\n\nclass ManagerTestServer {\n public:\n  ManagerTestServer() = default;\n  virtual ~ManagerTestServer() { Stop(); }\n  void Start() {\n    manager_init_server(0);\n    memset(&test_app, 0, sizeof(test_app));\n    thread_ = std::thread(&ManagerTestServer::Run);\n    running_ = true;\n    usleep(1000);\n  }\n\n  void Stop() {\n    int unused __attribute__((unused));\n    if (running_ == false) {\n      return;\n    }\n\n    running_ = false;\n\n    manager_exit();\n    if (thread_.joinable()) {\n      thread_.join();\n    }\n    memset(&test_app, 0, sizeof(test_app));\n    std::string delpid = \"rm -f /tmp/modelbox_app_*\";\n    unused = system(delpid.c_str());\n  }\n\n  static int Run() { return manager_run(); }\n\n private:\n  bool running_{false};\n  std::thread thread_;\n};\n\nclass ManagerTestApp {\n public:\n  ManagerTestApp() {\n    Reset();\n    
test_app.run = ManagerTestApp::Run;\n    test_app.arg1 = this;\n  }\n\n  ~ManagerTestApp() {\n    test_app.run = nullptr;\n    test_app.arg1 = nullptr;\n  }\n\n  void Reset() {\n    run_count_ = 0;\n    pause_ = 0;\n    pause_after_count_ = 0;\n    ignore_segv_ = 0;\n  }\n\n  void SetRunCount(int n) { run_count_ = n; }\n\n  void SetIgnoreSigSegv() { ignore_segv_ = 1; }\n\n  void SetPause(int pause_time) { pause_ = pause_time; }\n\n  void SetPauseAfterCount(int pause_time) { pause_after_count_ = pause_time; }\n\n  static void SignalSegHandler(int sig) { printf(\"handle signal %d\\n\", sig); }\n\n  static void SignalSegHandleExist(int sig) { _exit(1); }\n\n  static int Run(struct Test_App *app, int count, const char *name) {\n    auto *test_app = (ManagerTestApp *)app->arg1;\n    printf(\"run %d %s\\n\", count, name);\n    if (test_app->ignore_segv_) {\n      signal(SIGSEGV, SignalSegHandler);\n    } else {\n      signal(SIGSEGV, SignalSegHandleExist);\n    }\n\n    while (test_app->pause_ > 0) {\n      sleep(1);\n      test_app->pause_--;\n    }\n\n    if (count <= test_app->run_count_ && test_app->run_count_ > 0) {\n      return 0;\n    }\n\n    while (test_app->pause_after_count_ > 0) {\n      sleep(1);\n      test_app->pause_after_count_--;\n    }\n\n    return -1;\n  }\n\n private:\n  int run_count_{0};\n  int ignore_segv_{0};\n  int pause_{0};\n  int pause_after_count_{0};\n};\n\nTEST_F(ManagerTest, Start) {\n  ManagerTestServer server;\n  server.Start();\n  struct app_start_info info;\n  memset(&info, 0, sizeof(info));\n  info.name = \"1\";\n  info.cmdline = \"sleep\\0 1\\0\\0\";\n  info.cmd_max_len = PATH_MAX;\n  info.check_alive = 1;\n  info.keepalive_time = 60;\n  info.heartbeat_interval = 5;\n  EXPECT_EQ(0, app_start(&info));\n  sleep(1);\n  int pid = app_getpid(\"1\");\n  EXPECT_GT(pid, 0);\n\n  for (int i = 0; i < conf_watchdog_timeout * 2; i++) {\n    EXPECT_EQ(0, app_alive(\"1\"));\n    EXPECT_EQ(pid, app_getpid(\"1\"));\n  }\n}\n\nTEST_F(ManagerTest, 
Start_dup) {\n  ManagerTestServer server;\n  server.Start();\n  struct app_start_info info;\n  memset(&info, 0, sizeof(info));\n  info.name = \"1\";\n  info.cmdline = \"test\\0\\0\";\n  info.cmd_max_len = PATH_MAX;\n  info.check_alive = 1;\n  info.keepalive_time = 60;\n  info.heartbeat_interval = 5;\n  EXPECT_EQ(0, app_start(&info));\n  info.name = \"2\";\n  EXPECT_EQ(0, app_start(&info));\n  info.name = \"3\";\n  EXPECT_EQ(0, app_start(&info));\n  info.name = \"2\";\n  EXPECT_NE(0, app_start(&info));\n}\n\nTEST_F(ManagerTest, Start_many) {\n  ManagerTestServer server;\n  server.Start();\n  for (int i = 0; i < 8; i++) {\n    struct app_start_info info;\n    memset(&info, 0, sizeof(info));\n    info.name = std::to_string(i).c_str();\n    info.cmdline = \"test\\0\\0\";\n    info.cmd_max_len = PATH_MAX;\n    info.check_alive = 1;\n    info.keepalive_time = 60;\n    info.heartbeat_interval = 5;\n    EXPECT_EQ(0, app_start(&info));\n  }\n  sleep(1);\n  for (int i = 0; i < 8; i++) {\n    EXPECT_EQ(0, app_alive(std::to_string(i).c_str()));\n  }\n}\n\nTEST_F(ManagerTest, Start_stop_half) {\n  ManagerTestServer server;\n  server.Start();\n  for (int i = 0; i < 8; i++) {\n    struct app_start_info info;\n    memset(&info, 0, sizeof(info));\n    info.name = std::to_string(i).c_str();\n    info.cmdline = \"test\\0\\0\";\n    info.cmd_max_len = PATH_MAX;\n    info.check_alive = 1;\n    info.keepalive_time = 60;\n    info.heartbeat_interval = 5;\n    EXPECT_EQ(0, app_start(&info));\n  }\n  for (int i = 0; i < 8; i++) {\n    if (i % 2 == 0) {\n      continue;\n    }\n    EXPECT_EQ(0, app_stop(std::to_string(i).c_str(), 0));\n  }\n  sleep(1);\n  for (int i = 0; i < 8; i++) {\n    if (i % 2 == 0) {\n      EXPECT_EQ(0, app_alive(std::to_string(i).c_str()));\n      continue;\n    }\n    EXPECT_NE(0, app_alive(std::to_string(i).c_str()));\n  }\n}\n\nTEST_F(ManagerTest, Start_stop_all) {\n  ManagerTestServer server;\n  server.Start();\n  for (int i = 0; i < 8; i++) {\n    struct 
app_start_info info;\n    memset(&info, 0, sizeof(info));\n    info.name = std::to_string(i).c_str();\n    info.cmdline = \"test\\0\\0\";\n    info.cmd_max_len = PATH_MAX;\n    info.check_alive = 1;\n    info.keepalive_time = 60;\n    info.heartbeat_interval = 5;\n    EXPECT_EQ(0, app_start(&info));\n  }\n  for (int i = 0; i < 8; i++) {\n    EXPECT_EQ(0, app_stop(std::to_string(i).c_str(), 0));\n  }\n  for (int i = 0; i < 8; i++) {\n    EXPECT_NE(0, app_alive(std::to_string(i).c_str()));\n  }\n}\n\nTEST_F(ManagerTest, monitor) {\n  ManagerTestServer server;\n  server.Start();\n  ManagerTestApp app;\n  app.SetPause(10);\n\n  struct app_start_info info;\n  memset(&info, 0, sizeof(info));\n  info.name = \"monitor\";\n  info.cmdline = \"sleep\\00900\\0\\0\";\n  info.cmd_max_len = PATH_MAX;\n  info.check_alive = 1;\n  info.keepalive_time = 2;\n  info.heartbeat_interval = 1;\n  EXPECT_EQ(0, app_start(&info));\n  sleep(1);\n  int pid = app_getpid(\"monitor\");\n\n  sleep(2);\n  EXPECT_NE(pid, app_getpid(\"monitor\"));\n  EXPECT_NE(-1, app_getpid(\"monitor\"));\n  EXPECT_EQ(0, app_alive(\"monitor\"));\n  app_stop(\"monitor\", 0);\n}\n\nTEST_F(ManagerTest, killcmd) {\n  ManagerTestServer server;\n  server.Start();\n  ManagerTestApp app;\n  app.SetPause(10);\n\n  unlink(\"/tmp/killcmd\");\n  struct app_start_info info;\n  memset(&info, 0, sizeof(info));\n  info.name = \"killcmd\";\n  info.cmdline = \"sleep\\00900\\0\\0\";\n  info.cmd_max_len = PATH_MAX;\n  info.killcmd = \"touch\\0/tmp/killcmd\\0\\0\";\n  info.killcmd_max_len = PATH_MAX;\n  info.check_alive = 1;\n  info.keepalive_time = 2;\n  info.heartbeat_interval = 1;\n  EXPECT_EQ(0, app_start(&info));\n\n  sleep(3);\n  EXPECT_EQ(0, access(\"/tmp/killcmd\", F_OK));\n  app_stop(\"killcmd\", 0);\n  EXPECT_NE(0, app_alive(\"monitor\"));\n  unlink(\"/tmp/killcmd\");\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/manager/test_main.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"gtest/gtest.h\"\n\nint main(int argc, char **argv) {\n  int ret = 0;\n  ::testing::InitGoogleTest(&argc, argv);\n  ret |= RUN_ALL_TESTS();\n\n  return ret;\n}\n"
  },
  {
    "path": "test/mock/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nadd_subdirectory(drivers)\nadd_subdirectory(minimodelbox)\nadd_subdirectory(flowunit)"
  },
  {
    "path": "test/mock/drivers/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nfile(GLOB MOCK_DRIVER_SOURCE *.cpp *.cc *.c)\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\n\nadd_subdirectory(device_mockdevice)\nadd_subdirectory(flowunit_mockflowunit)\nadd_subdirectory(graph_conf_mockgraphconf)\n\nadd_definitions(-DTEST_DEVICE_MOCKDEVICE_PATH=\"${LIBMODELBOX_DEVICE_MOCKDEVICE_SO}\")\nadd_definitions(-DTEST_FLOWUNIT_MOCKFLOWUNIT_PATH=\"${LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SO}\")\nadd_definitions(-DTEST_GRAPHCONF_MOCKGRAPHCONF_PATH=\"${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SO}\")\n\nset(MOCK_DRIVER_CTRL_LIB mock-driver-ctrl-lib)\nset(MOCK_DRIVER_CTRL_LIB ${MOCK_DRIVER_CTRL_LIB} CACHE INTERNAL \"\")\nset(MOCK_DRIVER_CTRL_INCLUDE ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL \"\")\n\nadd_library(${MOCK_DRIVER_CTRL_LIB} ${MOCK_DRIVER_SOURCE})\ntarget_link_libraries(${MOCK_DRIVER_CTRL_LIB} dl)\n\nadd_dependencies(${MOCK_DRIVER_CTRL_LIB} ${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED})\nadd_dependencies(${MOCK_DRIVER_CTRL_LIB} ${LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SHARED})\nadd_dependencies(${MOCK_DRIVER_CTRL_LIB} ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED})\n\n"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(DEVICE_NAME \"mockdevice\")\nproject(modelbox-device-${DEVICE_NAME})\n\nfile(GLOB_RECURSE LIBMODELBOX_DEVICE_SOURCES *.cpp *.cc *.c)\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\ninclude_directories(${HUAWEI_SECURE_C_INCLUDE_DIR})\n\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE ${LIBMODELBOX_DEVICE_CPU_INCLUDE} ${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE})\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE ${HUAWEI_SECURE_C_INCLUDE_DIR} ${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE})\n\nset(HEADER \n    ${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE}/modelbox\n)\n\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC libmodelbox-device-${DEVICE_NAME}-static)\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED libmodelbox-device-${DEVICE_NAME}-shared)\n\nadd_library(${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} STATIC ${LIBMODELBOX_DEVICE_SOURCES})\nadd_library(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} SHARED 
${LIBMODELBOX_DEVICE_SOURCES})\n\nset_target_properties(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} pthread)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} dl)\n\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} ${HUAWEI_SECURE_C_LIBRARIES})\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} dl)\n\nset_target_properties(${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} ${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} \n    PROPERTIES OUTPUT_NAME \"modelbox-device-${DEVICE_NAME}\"\n)\n\nconfigure_file(${CMAKE_CURRENT_LIST_DIR}/libmodelbox-device-${DEVICE_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc @ONLY)\n\ninstall(TARGETS ${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} ${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} \n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\ninstall(FILES ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)\n\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC ${LIBMODELBOX_DEVICE_MOCKDEVICE_STATIC} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED ${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE ${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE} CACHE INTERNAL 
\"\")\nset(LIBMODELBOX_DEVICE_SOURCES ${LIBMODELBOX_DEVICE_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_DEVICE_MOCKDEVICE_SO ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-device-${DEVICE_NAME}.so CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/device_mockdevice.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include <stdio.h>\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\nMockDriverDevice MockDriverDevice::desc_;\n\n}  // namespace modelbox"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/driver_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"driver_desc.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\n#include <stdio.h>\n#include <memory>\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::MockDeviceFactory>();\n  return factory;\n}\n\nmodelbox::MockDriverDevice *GetDriverMock() { return modelbox::MockDriverDevice::Instance(); }\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  if (GetDriverMock() == nullptr) {\n    MBLOG_WARN << \"Mock is invalid.\";\n    return;\n  }\n\n  if (GetDriverMock()->GetDriverDesc() == nullptr) {\n    MBLOG_WARN << \"Mock driver is invalid.\";\n    return;\n  }\n\n  desc->SetName(GetDriverMock()->GetDriverDesc()->GetName());\n  desc->SetClass(GetDriverMock()->GetDriverDesc()->GetClass());\n  desc->SetType(GetDriverMock()->GetDriverDesc()->GetType());\n  desc->SetDescription(\n      GetDriverMock()->GetDriverDesc()->GetDescription());\n  desc->SetVersion(GetDriverMock()->GetDriverDesc()->GetVersion());\n  desc->SetFilePath(\n      GetDriverMock()->GetDriverDesc()->GetFilePath());\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return 
modelbox::MockDriverDevice::Instance()->DriverInit();\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n  modelbox::MockDriverDevice::Instance()->DriverFini();\n}\n\n"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/driver_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_DESC_H_\n#define MODELBOX_DEVICE_DESC_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/driver_api_helper.h>\n#include <modelbox/base/status.h>\n\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\nextern \"C\" modelbox::MockDriverDevice *GetDriverMock();\n\n#endif  // MODELBOX_DEVICE_DESC_H_"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/include/modelbox/device/mockdevice/device_mockdevice.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_DEVICE_MOCKDEVICE_H_\n#define MODELBOX_DEVICE_MOCKDEVICE_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/device/cpu/device_cpu.h>\n#include <modelbox/flow.h>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nconstexpr const char *MOCK_DEVICE_TYPE = \"MOCKDEVICE\";\nconstexpr const char *MOCK_DEVICE_DRIVER_NAME = \"device-mockdevice\";\nconstexpr const char *MOCK_DEVICE_DRIVER_DESCRIPTION =\n    \"A mockdevice device driver\";\n\nclass FakeDeviceMemoryManager : public DeviceMemoryManager {\n public:\n  FakeDeviceMemoryManager() : DeviceMemoryManager(\"0\") {}\n\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, void *mem_ptr, size_t size) {\n    return nullptr;\n  };\n\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override {\n    return nullptr;\n  };\n\n  void *Malloc(size_t size, uint32_t mem_flags = 0) override {\n    return nullptr;\n  };\n\n  void Free(void *mem_ptr, uint32_t mem_flags = 0) override{};\n\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override {\n    return 
STATUS_SUCCESS;\n  }\n\n  Status DeviceMemoryCopy(const std::shared_ptr<DeviceMemory> &dest_memory,\n                          size_t dest_offset,\n                          const std::shared_ptr<const DeviceMemory> &src_memory,\n                          size_t src_offset, size_t src_size,\n                          DeviceMemoryCopyKind copy_kind =\n                              DeviceMemoryCopyKind::FromHost) override {\n    return STATUS_SUCCESS;\n  };\n\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override {\n    return STATUS_SUCCESS;\n  };\n};\n\nclass MockDevice : public Device {\n public:\n  MockDevice() : Device(std::make_shared<FakeDeviceMemoryManager>()) {\n    EXPECT_CALL(*this, Malloc)\n        .WillRepeatedly(\n            [](size_t size, const std::string &user_id) { return nullptr; });\n  };\n\n  ~MockDevice() override = default;\n\n  std::vector<std::shared_ptr<DeviceMemory>> GetDeviceMemories() {\n    return std::vector<std::shared_ptr<DeviceMemory>>();\n  }\n\n  using DeviceMem = std::shared_ptr<DeviceMemory>;\n  MOCK_METHOD(DeviceMem, Malloc, (size_t, const std::string &));\n\n private:\n  std::shared_ptr<Device> device_;\n};\n\nclass MockDeviceFactory : public DeviceFactory {\n public:\n  MockDeviceFactory() {\n    EXPECT_CALL(*this, DeviceProbe).WillRepeatedly([this]() {\n      return bind_factory_->DeviceProbe();\n    });\n\n    EXPECT_CALL(*this, CreateDevice)\n        .WillRepeatedly([this](const std::string &device_id) {\n          return bind_factory_->CreateDevice(device_id);\n        });\n  };\n\n  ~MockDeviceFactory() override = default;\n\n  using DescMap = std::map<std::string, std::shared_ptr<DeviceDesc>>;\n  MOCK_METHOD(DescMap, DeviceProbe, (), (override));\n  using DevicePtr = std::shared_ptr<Device>;\n  MOCK_METHOD(DevicePtr, CreateDevice, (const std::string &), (override));\n\n private:\n  std::shared_ptr<DeviceFactory> bind_factory_ = std::make_shared<CPUFactory>();\n};\n\nclass MockDriverDevice : public 
modelbox::MockDriver {\n public:\n  MockDriverDevice() = default;\n  ~MockDriverDevice() override = default;\n\n  static MockDriverDevice *Instance() { return &desc_; };\n\n private:\n  static MockDriverDevice desc_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_DEVICE_MOCKDEVICE_H_\n"
  },
  {
    "path": "test/mock/drivers/device_mockdevice/libmodelbox-device-mockdevice.pc.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nprefix=/usr\nexec_prefix=${prefix}\nlibdir=${prefix}/lib\nincludedir=${prefix}/include/modelbox/device/mockdevice\n\nName: libmodelbox-device-mockdevice\nDescription: modelbox mockdevice device SDK\nVersion: @MODELBOX_VERSION_STRING@\nLibs: -L${libdir} -lmodelbox-device-mockdevice\nCflags: -I${includedir}"
  },
  {
    "path": "test/mock/drivers/flowunit_mockflowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"mockdevice\")\nset(UNIT_NAME \"mockflowunit\")\n\nproject(modelbox-flowunit-${UNIT_NAME}-${UNIT_DEVICE})\n\nfile(GLOB_RECURSE MODELBOX_UNIT_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED mock_flowunit)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\nset(LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SHARED ${MODELBOX_UNIT_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_MOCKDEVICE_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME 
\"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\ninstall(TARGETS ${MODELBOX_UNIT_SHARED}\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SHARED ${MODELBOX_UNIT_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_INCLUDE ${MODELBOX_UNIT_SOURCE_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SOURCES ${MODELBOX_UNIT_SOURCE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_SO ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n\n"
  },
  {
    "path": "test/mock/drivers/flowunit_mockflowunit/flowunit_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"flowunit_desc.h\"\n\n#include <stdio.h>\n\n#include <memory>\n\n#include \"flowunit_mockflowunit.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/driver_api_helper.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/flowunit.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  auto factory = std::make_shared<modelbox::MockFlowUnitFactory>();\n  auto mock_flowunit =\n      std::dynamic_pointer_cast<modelbox::MockFlowUnitDriverDesc>(\n          modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc())\n          ->GetMockFlowUnit();\n  auto create_function =\n      std::dynamic_pointer_cast<modelbox::MockFlowUnitDriverDesc>(\n          modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc())\n          ->GetMockFlowCreateFunc();\n  auto flowunit_desc =\n      std::dynamic_pointer_cast<modelbox::MockFlowUnitDriverDesc>(\n          modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc())\n          ->GetMockFlowunitDesc();\n\n  factory->SetMockFunctionFlowUnit(mock_flowunit);\n  factory->SetMockCreateFlowUnitFunc(create_function);\n  factory->SetMockFlowUnitDesc(flowunit_desc);\n  return factory;\n}\n\nmodelbox::MockDriverFlowUnit *GetDriverMock() {\n  return modelbox::MockDriverFlowUnit::Instance();\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) 
{\n  if (modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc() == nullptr) {\n    printf(\n        \"\\x1B[31m===========================================================\"\n        \"\\x1B[0m\\n\");\n    printf(\"\\x1B[31m= WARNNING: Driver is not mocked. \\x1B[0m\\n\");\n    printf(\"\\x1B[31m= please clean directory : %s \\x1B[0m\\n\", TEST_LIB_DIR);\n    printf(\n        \"\\x1B[31m===========================================================\"\n        \"\\x1B[0m\\n\");\n    FAIL();\n    return;\n  }\n\n  desc->SetName(\n      modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc()->GetName());\n  desc->SetClass(\n      modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc()->GetClass());\n  desc->SetType(\n      modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc()->GetType());\n  desc->SetDescription(modelbox::MockDriverFlowUnit::Instance()\n                           ->GetDriverDesc()\n                           ->GetDescription());\n  desc->SetVersion(\n      modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc()->GetVersion());\n  desc->SetFilePath(\n      modelbox::MockDriverFlowUnit::Instance()->GetDriverDesc()->GetFilePath());\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::MockDriverFlowUnit::Instance()->DriverInit();\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n  modelbox::MockDriverFlowUnit::Instance()->DriverFini();\n}\n"
  },
  {
    "path": "test/mock/drivers/flowunit_mockflowunit/flowunit_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_DESC_H_\n#define MODELBOX_FLOWUNIT_DESC_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/driver.h>\n#include <modelbox/base/driver_api_helper.h>\n#include <modelbox/base/status.h>\n\n#include \"flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nextern \"C\" modelbox::MockDriverFlowUnit *GetDriverMock();\n\n#endif  // MODELBOX_FLOWUNIT_DESC_H_"
  },
  {
    "path": "test/mock/drivers/flowunit_mockflowunit/flowunit_mockflowunit.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"flowunit_mockflowunit.h\"\n\n#include <utility>\n\nnamespace modelbox {\n\nMockDriverFlowUnit MockDriverFlowUnit::desc_;\n\nstd::map<std::string, std::shared_ptr<FlowUnitDesc>>\nMockFlowUnitFactory::FlowUnitProbe() {\n  auto tmp_map = std::map<std::string, std::shared_ptr<FlowUnitDesc>>();\n  if (flowunit_desc_.size() > 0) {\n    for (auto &desc : flowunit_desc_) {\n      tmp_map.insert(std::make_pair(desc->GetFlowUnitName(), desc));\n    }\n  }\n\n  if (bind_mock_flowunit_ != nullptr) {\n    auto desc = bind_mock_flowunit_->GetFlowUnitDesc();\n    tmp_map.insert(std::make_pair(\n        bind_mock_flowunit_->GetFlowUnitDesc()->GetFlowUnitName(), desc));\n  }\n\n  return tmp_map;\n}\n\nstd::shared_ptr<FlowUnit> MockFlowUnitFactory::CreateFlowUnit(\n    const std::string &name, const std::string &type) {\n  if (flowunit_create_func_) {\n    return flowunit_create_func_(name, type);\n  }\n\n  if (bind_mock_flowunit_ != nullptr) {\n    return bind_mock_flowunit_;\n  }\n\n  return std::make_shared<MockFlowUnit>();\n}\n\nvoid MockFlowUnitFactory::SetMockFunctionFlowUnit(\n    std::shared_ptr<MockFlowUnit> mock_flowunit) {\n  bind_mock_flowunit_ = std::move(mock_flowunit);\n}\n\nvoid MockFlowUnitFactory::SetMockCreateFlowUnitFunc(\n    std::function<std::shared_ptr<FlowUnit>(const std::string &name,\n       
                                     const std::string &type)>\n        create_func) {\n  flowunit_create_func_ = std::move(create_func);\n}\n\nvoid MockFlowUnitFactory::SetMockFlowUnitDesc(\n    std::vector<std::shared_ptr<FlowUnitDesc>> descs) {\n  flowunit_desc_ = std::move(descs);\n}\n}  // namespace modelbox\n"
  },
  {
    "path": "test/mock/drivers/flowunit_mockflowunit/flowunit_mockflowunit.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_MOCK_CPU_H_\n#define MODELBOX_FLOWUNIT_MOCK_CPU_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/flow.h>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/flowunit.h\"\n\nnamespace modelbox {\n\nclass MockFlowUnit : public FlowUnit {\n public:\n  MockFlowUnit() = default;\n  ~MockFlowUnit() override = default;\n\n  MOCK_METHOD(Status, Open, (const std::shared_ptr<Configuration> &opts), (override));\n  MOCK_METHOD(Status, Close, (), (override));\n\n  MOCK_METHOD(Status, Process, (std::shared_ptr<DataContext>), (override));\n  MOCK_METHOD(Status, DataPre, (std::shared_ptr<DataContext>), (override));\n  MOCK_METHOD(Status, DataPost, (std::shared_ptr<DataContext>), (override));\n  MOCK_METHOD(Status, DataGroupPre, (std::shared_ptr<DataContext>), (override));\n  MOCK_METHOD(Status, DataGroupPost, (std::shared_ptr<DataContext>), (override));\n};\n\nclass MockFlowUnitFactory : public FlowUnitFactory {\n public:\n  MockFlowUnitFactory() = default;\n  ~MockFlowUnitFactory() override = default;\n\n  std::map<std::string, std::shared_ptr<FlowUnitDesc>> FlowUnitProbe() override;\n\n  std::shared_ptr<FlowUnit> CreateFlowUnit(const std::string &name,\n                                           const std::string &type) 
override;\n\n  void SetMockFunctionFlowUnit(std::shared_ptr<MockFlowUnit> mock_flowunit);\n\n  void SetMockCreateFlowUnitFunc(\n      std::function<std::shared_ptr<FlowUnit>(const std::string &name,\n                                              const std::string &type)>\n          create_func);\n  void SetMockFlowUnitDesc(std::vector<std::shared_ptr<FlowUnitDesc>> descs);\n\n private:\n  std::shared_ptr<MockFlowUnit> bind_mock_flowunit_;\n  std::vector<std::shared_ptr<FlowUnitDesc>> flowunit_desc_;\n  std::function<std::shared_ptr<FlowUnit>(const std::string &name,\n                                          const std::string &type)>\n      flowunit_create_func_;\n};\n\nclass MockDriverFlowUnit : public MockDriver {\n public:\n  MockDriverFlowUnit() = default;\n  ~MockDriverFlowUnit() override = default;\n\n  static MockDriverFlowUnit *Instance() { return &desc_; };\n\n private:\n  static MockDriverFlowUnit desc_;\n};\n}  // namespace modelbox\n\n#endif  // MODELBOX_FLOWUNIT_MOCK_CPU_H_\n"
  },
  {
    "path": "test/mock/drivers/graph_conf_mockgraphconf/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_GRAPHCONF \"graphconf\")\nset(UNIT_NAME \"mockgraphconf\")\n\nproject(modelbox-${UNIT_NAME}-${UNIT_GRAPHCONF})\n\nfile(GLOB_RECURSE LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SOURCES *.cpp *.cc *.c)\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_INCLUDE})\n\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_INCLUDE} ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE})\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED mock_graphconf)\n\nadd_library(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} SHARED ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SOURCES})\n\nset_target_properties(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} pthread)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} rt)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} 
dl)\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} ${LIBMODELBOX_GRAPHCONF_GRAPHVIZ_SHARED})\ntarget_link_libraries(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-${UNIT_GRAPHCONF}-${UNIT_NAME}\")\n\ninstall(TARGETS ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED}\n        RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}\n        LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n        OPTIONAL)\ninstall(DIRECTORY ${HEADER} DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR})\n\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SHARED} CACHE INTERNAL \"\")\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE} CACHE INTERNAL \"\")\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SOURCES ${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SOURCES} CACHE INTERNAL \"\")\nset(LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_SO ${CMAKE_CURRENT_BINARY_DIR}/libmodelbox-${UNIT_GRAPHCONF}-${UNIT_NAME}.so CACHE INTERNAL \"\")\n"
  },
  {
    "path": "test/mock/drivers/graph_conf_mockgraphconf/graph_conf_desc.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/graph_manager.h\"\n#include \"graph_conf_mockgraphconf.h\"\n\n#include <stdio.h>\n#include <memory>\n\n#include \"modelbox/base/status.h\"\n#include \"graph_conf_desc.h\"\n\nstd::shared_ptr<modelbox::DriverFactory> CreateDriverFactory() {\n  std::shared_ptr<modelbox::DriverFactory> factory =\n      std::make_shared<modelbox::MockGraphConfigFactory>();\n  return factory;\n}\n\nmodelbox::MockDriverGraphConfig *GetDriverMock() {\n  return modelbox::MockDriverGraphConfig::Instance();\n}\n\nvoid DriverDescription(modelbox::DriverDesc *desc) {\n  if (modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc() == nullptr) {\n    printf(\n        \"\\x1B[31m===========================================================\"\n        \"\\x1B[0m\\n\");\n    printf(\"\\x1B[31m= WARNNING: Driver is not mocked. 
\\x1B[0m\\n\");\n    printf(\"\\x1B[31m= please clean directory : %s \\x1B[0m\\n\", TEST_LIB_DIR);\n    printf(\n        \"\\x1B[31m===========================================================\"\n        \"\\x1B[0m\\n\");\n    FAIL();\n    return;\n  }\n\n  desc->SetName(modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetName());\n  desc->SetClass(\n      modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetClass());\n  desc->SetType(modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetType());\n  desc->SetDescription(\n      modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetDescription());\n  desc->SetVersion(\n      modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetVersion());\n  desc->SetFilePath(\n      modelbox::MockDriverGraphConfig::Instance()->GetDriverDesc()->GetFilePath());\n}\n\nmodelbox::Status DriverInit() {\n  // Driver Init.\n  return modelbox::MockDriverGraphConfig::Instance()->DriverInit();\n}\n\nvoid DriverFini() {\n  // Driver Fini.\n  modelbox::MockDriverGraphConfig::Instance()->DriverFini();\n}"
  },
  {
    "path": "test/mock/drivers/graph_conf_mockgraphconf/graph_conf_desc.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_GRAPHCONF_DESC_H_\n#define MODELBOX_GRAPHCONF_DESC_H_\n\n#include <modelbox/base/driver.h>\n#include <modelbox/base/driver_api_helper.h>\n#include <modelbox/base/graph_manager.h>\n#include <modelbox/base/status.h>\n\n#include \"gmock/gmock.h\"\n#include \"graph_conf_mockgraphconf.h\"\n#include \"gtest/gtest.h\"\n\nextern \"C\" modelbox::MockDriverGraphConfig *GetDriverMock();\n\n\n#endif  // MODELBOX_GRAPHCONF_DESC_H_"
  },
  {
    "path": "test/mock/drivers/graph_conf_mockgraphconf/graph_conf_mockgraphconf.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"graph_conf_mockgraphconf.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\nmodelbox::MockDriverGraphConfig modelbox::MockDriverGraphConfig::desc_;\nmodelbox::GraphvizFactory factory_;\n\n}  // namespace modelbox"
  },
  {
    "path": "test/mock/drivers/graph_conf_mockgraphconf/graph_conf_mockgraphconf.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_GRAPHMANAGER_MOCK_GRAPHCONF_H_\n#define MODELBOX_GRAPHMANAGER_MOCK_GRAPHCONF_H_\n\n#include <modelbox/base/graph_manager.h>\n#include <modelbox/flow.h>\n\n#include <utility>\n\n#include \"gmock/gmock.h\"\n#include \"graphviz_conf.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nconstexpr const char *MOCK_GRAPHCONF_TYPE = \"MOCKGRAPHVIZ\";\nconstexpr const char *MOCK_GRAPHCONF_NAME = \"MOCK-GRAPHCONF-GRAPHVIZ\";\nconstexpr const char *MOCK_GRAPHCONF_DESC = \"mock graph config parse graphviz\";\n\nclass MockGraphConfig : public modelbox::GraphConfig {\n public:\n  using GraphConfig::Resolve;\n  MockGraphConfig(const std::string &graph_conf_path) {\n    EXPECT_CALL(*this, Resolve)\n        .WillRepeatedly([this](std::shared_ptr<modelbox::GCGraph> graph) {\n          return this->Resolve(std::move(graph));\n        });\n  };\n  ~MockGraphConfig() override = default;\n\n  MOCK_METHOD(bool, Resolve, (std::shared_ptr<modelbox::GCGraph>));\n\n  std::shared_ptr<modelbox::GraphConfig> graphconfig_;\n};\n\nclass MockGraphConfigFactory : public modelbox::GraphConfigFactory {\n public:\n  MockGraphConfigFactory() {\n    EXPECT_CALL(*this, CreateGraphConfigFromStr)\n        .WillRepeatedly([this](const std::string &config_path) {\n          bind_factory_ = 
std::make_shared<modelbox::GraphvizFactory>();\n          return bind_factory_->CreateGraphConfigFromStr(config_path);\n        });\n\n    EXPECT_CALL(*this, CreateGraphConfigFromFile)\n        .WillRepeatedly([this](const std::string &file_path) {\n          bind_factory_ = std::make_shared<modelbox::GraphvizFactory>();\n          return bind_factory_->CreateGraphConfigFromFile(file_path);\n        });\n\n    EXPECT_CALL(*this, GetGraphConfFactoryType).WillRepeatedly([this]() {\n      bind_factory_ = std::make_shared<modelbox::GraphvizFactory>();\n      return bind_factory_->GetGraphConfFactoryType();\n    });\n  };\n\n  ~MockGraphConfigFactory() override = default;\n\n  MOCK_METHOD(std::shared_ptr<GraphConfig>, CreateGraphConfigFromStr,\n              (const std::string &config_path), (override));\n  MOCK_METHOD(std::shared_ptr<GraphConfig>, CreateGraphConfigFromFile,\n              (const std::string &file_path), (override));\n  MOCK_METHOD(std::string, GetGraphConfFactoryType, (), (override));\n\n private:\n  std::shared_ptr<GraphvizFactory> bind_factory_;\n};\n\nclass MockDriverGraphConfig : public modelbox::MockDriver {\n public:\n  MockDriverGraphConfig() = default;\n  ~MockDriverGraphConfig() override = default;\n\n  static MockDriverGraphConfig *Instance() { return &desc_; };\n\n private:\n  static MockDriverGraphConfig desc_;\n};\n\n}  // namespace modelbox\n\n#endif  // MODELBOX_GRAPHMANAGER_MOCK_GRAPHCONF_H_"
  },
  {
    "path": "test/mock/drivers/mock_driver_ctl.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"mock_driver_ctl.h\"\n\n#include <dlfcn.h>\n\n#include <utility>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n\nnamespace modelbox {\n\nconstexpr const char *MOCK_DRIVER_DEVICE_LIB_PREFIX = \"libmodelbox-device-\";\nconstexpr const char *MOCK_DRIVER_UNIT_LIB_PREFIX = \"libmodelbox-unit-\";\nconstexpr const char *MOCK_DRIVER_GRAPHCONF_LIB_PREFIX = \"libmodelbox-graphconf-\";\n\nMockDriverDescSetup::MockDriverDescSetup() = default;\n\nMockDriverDescSetup::~MockDriverDescSetup() = default;\n\nvoid MockDriverDescSetup::SetDriverDesc(std::shared_ptr<DriverDesc> desc) {\n  desc_ = std::move(desc);\n}\n\nvoid MockDriverDescSetup::SetDriverFilePath(std::string filepath) {\n  file_path_ = std::move(filepath);\n}\n\nvoid MockDriverDescSetup::SetDriverHandler(void *handler) {\n  driver_handler_ = handler;\n}\n\nvoid MockDriverDescSetup::SetMockDriver(MockDriver *mock_driver) {\n  mock_driver_ = mock_driver;\n}\n\nstd::shared_ptr<modelbox::DriverDesc> MockDriverDescSetup::GetDriverDesc() {\n  return desc_;\n}\n\nstd::string MockDriverDescSetup::GetDriverFilePath() { return file_path_; }\n\nvoid *MockDriverDescSetup::GetDriverHander() { return driver_handler_; }\n\nMockDriver *MockDriverDescSetup::GetMockDriver() { return 
mock_driver_; }\n\nvoid MockDriverDescSetup::Setup() { mock_driver_->SetDriverDesc(desc_); }\n\nMockDriverCtl::MockDriverCtl() = default;\n\nMockDriverCtl::~MockDriverCtl() {\n  RemoveAllMockDriverFlowUnit();\n  RemoveAllMockDriverDevice();\n}\n\nstd::string MockDriverCtl::GetMockDriverFlowUnitFilePath(\n    const std::string &drive_name, const std::string &device_name,\n    const std::string &flowunit_dir) {\n  std::ostringstream otarget;\n  otarget << flowunit_dir << \"/\" << MOCK_DRIVER_UNIT_LIB_PREFIX << device_name\n          << \"-\" << drive_name << \".so\";\n  return otarget.str();\n}\n\nbool MockDriverCtl::AddMockDriverFlowUnit(const std::string &drive_name,\n                                          const std::string &device_name,\n                                          const MockFlowUnitDriverDesc &desc,\n                                          const std::string &copy_path) {\n  std::string drive_class_name;\n  std::string driver_file =\n      GetMockDriverFlowUnitFilePath(drive_name, device_name, copy_path);\n  MockDriverDescSetup mock_desc;\n  std::string key;\n  void *driver_handler = nullptr;\n  auto ptr_desc = std::make_shared<MockFlowUnitDriverDesc>();\n  typedef MockDriver *(*GetDriverMock)();\n  GetDriverMock driver_mock_func = nullptr;\n\n  key = device_name + drive_name;\n  if (flow_unit_.find(key) != flow_unit_.end()) {\n    return false;\n  }\n\n  if (CopyFile(TEST_FLOWUNIT_MOCKFLOWUNIT_PATH, driver_file, 0, true) ==\n      false) {\n    MBLOG_ERROR << \"Copy file \" << TEST_FLOWUNIT_MOCKFLOWUNIT_PATH << \" to \"\n                << driver_file << \" failed\";\n    return false;\n  }\n\n  driver_handler = dlopen(driver_file.c_str(), RTLD_NOW | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    MBLOG_ERROR << \"Open library \" << driver_file.c_str() << \" failed, \"\n                << dlerror();\n    MBLOG_ERROR << driver_handler;\n    goto errout;\n  }\n\n  driver_mock_func = (GetDriverMock)dlsym(driver_handler, \"GetDriverMock\");\n  if 
(driver_mock_func == nullptr) {\n    MBLOG_ERROR << \"Cannot find symbol GetDriverMock, \" << modelbox::StrError(errno);\n    goto errout;\n  }\n\n  *ptr_desc = desc;\n  mock_desc.SetDriverDesc(ptr_desc);\n  mock_desc.SetDriverFilePath(driver_file);\n  mock_desc.SetDriverHandler(driver_handler);\n  mock_desc.SetMockDriver(driver_mock_func());\n  flow_unit_[key] = mock_desc;\n  mock_desc.Setup();\n\n  return true;\n\nerrout:\n  if (driver_handler) {\n    dlclose(driver_handler);\n  }\n  remove(driver_file.c_str());\n  return false;\n}\n\nbool MockDriverCtl::RemoveMockDriverFlowUnit(const std::string &drive_name,\n                                             const std::string &device_name) {\n  std::string key = device_name + drive_name;\n  std::string driver_file;\n\n  if (flow_unit_.find(key) == flow_unit_.end()) {\n    return false;\n  }\n\n  auto mock_desc = flow_unit_[key];\n  UnloadAndRemove(mock_desc);\n  flow_unit_.erase(key);\n  return true;\n}\n\nvoid MockDriverCtl::RemoveAllMockDriverFlowUnit() {\n  for (auto it = flow_unit_.begin(); it != flow_unit_.end();) {\n    auto mock_desc = it->second;\n    flow_unit_.erase(it++);\n    UnloadAndRemove(mock_desc);\n  }\n}\n\nstd::string MockDriverCtl::GetMockDriverDeviceFilePath(\n    const std::string &device_name, const std::string &device_dir) {\n  std::ostringstream otarget;\n  otarget << device_dir << \"/\" << MOCK_DRIVER_DEVICE_LIB_PREFIX << device_name\n          << \".so\";\n  return otarget.str();\n}\n\nbool MockDriverCtl::AddMockDriverDevice(const std::string &device_name,\n                                        const modelbox::DriverDesc &desc,\n                                        const std::string &copy_path) {\n  std::string drive_class_name;\n  std::string driver_file = GetMockDriverDeviceFilePath(device_name, copy_path);\n  std::shared_ptr<modelbox::DriverDesc> ptr_desc =\n      std::make_shared<MockFlowUnitDriverDesc>();\n  MockDriverDescSetup mock_desc;\n  std::string key;\n  void 
*driver_handler = nullptr;\n  typedef MockDriver *(*GetDriverMock)();\n  GetDriverMock driver_mock_func = nullptr;\n\n  key = device_name;\n  if (device_.find(key) != device_.end()) {\n    return false;\n  }\n\n  if (CopyFile(TEST_DEVICE_MOCKDEVICE_PATH, driver_file, 0, true) == false) {\n    MBLOG_ERROR << \"Copy file \" << TEST_DEVICE_MOCKDEVICE_PATH << \" to \"\n                << driver_file << \" failed\";\n    return false;\n  }\n\n  driver_handler = dlopen(driver_file.c_str(), RTLD_NOW | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    MBLOG_ERROR << \"Open library \" << driver_file.c_str() << \" failed. \"\n                << dlerror();\n    goto errout;\n  }\n\n  driver_mock_func = (GetDriverMock)dlsym(driver_handler, \"GetDriverMock\");\n  if (driver_mock_func == nullptr) {\n    MBLOG_ERROR << \"Cannot find symbol GetDriverMock, \" << modelbox::StrError(errno);\n    goto errout;\n  }\n\n  *ptr_desc = desc;\n  mock_desc.SetDriverDesc(ptr_desc);\n  mock_desc.SetDriverFilePath(driver_file);\n  mock_desc.SetDriverHandler(driver_handler);\n  mock_desc.SetMockDriver(driver_mock_func());\n  device_[key] = mock_desc;\n  mock_desc.Setup();\n\n  return true;\nerrout:\n  if (driver_handler) {\n    dlclose(driver_handler);\n  }\n  remove(driver_file.c_str());\n  return false;\n}\n\nbool MockDriverCtl::RemoveMockDriverDevice(const std::string &device_name) {\n  const std::string &key = device_name;\n\n  if (device_.find(key) == device_.end()) {\n    return false;\n  }\n\n  auto mock_desc = device_[key];\n  UnloadAndRemove(mock_desc);\n  device_.erase(key);\n  return true;\n}\n\nvoid MockDriverCtl::RemoveAllMockDriverDevice() {\n  for (auto it = device_.begin(); it != device_.end();) {\n    auto mock_desc = it->second;\n    device_.erase(it++);\n    UnloadAndRemove(mock_desc);\n  }\n}\n\nvoid MockDriverCtl::UnloadAndRemove(MockDriverDescSetup &mock_desc) {\n  std::string driver_file;\n  void *driver_handler;\n\n  driver_file = mock_desc.GetDriverFilePath();\n  
driver_handler = mock_desc.GetDriverHander();\n\n  if (driver_handler) {\n    dlclose(driver_handler);\n    mock_desc.SetDriverHandler(nullptr);\n  }\n  remove(driver_file.c_str());\n}\n\nbool MockDriverCtl::AddMockDriverGraphConf(const std::string &drive_name,\n                                           const std::string &device_name,\n                                           const modelbox::DriverDesc &desc,\n                                           const std::string &copy_path) {\n  std::string drive_class_name;\n  std::string driver_file =\n      GetMockDriverGraphConfFilePath(drive_name, copy_path);\n  std::shared_ptr<modelbox::DriverDesc> ptr_desc =\n      std::make_shared<MockFlowUnitDriverDesc>();\n  MockDriverDescSetup mock_desc;\n  std::string key;\n  void *driver_handler = nullptr;\n  typedef MockDriver *(*GetDriverMock)();\n  GetDriverMock driver_mock_func = nullptr;\n\n  key = drive_name;\n  if (graph_conf_.find(key) != graph_conf_.end()) {\n    return false;\n  }\n\n  if (CopyFile(TEST_GRAPHCONF_MOCKGRAPHCONF_PATH, driver_file, 0, true) ==\n      false) {\n    MBLOG_ERROR << \"Copy file \" << TEST_GRAPHCONF_MOCKGRAPHCONF_PATH << \" to \"\n                << driver_file << \" failed\";\n    return false;\n  }\n\n  driver_handler = dlopen(driver_file.c_str(), RTLD_NOW | RTLD_LOCAL);\n  if (driver_handler == nullptr) {\n    MBLOG_ERROR << \"Open library \" << driver_file.c_str() << \" failed, \"\n                << dlerror();\n    MBLOG_ERROR << driver_handler;\n    goto errout;\n  }\n\n  driver_mock_func = (GetDriverMock)dlsym(driver_handler, \"GetDriverMock\");\n  if (driver_mock_func == nullptr) {\n    MBLOG_ERROR << \"Cannot find symbol GetDriverMock, \" << dlerror();\n    goto errout;\n  }\n\n  *ptr_desc = desc;\n  mock_desc.SetDriverDesc(ptr_desc);\n  mock_desc.SetDriverFilePath(driver_file);\n  mock_desc.SetDriverHandler(driver_handler);\n  mock_desc.SetMockDriver(driver_mock_func());\n  graph_conf_[key] = mock_desc;\n  mock_desc.Setup();\n\n  
return true;\n\nerrout:\n  if (driver_handler) {\n    dlclose(driver_handler);\n  }\n  remove(driver_file.c_str());\n  return false;\n}\n\nbool MockDriverCtl::RemoveMockDriverGraphConf(const std::string &drive_name,\n                                              const std::string &device_name) {\n  const std::string &key = drive_name;\n\n  if (graph_conf_.find(key) == graph_conf_.end()) {\n    return false;\n  }\n\n  auto mock_desc = graph_conf_[key];\n  UnloadAndRemove(mock_desc);\n  graph_conf_.erase(key);\n  return true;\n}\n\nstd::string MockDriverCtl::GetMockDriverGraphConfFilePath(\n    const std::string &graph_conf_name, const std::string &graph_dir) {\n  std::ostringstream otarget;\n  otarget << graph_dir << \"/\" << MOCK_DRIVER_GRAPHCONF_LIB_PREFIX\n          << graph_conf_name << \".so\";\n  return otarget.str();\n}\n\nvoid MockDriverCtl::RemoveAllMockDriverGraphConf() {\n  for (auto it = graph_conf_.begin(); it != graph_conf_.end();) {\n    auto mock_desc = it->second;\n    graph_conf_.erase(it++);\n    UnloadAndRemove(mock_desc);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/mock/drivers/mock_driver_ctl.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_DRIVER_MOCK_CTRL_H_\n#define MODELBOX_DRIVER_MOCK_CTRL_H_\n\n#include <map>\n#include <utility>\n\n#include \"modelbox/base/device.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/timer.h\"\n#include \"modelbox/flow.h\"\n#include \"modelbox/flowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n\n\nnamespace modelbox {\n\nclass MockFlowUnit;\n\nclass MockDriver {\n public:\n  MockDriver() {\n    EXPECT_CALL(*this, DriverInit).WillRepeatedly([]() {\n      return STATUS_OK;\n    });\n\n    EXPECT_CALL(*this, DriverFini).WillRepeatedly([]() {});\n  };\n  virtual ~MockDriver() = default;\n  virtual void SetDriverDesc(std::shared_ptr<modelbox::DriverDesc> desc) {\n    desc_ = std::move(desc);\n  };\n\n  std::shared_ptr<modelbox::DriverDesc> GetDriverDesc() { return desc_; };\n  MOCK_METHOD(modelbox::Status, DriverInit, ());\n  MOCK_METHOD(void, DriverFini, ());\n\n private:\n  std::shared_ptr<modelbox::DriverDesc> desc_;\n};\n\nclass MockFlowUnitDriverDesc : public modelbox::DriverDesc {\n public:\n  MockFlowUnitDriverDesc() = default;\n  ~MockFlowUnitDriverDesc() override = default;\n\n  void SetMockFlowUnit(std::shared_ptr<MockFlowUnit> mock_flowunit) {\n    mock_flowunit_ = std::move(mock_flowunit);\n  }\n\n  void SetMockFlowUnit(\n   
   std::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &name,\n                                                      const std::string &type)>\n          create_func,\n      std::vector<std::shared_ptr<modelbox::FlowUnitDesc>> flowunit_descs) {\n    flowunit_create_func_ = std::move(create_func);\n    mock_flowunit_desc_ = std::move(flowunit_descs);\n  }\n\n  void SetMockFlowUnit(\n      std::function<std::shared_ptr<modelbox::FlowUnit>(\n          const std::string &name, const std::string &type)>\n          create_func,\n      const std::shared_ptr<modelbox::FlowUnitDesc> &flowunit_desc) {\n    flowunit_create_func_ = std::move(create_func);\n    mock_flowunit_desc_.push_back(flowunit_desc);\n  }\n\n  std::shared_ptr<MockFlowUnit> GetMockFlowUnit() { return mock_flowunit_; }\n\n  std::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &name,\n                                                  const std::string &type)>\n  GetMockFlowCreateFunc() {\n    return flowunit_create_func_;\n  }\n\n  std::vector<std::shared_ptr<modelbox::FlowUnitDesc>> GetMockFlowunitDesc() {\n    return mock_flowunit_desc_;\n  }\n\n private:\n  std::shared_ptr<MockFlowUnit> mock_flowunit_;\n  std::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &name,\n                                                  const std::string &type)>\n      flowunit_create_func_;\n  std::vector<std::shared_ptr<modelbox::FlowUnitDesc>> mock_flowunit_desc_;\n};\n\nclass MockDriverDescSetup {\n public:\n  MockDriverDescSetup();\n  virtual ~MockDriverDescSetup();\n  std::shared_ptr<modelbox::DriverDesc> GetDriverDesc();\n  std::string GetDriverFilePath();\n  void *GetDriverHander();\n  MockDriver *GetMockDriver();\n\n  void SetDriverDesc(std::shared_ptr<modelbox::DriverDesc> desc);\n  void SetDriverFilePath(std::string filepath);\n  void SetDriverHandler(void *handler);\n  void SetMockDriver(MockDriver *mock_driver);\n\n  void Setup();\n\n private:\n  std::string 
file_path_;\n  std::shared_ptr<modelbox::DriverDesc> desc_;\n  void *driver_handler_ = nullptr;\n  MockDriver *mock_driver_;\n};\n\nclass MockDriverCtl {\n public:\n  MockDriverCtl();\n  virtual ~MockDriverCtl();\n  bool AddMockDriverFlowUnit(std::string drive_name, std::string device_name,\n                             const modelbox::DriverDesc &desc);\n\n  bool AddMockDriverFlowUnit(const std::string &drive_name,\n                             const std::string &device_name,\n                             const MockFlowUnitDriverDesc &desc,\n                             const std::string &copy_path = TEST_LIB_DIR);\n\n  bool RemoveMockDriverFlowUnit(const std::string &drive_name,\n                                const std::string &device_name);\n\n  std::string GetMockDriverFlowUnitFilePath(const std::string &drive_name,\n                                            const std::string &device_name,\n                                            const std::string &flowunit_dir);\n\n  void RemoveAllMockDriverFlowUnit();\n\n  bool AddMockDriverDevice(const std::string &device_name,\n                           const modelbox::DriverDesc &desc,\n                           const std::string &copy_path = TEST_LIB_DIR);\n\n  bool RemoveMockDriverDevice(const std::string &device_name);\n\n  std::string GetMockDriverDeviceFilePath(const std::string &device_name,\n                                          const std::string &device_dir);\n\n  void RemoveAllMockDriverDevice();\n\n  bool AddMockDriverGraphConf(const std::string &drive_name,\n                              const std::string &device_name,\n                              const modelbox::DriverDesc &desc,\n                              const std::string &copy_path = TEST_LIB_DIR);\n\n  bool RemoveMockDriverGraphConf(const std::string &drive_name,\n                                 const std::string &device_name);\n\n  std::string GetMockDriverGraphConfFilePath(const std::string &graph_conf_name,\n                          
                   const std::string &graph_dir);\n\n  void RemoveAllMockDriverGraphConf();\n\n private:\n  void UnloadAndRemove(MockDriverDescSetup &mock_desc);\n  std::map<std::string, MockDriverDescSetup> flow_unit_;\n  std::map<std::string, MockDriverDescSetup> device_;\n  std::map<std::string, MockDriverDescSetup> graph_conf_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_DRIVER_MOCK_CTRL_H_"
  },
  {
    "path": "test/mock/flowunit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-drivers-test-flowunit)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nsubdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR} \"CMakeLists.txt\")\n\nforeach(subdir ${SUBDIRS})\n    add_subdirectory(${subdir})\nendforeach()\n\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${ACL_INCLUDE_DIR})\nlist(APPEND DRIVER_UNIT_TEST_INCLUDE ${DSMI_INCLUDE_DIR})\nset(DRIVER_UNIT_TEST_INCLUDE ${DRIVER_UNIT_TEST_INCLUDE} CACHE INTERNAL \"\")\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${ACL_LIBRARIES})\nlist(APPEND DRIVER_UNIT_TEST_LINK_LIBRARIES ${DSMI_LIBRARIES})\nset(DRIVER_UNIT_TEST_LINK_LIBRARIES ${DRIVER_UNIT_TEST_LINK_LIBRARIES} CACHE INTERNAL \"\")"
  },
  {
    "path": "test/mock/flowunit/passthrouth/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nset(UNIT_DEVICE \"cpu\")\nset(UNIT_NAME \"passthrouth\")\n\nproject(modelbox-flowunit-${UNIT_DEVICE}-${UNIT_NAME})\n\nfile(GLOB_RECURSE UNIT_SOURCE *.cpp *.cc *.c)\ngroup_source_test_files(MODELBOX_UNIT_SOURCE MODELBOX_UNIT_TEST_SOURCE \"_test.c*\" ${UNIT_SOURCE})\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_CPU_INCLUDE})\n\nset(MODELBOX_UNIT_SHARED libmodelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}-shared)\nset(MODELBOX_UNIT_SOURCE_INCLUDE ${CMAKE_CURRENT_LIST_DIR})\n\nadd_library(${MODELBOX_UNIT_SHARED} SHARED ${MODELBOX_UNIT_SOURCE})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES \n    SOVERSION ${MODELBOX_VERSION_MAJOR}\n    VERSION ${MODELBOX_VERSION_MAJOR}.${MODELBOX_VERSION_MINOR}.${MODELBOX_VERSION_PATCH}\n)\n\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} pthread)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} rt)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} dl)\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_DEVICE_CPU_SHARED})\ntarget_link_libraries(${MODELBOX_UNIT_SHARED} ${LIBMODELBOX_SHARED})\n\nset_target_properties(${MODELBOX_UNIT_SHARED} PROPERTIES OUTPUT_NAME \"modelbox-unit-${UNIT_DEVICE}-${UNIT_NAME}\")\n\n# 
driver test\nlist(APPEND DRIVER_UNIT_TEST_SOURCE ${MODELBOX_UNIT_TEST_SOURCE})\nlist(APPEND DRIVER_UNIT_TEST_TARGET ${MODELBOX_UNIT_SHARED})\nset(DRIVER_UNIT_TEST_SOURCE ${DRIVER_UNIT_TEST_SOURCE} CACHE INTERNAL \"\")\nset(DRIVER_UNIT_TEST_TARGET ${DRIVER_UNIT_TEST_TARGET} CACHE INTERNAL \"\")"
  },
  {
    "path": "test/mock/flowunit/passthrouth/passthrouth.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"passthrouth.h\"\n\n#include \"modelbox/flowunit_api_helper.h\"\n\nmodelbox::Status PassThrouthFlowUnit::Open(\n    const std::shared_ptr<modelbox::Configuration> &opts) {\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PassThrouthFlowUnit::Process(\n    std::shared_ptr<modelbox::DataContext> data_ctx) {\n  auto indata = data_ctx->Input(\"in\");\n  auto output = data_ctx->Output(\"out\");\n\n  for (const auto &buff : *indata) {\n    output->PushBack(buff);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nmodelbox::Status PassThrouthFlowUnit::Close() { return modelbox::STATUS_OK; }\n\nMODELBOX_FLOWUNIT(PassThrouthFlowUnit, desc) {\n  desc.SetFlowUnitName(FLOWUNIT_NAME);\n  desc.AddFlowUnitInput({\"in\"});\n  desc.AddFlowUnitOutput({\"out\"});\n  desc.SetFlowType(modelbox::NORMAL);\n  desc.SetInputContiguous(false);\n  desc.SetDescription(FLOWUNIT_DESC);\n}\n\nMODELBOX_DRIVER_FLOWUNIT(desc) {\n  desc.Desc.SetName(FLOWUNIT_NAME);\n  desc.Desc.SetClass(modelbox::DRIVER_CLASS_FLOWUNIT);\n  desc.Desc.SetType(FLOWUNIT_TYPE);\n  desc.Desc.SetDescription(FLOWUNIT_DESC);\n  desc.Desc.SetVersion(\"1.0.0\");\n}"
  },
  {
    "path": "test/mock/flowunit/passthrouth/passthrouth.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_FLOWUNIT_CPU_PASSTHROUGH_H_\n#define MODELBOX_FLOWUNIT_CPU_PASSTHROUGH_H_\n\n#include <modelbox/base/device.h>\n#include <modelbox/base/status.h>\n#include <modelbox/buffer.h>\n#include <modelbox/device/cpu/device_cpu.h>\n#include <modelbox/flow.h>\n#include <modelbox/flowunit.h>\n\nconstexpr const char *FLOWUNIT_TYPE = \"cpu\";\nconstexpr const char *FLOWUNIT_NAME = \"passthrouth\";\nconstexpr const char *FLOWUNIT_DESC =\n    \"\\n\\t@Brief: A passthrouth flowunit on cpu device. \\n\";\n\nclass PassThrouthFlowUnit : public modelbox::FlowUnit {\n public:\n  PassThrouthFlowUnit() = default;\n  ~PassThrouthFlowUnit() override = default;\n\n  modelbox::Status Open(\n      const std::shared_ptr<modelbox::Configuration> &opts) override;\n\n  modelbox::Status Close() override;\n\n  modelbox::Status Process(\n      std::shared_ptr<modelbox::DataContext> data_ctx) override;\n};\n\n#endif  // MODELBOX_FLOWUNIT_CPU_PASSTHROUGH_H_\n"
  },
  {
    "path": "test/mock/minimodelbox/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})\n    message(FATAL_ERROR \"Do not build in source directory!\")\nendif()\n\nfile(GLOB MOCKFLOW_SOURCE *.cpp *.cc *.c)\n\ninclude_directories(${LIBMODELBOX_INCLUDE})\ninclude_directories(${LIBMODELBOX_BASE_INCLUDE})\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${MOCK_DRIVER_CTRL_INCLUDE})\ninclude_directories(${MODELBOX_SERVER_INCLUDE})\n\nset(MOCKFLOW_LIB flowmock-lib)\nadd_library(${MOCKFLOW_LIB} ${MOCKFLOW_SOURCE})\n\ntarget_link_libraries(${MOCKFLOW_LIB} ${MOCK_DRIVER_CTRL_LIB})\n\nset(MOCKFLOW_LIB ${MOCKFLOW_LIB} CACHE INTERNAL \"\")\nset(MOCKFLOW_INCLUDE ${CMAKE_CURRENT_LIST_DIR} ${MOCK_DRIVER_CTRL_INCLUDE} CACHE INTERNAL \"\")\n\n\n"
  },
  {
    "path": "test/mock/minimodelbox/mock_server.cc",
    "content": "#include \"mock_server.h\"\n\n#include \"../config.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\n\nMockServer::MockServer() = default;\n\nMockServer::~MockServer() = default;\n\nstd::string MockServer::GetTestGraphDir() {\n  return std::string(TEST_WORKING_DIR) + \"/graph\";\n}\n\nstd::string MockServer::GetServerURL() {\n  return std::string(\"http://\") + \"0.0.0.0:11104\";\n}\n\nhttplib::Response MockServer::DoRequest(HttpRequest &request) {\n  SendHttpRequest(request);\n  return request.GetResponse();\n}\n\nvoid MockServer::SetDefaultConfig(const std::shared_ptr<Configuration> &config) {\n  std::vector<std::string> plugin_path;\n  plugin_path.emplace_back(MODELBOX_PLUGIN_SO_PATH);\n  plugin_path.emplace_back(MODELBOX_PLUGIN_EDITOR_SO_PATH);\n  if (config->GetStrings(\"plugin.files\").size() <= 0) {\n    config->SetProperty(\"plugin.files\", plugin_path);\n  }\n\n  config->SetProperty(\"server.ip\", \"0.0.0.0\");\n  config->SetProperty(\"server.port\", \"11104\");\n  config->SetProperty(\"control.enable\", \"true\");\n  config->SetProperty(\"control.listen\", CONTROL_UNIX_PATH);\n  config->SetProperty(\"server.flow_path\", MockServer::GetTestGraphDir());\n\n  config->SetProperty(\"editor.enable\", \"true\");\n  config->SetProperty(\"editor.ip\", \"0.0.0.0\");\n  config->SetProperty(\"editor.port\", \"11104\");\n}\n\nStatus MockServer::Init(std::shared_ptr<Configuration> config) {\n  if (config == nullptr) {\n    ConfigurationBuilder builder;\n    config = builder.Build();\n  }\n\n  CreateDirectory(MockServer::GetTestGraphDir());\n\n  SetDefaultConfig(config);\n\n  if (access(MODELBOX_PLUGIN_SO_PATH, F_OK) != 0) {\n    return STATUS_NOTSUPPORT;\n  }\n\n  if (access(MODELBOX_PLUGIN_EDITOR_SO_PATH, F_OK) != 0) {\n    return STATUS_NOTSUPPORT;\n  }\n\n  server_ = std::make_shared<Server>(config);\n  return server_->Init();\n}\n\nStatus MockServer::Start() { return server_->Start(); }\n\nvoid MockServer::Stop() {\n  if (server_ == nullptr) 
{\n    return;\n  }\n  server_->Stop();\n  server_ = nullptr;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/mock/minimodelbox/mock_server.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MOCKSERVERH_\n#define MODELBOX_MOCKSERVERH_\n\n#include <modelbox/base/configuration.h>\n\n#include <iostream>\n#include <string>\n#include <thread>\n\n#include \"../server.h\"\n#include \"modelbox/server/http_helper.h\"\n\nnamespace modelbox {\n\nclass MockServer {\n public:\n  MockServer();\n  virtual ~MockServer();\n\n  static std::string GetTestGraphDir();\n  std::string GetServerURL();\n\n  Status Init(std::shared_ptr<Configuration> config);\n  Status Start();\n  void Stop();\n  httplib::Response DoRequest(HttpRequest &request);\n\n protected:\n  virtual void SetDefaultConfig(const std::shared_ptr<Configuration> &config);\n\n private:\n  std::shared_ptr<Server> server_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_MOCKSERVERH_\n"
  },
  {
    "path": "test/mock/minimodelbox/mock_tool.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mock_tool.h\"\n\n#include \"../config.h\"\n#include \"modelbox/common/command.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\n\nMockTool::MockTool() = default;\n\nMockTool::~MockTool() = default;\n\nint MockTool::Run(const std::string &cmd) {\n  auto cmds = modelbox::StringSplit(cmd, ' ');\n  int argc = cmds.size();\n  char *argv[cmds.size()];\n  for (size_t i = 0; i < cmds.size(); i++) {\n    argv[i] = (char *)cmds[i].data();\n  }\n  return Run(argc, argv);\n}\n\nint MockTool::Run(int argc, char *argv[]) {\n  if (argc <= 0) {\n    printf(\"Try -h for more information.\\n\");\n    return -1;\n  }\n\n  const char *action = argv[0];\n  auto cmd = modelbox::ToolCommandList::Instance()->GetCommand(action);\n  if (cmd == nullptr) {\n    printf(\"command %s not exist, try -h for more information.\\n\", action);\n    return -1;\n  }\n\n  return cmd->Run(argc, argv);\n}\n\nvoid MockTool::SetDefaultConfig(const std::shared_ptr<Configuration> &config) {\n  std::vector<std::string> plugin_path;\n  config->SetProperty(\"control.enable\", \"true\");\n  config->SetProperty(\"control.listen\",\n                      std::string(TEST_DATA_DIR) + \"/modelbox.sock\");\n}\n\nStatus MockTool::Init(std::shared_ptr<Configuration> config) {\n  if (config == nullptr) {\n    ConfigurationBuilder builder;\n    config 
= builder.Build();\n  }\n\n  SetDefaultConfig(config);\n\n  return STATUS_OK;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/mock/minimodelbox/mock_tool.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MOCK_TOOL_\n#define MODELBOX_MOCK_TOOL_\n\n#include <modelbox/base/configuration.h>\n\n#include <iostream>\n#include <string>\n#include <thread>\n\nnamespace modelbox {\n\nclass MockTool {\n public:\n  MockTool();\n  virtual ~MockTool();\n\n  Status Init(std::shared_ptr<Configuration> config);\n\n  int Run(int argc, char *argv[]);\n  int Run(const std::string &cmd);\n\n protected:\n  virtual void SetDefaultConfig(const std::shared_ptr<Configuration> &config);\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_MOCK_TOOL_\n"
  },
  {
    "path": "test/mock/minimodelbox/mockflow.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"mockflow.h\"\n\n#include <sstream>\n#include <utility>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/session_context.h\"\n\nusing ::testing::_;\nnamespace modelbox {\n\nstd::shared_ptr<FlowUnitDesc> GenerateFlowunitDesc(\n    const std::string &name, const std::set<std::string> &inputs,\n    const std::set<std::string> &outputs) {\n  auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n  mock_flowunit_desc->SetFlowUnitName(name);\n  for (const auto &input : inputs) {\n    mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(input));\n  }\n  for (const auto &output : outputs) {\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(output));\n  }\n  mock_flowunit_desc->SetFlowType(FlowType::NORMAL);\n  return mock_flowunit_desc;\n}\n\nstd::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &,\n                                                  const std::string &)>\nMockFunctionCollection::GenerateCreateFunc(bool need_sequence) {\n  auto function_collections = shared_from_this();\n  auto fu_create_func = [=](const std::string &unitname,\n                            const std::string &unittype) {\n    auto mock_flowunit = 
std::make_shared<MockFlowUnit>();\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp = mock_flowunit;\n    UNUSED_VAR(function_collections);\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(testing::Invoke(\n            [=](const std::shared_ptr<modelbox::Configuration> &flow_option) {\n              auto mock_flowunit_lock = mock_flowunit_wp.lock();\n              MBLOG_DEBUG << unitname << \" Open\";\n              if (open_func_ && mock_flowunit_lock != nullptr) {\n                return open_func_(flow_option, mock_flowunit_lock);\n              }\n              return modelbox::STATUS_OK;\n            }));\n    EXPECT_CALL(*mock_flowunit, Close()).WillRepeatedly(testing::Invoke([=]() {\n      auto mock_flowunit_lock = mock_flowunit_wp.lock();\n      MBLOG_DEBUG << unitname << \" Close\";\n      if (close_func_ && mock_flowunit_lock != nullptr) {\n        return close_func_(mock_flowunit_lock);\n      }\n      return modelbox::STATUS_OK;\n    }));\n    if (need_sequence) {\n      ON_CALL(\n          *mock_flowunit,\n          DataGroupPre(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n          .WillByDefault(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataGroupPre\";\n                if (data_group_pre_func_ && mock_flowunit_lock != nullptr) {\n                  return data_group_pre_func_(std::move(data_ctx),\n                                              mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      ON_CALL(*mock_flowunit,\n              DataPre(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n          .WillByDefault(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname 
<< \" DataPre\";\n                if (data_pre_func_ && mock_flowunit_lock != nullptr) {\n                  return data_pre_func_(std::move(data_ctx),\n                                        mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      ON_CALL(*mock_flowunit,\n              Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n          .WillByDefault(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" Process\";\n                if (process_func_ && mock_flowunit_lock != nullptr) {\n                  return process_func_(std::move(data_ctx), mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      ON_CALL(*mock_flowunit,\n              DataPost(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n          .WillByDefault(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataPost\";\n                if (data_post_func_ && mock_flowunit_lock != nullptr) {\n                  return data_post_func_(std::move(data_ctx),\n                                         mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      ON_CALL(\n          *mock_flowunit,\n          DataGroupPost(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n          .WillByDefault(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataGroupPost\";\n                if (data_group_post_func_ && mock_flowunit_lock != nullptr) {\n                  return 
data_group_post_func_(std::move(data_ctx),\n                                               mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n    } else {\n      EXPECT_CALL(*mock_flowunit, DataGroupPre(_))\n          .WillRepeatedly(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataGroupPre\";\n                if (data_group_pre_func_ && mock_flowunit_lock != nullptr) {\n                  return data_group_pre_func_(std::move(data_ctx),\n                                              mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      EXPECT_CALL(*mock_flowunit, DataPre(_))\n          .WillRepeatedly(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataPre\";\n                if (data_pre_func_ && mock_flowunit_lock != nullptr) {\n                  return data_pre_func_(std::move(data_ctx),\n                                        mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      EXPECT_CALL(*mock_flowunit, Process(_))\n          .WillRepeatedly(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" Process\";\n                if (process_func_ && mock_flowunit_lock != nullptr) {\n                  return process_func_(std::move(data_ctx), mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      EXPECT_CALL(*mock_flowunit, DataPost(_))\n          .WillRepeatedly(testing::Invoke(\n              
[=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataPost\";\n                if (data_post_func_ && mock_flowunit_lock != nullptr) {\n                  return data_post_func_(std::move(data_ctx),\n                                         mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n\n      EXPECT_CALL(*mock_flowunit, DataGroupPost(_))\n          .WillRepeatedly(testing::Invoke(\n              [=](std::shared_ptr<DataContext> data_ctx) -> Status {\n                auto mock_flowunit_lock = mock_flowunit_wp.lock();\n                MBLOG_DEBUG << unitname << \" DataGroupPost\";\n                if (data_group_post_func_ && mock_flowunit_lock != nullptr) {\n                  return data_group_post_func_(std::move(data_ctx),\n                                               mock_flowunit_lock);\n                }\n                return STATUS_OK;\n              }));\n    }\n\n    return mock_flowunit;\n  };\n  return fu_create_func;\n}\n\nvoid MockFlow::AddFlowUnitDesc(\n    const std::shared_ptr<FlowUnitDesc> &flow_desc,\n    std::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &name,\n                                                      const std::string &type)>\n        create_func,\n    const std::string &lib_path) {\n  MockFlowUnitDriverDesc desc_flowunit;\n  auto name = flow_desc->GetFlowUnitName();\n  desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(name);\n  desc_flowunit.SetDescription(name);\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      lib_path + \"/libmodelbox-unit-cpu-\" + name + \".so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  desc_flowunit.SetMockFlowUnit(std::move(create_func), flow_desc);\n  ctl_->AddMockDriverFlowUnit(name, \"cpu\", desc_flowunit, 
lib_path);\n}\n\nvoid MockFlow::Register_Test_0_2_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_0_2\", {}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetDefaultBatchSize(1);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n\n    auto session_ctx = ext_data->GetSessionContext();\n    auto session_content = std::make_shared<int>(1111);\n    session_ctx->SetPrivate(\"session\", session_content);\n\n    if (!session_ctx) {\n      MBLOG_ERROR << \"can not get session.\";\n    }\n\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({10 * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    for (size_t i = 0; i < 10; i++) {\n      data[i] = i;\n    }\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    auto *session_content = (int *)(session_ctx->GetPrivate(\"session\").get());\n    MBLOG_INFO << \"session_content is \" << session_content[0];\n\n    auto external = data_ctx->External();\n    auto *external_data_1 = (int *)(*external)[0]->ConstData();\n    auto bytes = external->GetBytes();\n\n    auto output_buf_1 = data_ctx->Output(\"Out_1\");\n    auto output_buf_2 = data_ctx->Output(\"Out_2\");\n\n    std::vector<size_t> 
data_1_shape({bytes});\n    output_buf_1->Build(data_1_shape);\n    auto *dev_data_1 = (int *)(output_buf_1->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_1[i] = external_data_1[i];\n    }\n\n    std::vector<size_t> data_2_shape({bytes});\n    output_buf_2->Build({data_2_shape});\n    auto *dev_data_2 = (int *)(output_buf_2->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_2[i] = external_data_1[i] + 10;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_0_1_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_0_1\", {}, {\"Out_1\"});\n  mock_desc->SetDefaultBatchSize(1);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n\n    auto session_ctx = ext_data->GetSessionContext();\n    auto session_content = std::make_shared<int>(1111);\n    session_ctx->SetPrivate(\"session\", session_content);\n\n    if (!session_ctx) {\n      MBLOG_ERROR << \"can not get session.\";\n    }\n\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({10 * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    for (size_t i = 0; i < 10; i++) {\n      data[i] = i;\n    }\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    
return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    auto *session_content = (int *)(session_ctx->GetPrivate(\"session\").get());\n    MBLOG_INFO << \"session_content is \" << session_content[0];\n\n    auto external = data_ctx->External();\n    auto *external_data_1 = (int *)(*external)[0]->ConstData();\n    auto bytes = external->GetBytes();\n\n    auto output_buf_1 = data_ctx->Output(\"Out_1\");\n\n    std::vector<size_t> data_1_shape({bytes});\n    output_buf_1->Build(data_1_shape);\n    auto *dev_data_1 = (int *)(output_buf_1->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_1[i] = external_data_1[i];\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_1_0_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_1_0\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    int ending;\n    bool flag = input_bufs_1->At(0)->Get(\"ending\", ending);\n    if (flag) {\n      MBLOG_INFO << ending;\n    }\n    MBLOG_INFO << *((int *)input_bufs_1->ConstData());\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_STOP;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  
mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_0_1_Batch_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_0_1_batch\", {}, {\"Out_1\"});\n  mock_desc->SetDefaultBatchSize(1);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n\n    auto session_ctx = ext_data->GetSessionContext();\n    auto session_content = std::make_shared<int>(1111);\n    session_ctx->SetPrivate(\"session\", session_content);\n\n    if (!session_ctx) {\n      MBLOG_ERROR << \"can not get session.\";\n    }\n\n    auto buffer_list = ext_data->CreateBufferList();\n    std::vector<size_t> buffer_shape(10, sizeof(int));\n    buffer_list->Build(buffer_shape);\n    for (size_t i = 0; i < 10; i++) {\n      auto *data = (int *)buffer_list->At(i)->MutableData();\n      *data = i;\n    }\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    auto *session_content = (int *)(session_ctx->GetPrivate(\"session\").get());\n    MBLOG_INFO << \"session_content is \" << session_content[0];\n\n    auto external = data_ctx->External();\n    auto bytes = external->GetBytes();\n\n    auto output_buf_1 = 
data_ctx->Output(\"Out_1\");\n\n    std::vector<size_t> data_1_shape(bytes / sizeof(int), sizeof(int));\n    output_buf_1->Build(data_1_shape);\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      auto *dev_data_1 = (int *)(output_buf_1->At(i)->MutableData());\n      *dev_data_1 = *((int *)(external->At(i)->ConstData()));\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_0_1_Batch_Thread_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_0_1_batch_thread\", {}, {\"Out_1\"});\n  mock_desc->SetDefaultBatchSize(1);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n\n  static std::atomic<bool> is_closed(false);\n  static std::shared_ptr<std::thread> listener_thread = nullptr;\n  static int32_t interval_time = 5 * 1000;\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    interval_time = opts->GetInt32(\"interval_time\", 5000);\n    std::packaged_task<void()> task([=]() {\n      while (!is_closed) {\n        if (interval_time > 0) {\n          usleep(interval_time);\n        }\n        auto ext_data = mock_flowunit->CreateExternalData();\n\n        if (!ext_data) {\n          MBLOG_ERROR << \"can not get external data.\";\n        }\n\n        auto session_ctx = ext_data->GetSessionContext();\n        auto session_content = std::make_shared<int>(1111);\n        session_ctx->SetPrivate(\"session\", session_content);\n\n        if (!session_ctx) {\n          MBLOG_ERROR << \"can not get session.\";\n        }\n\n        auto buffer_list = ext_data->CreateBufferList();\n        std::vector<size_t> buffer_shape(10, sizeof(int));\n        buffer_list->Build(buffer_shape);\n        for (size_t i = 0; i < 10; i++) {\n          auto *data = 
(int *)buffer_list->At(i)->MutableData();\n          *data = i;\n        }\n\n        auto status = ext_data->Send(buffer_list);\n        if (!status) {\n          MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n        }\n\n        status = ext_data->Close();\n        if (!status) {\n          MBLOG_ERROR << \"external data close failed:\" << status;\n        }\n      }\n    });\n\n    is_closed = false;\n    listener_thread = std::make_shared<std::thread>(std::move(task));\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    auto *session_content = (int *)(session_ctx->GetPrivate(\"session\").get());\n    MBLOG_INFO << \"session_content is \" << session_content[0];\n\n    auto external = data_ctx->External();\n    auto bytes = external->GetBytes();\n\n    auto output_buf_1 = data_ctx->Output(\"Out_1\");\n\n    std::vector<size_t> data_1_shape(bytes / sizeof(int), sizeof(int));\n    output_buf_1->Build(data_1_shape);\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      auto *dev_data_1 = (int *)(output_buf_1->At(i)->MutableData());\n      *dev_data_1 = *((int *)(external->At(i)->ConstData()));\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto close_func =\n      [=](const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (listener_thread && listener_thread->joinable()) {\n      is_closed = true;\n      listener_thread->join();\n      listener_thread = nullptr;\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterCloseFunc(close_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_1_0_Batch_Flowunit() {\n  auto mock_desc = 
GenerateFlowunitDesc(\"test_1_0_batch\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    int ending;\n    bool flag = input_bufs_1->At(0)->Get(\"ending\", ending);\n    if (flag) {\n      MBLOG_INFO << ending;\n    }\n    MBLOG_INFO << *((int *)input_bufs_1->ConstData());\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_STOP;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_1_0_Batch_Thread_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_1_0_batch_thread\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    int ending;\n    bool flag = input_bufs_1->At(0)->Get(\"ending\", ending);\n    if (flag) {\n      MBLOG_INFO << ending;\n    }\n    MBLOG_INFO << *((int *)input_bufs_1->ConstData());\n\n    run_count += input_bufs_1->Size();\n\n    return modelbox::STATUS_OK;\n  
};\n\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (MAX_COUNT < run_count) {\n      MBLOG_DEBUG << \"check reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_2_0_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_2_0\", {\"In_1\", \"In_2\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_STOP;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_OK_2_0_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_ok_2_0\", {\"In_1\", \"In_2\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_Orgin_0_2_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"test_orgin_0_2\", {}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetFlowType(STREAM);\n  auto process_func =\n      
[=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto external = data_ctx->External();\n    auto *external_data_1 = (int *)(*external)[0]->ConstData();\n    auto bytes = external->GetBytes();\n\n    auto output_buf_1 = data_ctx->Output(\"Out_1\");\n    auto output_buf_2 = data_ctx->Output(\"Out_2\");\n\n    std::vector<size_t> data_1_shape(10, 4);\n    output_buf_1->Build(data_1_shape);\n    auto *dev_data_1 = (int *)(output_buf_1->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_1[i] = external_data_1[i];\n    }\n\n    std::vector<size_t> data_2_shape(10, 4);\n    output_buf_2->Build(data_2_shape);\n    auto *dev_data_2 = (int *)(output_buf_2->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_2[i] = external_data_1[i] + 10;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({10 * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    for (size_t i = 0; i < 10; i++) {\n      data[i] = i;\n    }\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterOpenFunc(open_func);\n  AddFlowUnitDesc(mock_desc, 
mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Loop_End_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"loop_end\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(NORMAL);\n  mock_desc->SetDefaultBatchSize(1);\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n\n    auto device = mock_flowunit->GetBindDevice();\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0] * 2;\n\n      output_bufs_1->PushBack(buffer_ptr);\n    }\n\n    for (size_t i = 0; i < output_bufs_1->Size(); ++i) {\n      int ending = 0;\n      input_bufs_1->At(i)->Get(\"ending\", ending);\n      output_bufs_1->At(i)->Set(\"ending\", ending);\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Listen_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"listen\", {}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetDefaultBatchSize(1);\n\n  static std::atomic<bool> is_closed(false);\n  static std::shared_ptr<std::thread> listener_thread = nullptr;\n  static int32_t interval_time = 5 * 1000;\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    interval_time = opts->GetInt32(\"interval_time\", 5000);\n    std::packaged_task<void()> task([=]() {\n      while (!is_closed) {\n        
if (interval_time > 0) {\n          usleep(interval_time);\n        }\n\n        auto ext_data = mock_flowunit->CreateExternalData();\n        if (!ext_data) {\n          MBLOG_ERROR << \"can not get external data.\";\n          continue;\n        }\n\n        auto session = ext_data->GetSessionContext();\n        if (!session) {\n          MBLOG_ERROR << \"can not get session.\";\n          continue;\n        }\n\n        auto buffer_list = ext_data->CreateBufferList();\n        TensorList ext_tl(buffer_list);\n\n        constexpr int BUFF_SIZE = 10;\n        ext_tl.Build<int>({BUFF_SIZE, {1}});\n        auto *dev_data = ext_tl.MutableData<int>();\n        for (size_t i = 0; i < BUFF_SIZE; ++i) {\n          dev_data[i] = i;\n        }\n\n        auto status = ext_data->Send(buffer_list);\n        if (!status) {\n          MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n          continue;\n        }\n\n        status = ext_data->Close();\n        if (!status) {\n          MBLOG_ERROR << \"external data close failed:\" << status;\n          continue;\n        }\n\n        MBLOG_DEBUG << \"listen send event.\";\n      }\n    });\n\n    is_closed = false;\n    listener_thread = std::make_shared<std::thread>(std::move(task));\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &op_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_buf_1 = op_ctx->Output(\"Out_1\");\n    auto output_buf_2 = op_ctx->Output(\"Out_2\");\n    auto ext_buf = op_ctx->External();\n\n    TensorList output_tl_1(output_buf_1);\n    TensorList output_tl_2(output_buf_2);\n    TensorList ext_tl_1(ext_buf);\n\n    output_tl_1.Build<int>(ext_tl_1.GetShape());\n    output_tl_2.Build<int>(ext_tl_1.GetShape());\n\n    const auto *const dev_data = ext_tl_1.ConstData<int>();\n    auto *out_data_1 = output_tl_1.MutableData<int>();\n    auto *out_data_2 = 
output_tl_2.MutableData<int>();\n    for (size_t i = 0; i < ext_tl_1.Size(); ++i) {\n      out_data_1[i] = dev_data[i];\n      out_data_2[i] = dev_data[i] + 10;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto close_func =\n      [=](const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (listener_thread && listener_thread->joinable()) {\n      is_closed = true;\n      listener_thread->join();\n      listener_thread = nullptr;\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterCloseFunc(close_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_ExternData_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"extern_data\", {}, {\"Out_1\"});\n  mock_desc->SetDefaultBatchSize(1);\n\n  static std::atomic<bool> is_closed(false);\n  static std::shared_ptr<std::thread> listener_thread = nullptr;\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    std::packaged_task<void()> task([=]() {\n      while (!is_closed) {\n        usleep(5 * 1000);\n        auto ext_data = mock_flowunit->CreateExternalData();\n        if (!ext_data) {\n          MBLOG_ERROR << \"can not get external data.\";\n          continue;\n        }\n\n        auto session = ext_data->GetSessionContext();\n        if (!session) {\n          MBLOG_ERROR << \"can not get session.\";\n          continue;\n        }\n\n        auto buffer_list = ext_data->CreateBufferList();\n        buffer_list->Build({1, 10 * sizeof(int)});\n        auto *data = (int *)buffer_list->MutableData();\n        for (size_t i = 0; i < 10; i++) {\n          data[i] = i;\n        }\n\n        auto status = ext_data->Send(buffer_list);\n        if (!status) {\n       
   MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n          continue;\n        }\n\n        status = ext_data->Close();\n        if (!status) {\n          MBLOG_ERROR << \"external data close failed:\" << status;\n          continue;\n        }\n\n        MBLOG_DEBUG << \"listen send event.\";\n      }\n    });\n\n    is_closed = false;\n    listener_thread = std::make_shared<std::thread>(std::move(task));\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &op_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_buf_1 = op_ctx->Output(\"Out_1\");\n    auto ext_buf = op_ctx->External();\n\n    for (auto &buffer : *ext_buf) {\n      output_buf_1->PushBack(buffer);\n    }\n\n    MBLOG_DEBUG << \"test_0_2 gen data\";\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto close_func =\n      [=](const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (listener_thread && listener_thread->joinable()) {\n      is_closed = true;\n      listener_thread->join();\n      listener_thread = nullptr;\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterCloseFunc(close_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Test_2_inputs_2_outputs_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"test_2_inputs_2_outputs\",\n                                        {\"In_1\", \"In_2\"}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetFlowType(STREAM);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Loop_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"loop\", {\"In_1\"}, {\"Out_1\", 
\"Out_2\"});\n  mock_desc->SetDefaultBatchSize(1);\n  mock_desc->SetLoopType(LOOP);\n  mock_desc->SetFlowType(NORMAL);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    int ending = 0;\n\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto output_bufs_2 = data_ctx->Output(\"Out_2\");\n\n    auto device = mock_flowunit->GetBindDevice();\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      bool flag = input_bufs_1->At(0)->Get(\"ending\", ending);\n      if (!flag) {\n        ending = 0;\n      }\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0] + 1;\n      if (ending == 9) {\n        output_bufs_2->PushBack(buffer_ptr);\n      } else {\n        output_bufs_1->PushBack(buffer_ptr);\n      }\n    }\n    ending++;\n    for (size_t i = 0; i < output_bufs_1->Size(); ++i) {\n      output_bufs_1->At(i)->Set(\"ending\", ending);\n    }\n\n    for (size_t i = 0; i < output_bufs_2->Size(); ++i) {\n      output_bufs_2->At(i)->Set(\"ending\", ending);\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Condition_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"condition\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetConditionType(IF_ELSE);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> 
&mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto output_bufs_2 = data_ctx->Output(\"Out_2\");\n\n    auto device = mock_flowunit->GetBindDevice();\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0];\n      if (input_data[0] % 2 == 0) {\n        output_bufs_1->PushBack(buffer_ptr);\n      } else {\n        output_bufs_2->PushBack(buffer_ptr);\n      }\n    }\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Switch_Case_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"switch_case\", {\"In_1\"},\n                                        {\"Out_1\", \"Out_2\", \"Out_3\"});\n  mock_desc->SetConditionType(IF_ELSE);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto output_bufs_2 = data_ctx->Output(\"Out_2\");\n    auto output_bufs_3 = data_ctx->Output(\"Out_3\");\n\n    auto device = mock_flowunit->GetBindDevice();\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0];\n      if (input_data[0] % 3 == 0) {\n 
       output_bufs_1->PushBack(buffer_ptr);\n      } else if (input_data[0] % 3 == 1) {\n        output_bufs_2->PushBack(buffer_ptr);\n      } else {\n        output_bufs_3->PushBack(buffer_ptr);\n      }\n    }\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Half_Condition_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"half-condition\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetConditionType(IF_ELSE);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto output_bufs_2 = data_ctx->Output(\"Out_2\");\n\n    auto device = mock_flowunit->GetBindDevice();\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0];\n      if (input_data[0] >= 5) {\n        output_bufs_1->PushBack(buffer_ptr);\n      } else {\n        output_bufs_2->PushBack(buffer_ptr);\n      }\n    }\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Normal_Condition_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"normal-condition\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n  mock_desc->SetConditionType(IF_ELSE);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const 
std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto output_bufs_2 = data_ctx->Output(\"Out_2\");\n\n    auto device = mock_flowunit->GetBindDevice();\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      output_data[0] = input_data[0];\n      if (input_data[0] >= 5) {\n        output_bufs_1->PushBack(buffer_ptr);\n      } else {\n        output_bufs_2->PushBack(buffer_ptr);\n      }\n    }\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Expand_Normal_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"expand_normal\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(EXPAND);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    session_ctx->SetPrivate(\"session\", std::make_shared<std::string>(\"111\"));\n    auto res = session_ctx->GetPrivate(\"session\");\n    MBLOG_INFO << \"res normal expand: \" << res;\n\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n\n    auto *input_data = (int *)(*input_bufs_1)[0]->ConstData();\n    std::vector<size_t> data_shape(5, 4);\n    output_bufs_1->Build(data_shape);\n    auto *output_data = (int *)output_bufs_1->MutableData();\n    for (uint32_t j = 0; j < 5; j++) {\n      
output_data[j] = input_data[0] + j;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Collapse_Normal_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"collapse_normal\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    std::vector<size_t> data_shape(1, 4);\n    output_bufs_1->Build(data_shape);\n    auto *output_data = (int *)output_bufs_1->MutableData();\n    auto *input_data = (int *)input_bufs_1->ConstData();\n    output_data[0] = 0;\n    for (uint32_t j = 0; j < 5; j++) {\n      output_data[0] += input_data[j];\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Expand_Stream_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"expand_stream\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(EXPAND);\n  mock_desc->SetFlowType(STREAM);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n\n    auto *input_data = (int *)(*input_bufs_1)[0]->ConstData();\n    std::vector<size_t> data_shape(5, 4);\n    output_bufs_1->Build(data_shape);\n    auto *output_data = (int *)output_bufs_1->MutableData();\n   
 for (uint32_t j = 0; j < 5; j++) {\n      output_data[j] = input_data[0] + j;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Collapse_Stream_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"collapse_stream\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    std::vector<size_t> data_shape(1, 4);\n    output_bufs_1->Build(data_shape);\n    auto *output_data = (int *)output_bufs_1->MutableData();\n    auto *input_data = (int *)input_bufs_1->ConstData();\n    output_data[0] = 0;\n    for (uint32_t j = 0; j < 5; j++) {\n      output_data[0] += input_data[j];\n    }\n\n    return modelbox::STATUS_OK;\n  };\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nStatus Add_Funciton(const std::shared_ptr<DataContext> &data_ctx,\n                    const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n  auto input_bufs_1 = data_ctx->Input(\"In_1\");\n  auto input_bufs_2 = data_ctx->Input(\"In_2\");\n  auto output_bufs = data_ctx->Output(\"Out_1\");\n\n  if (input_bufs_1->Size() <= 0 || input_bufs_2->Size() <= 0) {\n    return STATUS_FAULT;\n  }\n\n  std::vector<size_t> shape(input_bufs_1->Size(),\n                            (*input_bufs_1)[0]->GetBytes());\n  output_bufs->Build(shape);\n  for (size_t i = 0; i < input_bufs_1->Size(); ++i) {\n    auto *input_data_1 = (int *)(*input_bufs_1)[i]->ConstData();\n    auto 
*input_data_2 = (int *)(*input_bufs_2)[i]->ConstData();\n    auto *output_data = (int *)(*output_bufs)[i]->MutableData();\n    auto data_size = (*input_bufs_1)[i]->GetBytes() / sizeof(int);\n    for (size_t j = 0; j < data_size; ++j) {\n      output_data[j] = input_data_1[j] + input_data_2[j];\n    }\n  }\n  return modelbox::STATUS_OK;\n}\n\nvoid MockFlow::Register_Stream_Add_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"stream_add\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetStreamSameCount(true);\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Add_Funciton);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Add_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"add\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Add_Funciton);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nStatus Wrong_Add_Funciton(const std::shared_ptr<DataContext> &data_ctx,\n                          const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n  auto input_bufs_1 = data_ctx->Input(\"In_1\");\n  auto input_bufs_2 = data_ctx->Input(\"In_2\");\n  auto output_bufs = data_ctx->Output(\"Out_1\");\n\n  if (input_bufs_1->Size() <= 0 || input_bufs_2->Size() <= 0) {\n    return STATUS_FAULT;\n  }\n\n  std::vector<size_t> shape(input_bufs_1->Size() + 2,\n                            (*input_bufs_1)[0]->GetBytes());\n  output_bufs->Build(shape);\n  for (size_t i = 0; i < input_bufs_1->Size(); ++i) {\n    auto *input_data_1 = (int *)(*input_bufs_1)[i]->ConstData();\n    auto *input_data_2 = (int *)(*input_bufs_2)[i]->ConstData();\n    auto *output_data = (int *)(*output_bufs)[i]->MutableData();\n    auto data_size = (*input_bufs_1)[i]->GetBytes() / sizeof(int);\n    for (size_t j = 0; j < 
data_size; ++j) {\n      output_data[j] = input_data_1[j] + input_data_2[j];\n    }\n  }\n  return modelbox::STATUS_OK;\n}\n\nvoid MockFlow::Register_Wrong_Add_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"wrong_add\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Wrong_Add_Funciton);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Wrong_Add_2_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"wrong_add_2\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetStreamSameCount(true);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Wrong_Add_Funciton);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Scatter_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"scatter\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetOutputType(EXPAND);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n\n    if (input_bufs->Size() != 1) {\n      return STATUS_FAULT;\n    }\n\n    auto size_byte = (*input_bufs)[0]->GetBytes() / sizeof(int);\n    auto *input_data_1 = (int *)(*input_bufs)[0]->ConstData();\n    std::vector<size_t> output_shape(size_byte, 1 * sizeof(int));\n    output_bufs->Build(output_shape);\n    auto *output_data_2 = (int *)(output_bufs->MutableData());\n    for (uint32_t i = 0; i < size_byte; i++) {\n      output_data_2[i] = input_data_1[i];\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const 
std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto session_ctx = data_ctx->GetSessionContext();\n    auto *session_content = (int *)(session_ctx->GetPrivate(\"session\").get());\n    MBLOG_INFO << \"session_content is \" << session_content[0];\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nstd::function<Status(std::shared_ptr<DataContext> data_ctx,\n                     std::shared_ptr<MockFlowUnit>)>\nGenerate_Garther_function(int32_t i) {\n  return [=](const std::shared_ptr<DataContext> &data_ctx,\n             const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n\n    uint32_t total_size = 0;\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      total_size += (*input_bufs)[i]->GetBytes();\n    }\n    std::vector<size_t> output_shape(i, total_size);\n    output_bufs->Build(output_shape);\n    auto *out_data = (int *)(output_bufs->MutableData());\n\n    size_t z = 0;\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto size_byte = (*input_bufs)[i]->GetBytes() / sizeof(int);\n      auto *input_data = (int *)(*input_bufs)[i]->ConstData();\n      for (uint32_t j = 0; j < size_byte; j++) {\n        out_data[z] = input_data[j];\n        z++;\n      }\n    }\n    return modelbox::STATUS_OK;\n  };\n}\n\nvoid MockFlow::Register_Garther_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"garther\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Generate_Garther_function(1));\n  
AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Garther_Gen_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"garther_gen_more\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Generate_Garther_function(2));\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Print_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"print\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    const auto input_bufs = data_ctx->Input(\"In_1\");\n\n    std::stringstream ostr;\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto *input_data = (int *)(*input_bufs)[i]->ConstData();\n      auto data_size = (*input_bufs)[i]->GetBytes() / sizeof(int);\n      for (size_t j = 0; j < data_size; ++j) {\n        ostr << input_data[j] << \" \";\n      }\n    }\n\n    MBLOG_DEBUG << ostr.str();\n\n    if (MAX_COUNT < run_count++) {\n      MBLOG_DEBUG << \"print reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterOpenFunc(open_func);\n  
AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Check_Print_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"check_print\", {\"IN1\", \"IN2\", \"IN3\"}, {});\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n  static std::atomic<bool> is_print(false);\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"IN1\");\n    auto input_bufs_2 = data_ctx->Input(\"IN2\");\n    auto input_bufs_3 = data_ctx->Input(\"IN3\");\n\n    if (input_bufs_1->Size() == 0 || input_bufs_2->Size() == 0 ||\n        input_bufs_3->Size() == 0 ||\n        (input_bufs_1->Size() != input_bufs_2->Size()) ||\n        (input_bufs_2->Size() != input_bufs_3->Size())) {\n      return modelbox::STATUS_SUCCESS;\n    }\n\n    for (size_t i = 0; i < input_bufs_1->Size(); ++i) {\n      auto *const in_data_1 = (int *)input_bufs_1->ConstBufferData(i);\n      auto *const in_data_2 = (int *)input_bufs_2->ConstBufferData(i);\n      auto *const in_data_3 = (int *)input_bufs_3->ConstBufferData(i);\n      if (in_data_3[0] != in_data_1[0] + in_data_2[0]) {\n        return STATUS_SHUTDOWN;\n      }\n    }\n\n    static auto begin_time = GetTickCount();\n    static std::atomic<uint64_t> print_time{GetTickCount()};\n\n    run_count += input_bufs_1->Size();\n    if (MAX_COUNT < run_count) {\n      MBLOG_DEBUG << \"check reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n\n    auto end_time = GetTickCount();\n    if (end_time - print_time > 1000) {\n      auto expected = false;\n      
if (is_print.compare_exchange_weak(expected, true)) {\n        MBLOG_INFO << \"Average throughput: \"\n                   << (run_count * 1000) / (end_time - begin_time) << \"/s\";\n        is_print = false;\n        print_time = GetTickCount();\n      }\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterOpenFunc(open_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Dynamic_Config_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"dynamic_config\", {}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &opts,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    auto ext_data = mock_flowunit->CreateExternalData();\n    if (!ext_data) {\n      MBLOG_ERROR << \"can not get external data.\";\n    }\n    auto config = ext_data->GetSessionConfig();\n    config->SetProperty(\"nodes.test\", \"nodes.test\");\n    config->SetProperty(\"flowunit.dynamic_get_config.test\",\n                        \"flowunit.dynamic_get_config.test\");\n    config->SetProperty(\"node.dynamic_get_config_1.test\",\n                        \"node.dynamic_get_config_1.test\");\n\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({3 * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    data[0] = 0;\n    data[1] = 15;\n    data[2] = 3;\n\n    auto status = ext_data->Send(buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    MBLOG_INFO << \"listen send event.\";\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const 
std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n    auto external = data_ctx->External();\n    auto *external_data_1 = (int *)(*external)[0]->ConstData();\n    auto bytes = external->GetBytes();\n\n    std::vector<size_t> data_1_shape({bytes});\n    output_bufs->Build(data_1_shape);\n    auto *dev_data_1 = (int *)(output_bufs->MutableData());\n    for (size_t i = 0; i < bytes / sizeof(int); ++i) {\n      dev_data_1[i] = external_data_1[i];\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterOpenFunc(open_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Dynamic_Get_Config_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"dynamic_get_config\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n\n    auto device = mock_flowunit->GetBindDevice();\n    auto config = data_ctx->GetSessionConfig();\n    auto test = config->GetProperty(\"test\", std::string(\"\"));\n\n    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {\n      auto input_buffer = (*input_bufs_1)[i];\n      auto *input_data = (int *)input_buffer->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      buffer_ptr->Build(1 * sizeof(int));\n      auto *output_data = (int *)buffer_ptr->MutableData();\n      buffer_ptr->Set(\"test\", test);\n      output_data[0] = input_data[0];\n      output_bufs_1->PushBack(buffer_ptr);\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto 
// Tail of a Register_* helper whose definition starts before this chunk:
// wires the process callback into a MockFunctionCollection and publishes
// the flowunit description.
mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers the "dynamic_get_config_other" STREAM flowunit. Its process
// callback copies each int input buffer into a fresh one-int output buffer
// and attaches the session-config property "test" as buffer metadata.
void MockFlow::Register_Dynamic_Get_Config_Other_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("dynamic_get_config_other", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto input_bufs_1 = data_ctx->Input("In_1");
    auto output_bufs_1 = data_ctx->Output("Out_1");

    auto device = mock_flowunit->GetBindDevice();

    // Read the per-session configuration value "test"; defaults to "".
    auto config = data_ctx->GetSessionConfig();
    auto test = config->GetProperty("test", std::string(""));

    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {
      auto input_buffer = (*input_bufs_1)[i];
      auto *input_data = (int *)input_buffer->ConstData();
      auto buffer_ptr = std::make_shared<Buffer>(device);
      buffer_ptr->Build(1 * sizeof(int));
      // Propagate the session config value as buffer meta "test".
      buffer_ptr->Set("test", test);
      auto *output_data = (int *)buffer_ptr->MutableData();
      output_data[0] = input_data[0];
      output_bufs_1->PushBack(buffer_ptr);
    }
    return modelbox::STATUS_OK;
  };

  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Factory for an Open callback shared by the stream_info flowunits: sends one
// external buffer holding the ints {0, i, 3}, stores an int 1111 in the
// session context under "session", then closes the external data port.
// ("Genrate" is a long-standing typo in the public name; kept for callers.)
std::function<Status(const std::shared_ptr<Configuration> &,
                     std::shared_ptr<MockFlowUnit>)>
Genrate_Stream_Open(uint32_t i) {
  return [=](const std::shared_ptr<Configuration> &opts,
             const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
    auto ext_data = mock_flowunit->CreateExternalData();
    if (!ext_data) {
      MBLOG_ERROR << "can not get external data.";
    }

    auto session_ctx = ext_data->GetSessionContext();
    auto session_content = std::make_shared<int>(1111);
    session_ctx->SetPrivate("session", session_content);

    // NOTE(review): session_ctx is already dereferenced above, so this check
    // can only log after the fact — confirm whether it should come earlier.
    if (!session_ctx) {
      MBLOG_ERROR << "can not get session.";
    }

    auto buffer_list = ext_data->CreateBufferList();
    buffer_list->Build({3 * sizeof(int)});
    auto *data = (int *)buffer_list->MutableData();
    data[0] = 0;
    data[1] = i;
    data[2] = 3;

    auto status = ext_data->Send(buffer_list);
    if (!status) {
      MBLOG_ERROR << "external data send buffer list failed:" << status;
    }

    status = ext_data->Close();
    if (!status) {
      MBLOG_ERROR << "external data close failed:" << status;
    }

    MBLOG_INFO << "listen send event.";
    return modelbox::STATUS_OK;
  };
}

// Process callback shared by the stream_info flowunits: logs the session
// content stored by the Open callback and copies the external input bytes
// into the "Out_1" buffer list as ints.
Status Stream_Process(const std::shared_ptr<DataContext> &data_ctx,
                      const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  auto session_ctx = data_ctx->GetSessionContext();
  auto *session_content = (int *)(session_ctx->GetPrivate("session").get());
  MBLOG_INFO << "session_content is " << session_content[0];

  auto output_bufs = data_ctx->Output("Out_1");
  auto external = data_ctx->External();
  auto *external_data_1 = (int *)(*external)[0]->ConstData();
  auto bytes = external->GetBytes();

  // One output buffer sized to the whole external payload.
  std::vector<size_t> data_1_shape({bytes});
  output_bufs->Build(data_1_shape);
  auto *dev_data_1 = (int *)(output_bufs->MutableData());
  for (size_t i = 0; i < bytes / sizeof(int); ++i) {
    dev_data_1[i] = external_data_1[i];
  }
  return modelbox::STATUS_OK;
}

// DataPre callback: publishes a "magic_num" meta (3343) on the Out_1 stream.
Status Stream_DataPre(const std::shared_ptr<DataContext> &data_ctx,
                      const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  auto output_meta = std::make_shared<DataMeta>();
  auto magic_num = std::make_shared<int>(3343);
  output_meta->SetMeta("magic_num", magic_num);
  data_ctx->SetOutputMeta("Out_1", output_meta);
  return modelbox::STATUS_OK;
}

// DataPost callback: intentionally a no-op.
Status Stream_DataPost(const std::shared_ptr<DataContext> &data_ctx,
                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  return modelbox::STATUS_OK;
}

// Registers "stream_info": STREAM source with open/process/data-pre/data-post
// callbacks; the Open callback seeds external data {0, 15, 3}.
void MockFlow::Register_Stream_Info_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_info", {}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Stream_Process);
  mock_funcitons->RegisterDataPreFunc(Stream_DataPre);
  mock_funcitons->RegisterOpenFunc(Genrate_Stream_Open(15));
  mock_funcitons->RegisterDataPostFunc(Stream_DataPost);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_normal_info": like stream_info but without a DataPost
// callback; the Open callback seeds external data {0, 25, 3}.
void MockFlow::Register_Stream_Normal_Info_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_normal_info", {}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Stream_Process);
  mock_funcitons->RegisterDataPreFunc(Stream_DataPre);
  mock_funcitons->RegisterOpenFunc(Genrate_Stream_Open(25));
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_normal_info_2": its Open callback sends two 8-byte
// buffers carrying the int pairs {0, 5} and {0, 10}.
void MockFlow::Register_Stream_Normal_Info_2_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_normal_info_2", {}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  auto open_func = [=](const std::shared_ptr<Configuration> &opts,
                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
    auto ext_data = mock_flowunit->CreateExternalData();
    if (!ext_data) {
      MBLOG_ERROR << "can not get external data.";
    }

    auto buffer_list = ext_data->CreateBufferList();
    buffer_list->Build({8, 8});
    auto *data = (int *)buffer_list->MutableData();
    data[0] = 0;
    data[1] = 5;

    data[2] = 0;
    data[3] =
10;  // completes the assignment started on the previous source line

    auto status = ext_data->Send(buffer_list);
    if (!status) {
      MBLOG_ERROR << "external data send buffer list failed:" << status;
    }

    status = ext_data->Close();
    if (!status) {
      MBLOG_ERROR << "external data close failed:" << status;
    }

    MBLOG_INFO << "listen send event.";
    return modelbox::STATUS_OK;
  };

  // Process: copy the external int payload into a two-buffer (8,8) output.
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto output_bufs = data_ctx->Output("Out_1");
    auto external = data_ctx->External();
    auto *external_data_1 = (int *)(*external)[0]->ConstData();
    auto bytes = external->GetBytes();

    std::vector<size_t> data_1_shape(2, 8);
    output_bufs->Build(data_1_shape);
    auto *dev_data_1 = (int *)(output_bufs->MutableData());
    for (size_t i = 0; i < bytes / sizeof(int); ++i) {
      dev_data_1[i] = external_data_1[i];
    }

    return modelbox::STATUS_OK;
  };

  mock_funcitons->RegisterProcessFunc(process_func);
  mock_funcitons->RegisterDataPreFunc(Stream_DataPre);
  mock_funcitons->RegisterOpenFunc(open_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_start": STREAM flowunit with EXPAND output. Each process
// call emits five consecutive ints starting at the private "now_index", then
// re-schedules itself via a FlowUnitEvent until end_index is reached.
void MockFlow::Register_Stream_Start_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_start", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetOutputType(EXPAND);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    MBLOG_INFO << "stream_start process";
    auto output_bufs = data_ctx->Output("Out_1");
    auto now_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("now_index")).get());
    auto end_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("end_index")).get());

    // Emit five one-int buffers: now_index .. now_index + 4.
    std::vector<size_t> shape(5, sizeof(int));
    output_bufs->Build(shape);
    for (size_t i = 0; i < 5; ++i) {
      auto *output_data = (int *)(*output_bufs)[i]->MutableData();
      output_data[0] = now_index + i;
    }
    now_index = now_index + 5;
    auto now_index_content = std::make_shared<int>(now_index);
    data_ctx->SetPrivate("now_index", now_index_content);
    // Schedule another round while at least five more indices remain.
    if (now_index + 5 <= end_index) {
      auto event = std::make_shared<FlowUnitEvent>();
      data_ctx->SendEvent(event);
      return modelbox::STATUS_CONTINUE;
    }
    return modelbox::STATUS_OK;
  };

  // DataPre: parse {start, end, interval} from the first input buffer; stash
  // start/end as private state and publish all three as Out_1 stream meta.
  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto session_ctx = data_ctx->GetSessionContext();
    auto *session_content = (int *)(session_ctx->GetPrivate("session").get());
    MBLOG_INFO << "session_content is " << session_content[0];

    auto input_bufs = data_ctx->Input("In_1");
    auto *input_data = (int *)(*input_bufs)[0]->ConstData();
    auto start_index = input_data[0];
    auto end_index = input_data[1];
    auto interval = input_data[2];
    auto start_index_content = std::make_shared<int>(start_index);
    data_ctx->SetPrivate("now_index", start_index_content);
    auto end_index_content = std::make_shared<int>(end_index);
    data_ctx->SetPrivate("end_index", end_index_content);
    auto interval_content = std::make_shared<int>(interval);
    auto output_meta = std::make_shared<DataMeta>();
    output_meta->SetMeta("start_index", start_index_content);
    output_meta->SetMeta("end_index", end_index_content);
    output_meta->SetMeta("interval", interval_content);
    data_ctx->SetOutputMeta("Out_1", output_meta);
    return modelbox::STATUS_OK;
  };

  // DataPost: intentionally a no-op.
  auto data_post_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    return modelbox::STATUS_OK;
  };

  mock_funcitons->RegisterProcessFunc(process_func);
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  mock_funcitons->RegisterDataPostFunc(data_post_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "normal_expand_start": NORMAL flowunit with EXPAND output; same
// five-at-a-time expansion as stream_start, but it initializes its private
// state on the first (event-less) invocation instead of in DataPre.
void MockFlow::Register_Normal_Expand_Start_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("normal_expand_start", {"In_1"}, {"Out_1"});
  mock_desc->SetOutputType(EXPAND);
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto event = data_ctx->Event();
    // First invocation (no event yet): read start/end from the input buffer.
    if (event == nullptr) {
      auto input_bufs = data_ctx->Input("In_1");
      auto *input_data = (int *)(*input_bufs)[0]->ConstData();
      auto start_index = input_data[0];
      auto end_index = input_data[1];
      auto start_index_content = std::make_shared<int>(start_index);
      data_ctx->SetPrivate("now_index", start_index_content);
      auto end_index_content = std::make_shared<int>(end_index);
      data_ctx->SetPrivate("end_index", end_index_content);
    }

    auto output_bufs = data_ctx->Output("Out_1");
    auto now_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("now_index")).get());
    auto end_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("end_index")).get());

    std::vector<size_t> shape(5, sizeof(int));
    output_bufs->Build(shape);
    for (size_t i = 0; i < 5; ++i) {
      auto *output_data = (int *)(*output_bufs)[i]->MutableData();
      output_data[0] = now_index + i;
    }
    now_index = now_index + 5;
    auto now_index_content = std::make_shared<int>(now_index);
    data_ctx->SetPrivate("now_index", now_index_content);
    if (now_index + 5 <= end_index) {
      auto event =
std::make_shared<FlowUnitEvent>();  // completes the statement split above
      data_ctx->SendEvent(event);
      return modelbox::STATUS_CONTINUE;
    }
    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_tail_filter": STREAM flowunit (max batch 16) that passes
// through values below 10 and stops scanning once it sees end_index - 1.
void MockFlow::Register_Stream_Tail_Filter_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("stream_tail_filter", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetMaxBatchSize(16);
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto output_bufs = data_ctx->Output("Out_1");
    auto input_bufs = data_ctx->Input("In_1");

    auto device = mock_flowunit->GetBindDevice();

    // end_index was stored as private state in data_pre below.
    auto end_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("end_index")).get());

    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)(*input_bufs)[i]->ConstData();
      if (input_data[0] < 10) {
        auto buffer_ptr = std::make_shared<Buffer>(device);
        buffer_ptr->Build(1 * sizeof(int));
        auto *output_data = (int *)buffer_ptr->MutableData();
        output_data[0] = input_data[0];
        output_bufs->PushBack(buffer_ptr);
      }

      // Early exit once the last expected index is reached.
      if (input_data[0] == end_index - 1) {
        return modelbox::STATUS_OK;
      }
    }
    return modelbox::STATUS_OK;
  };

  // DataPre: copy start/end meta from the input stream onto the output
  // stream and keep end_index as private state for process.
  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto meta = data_ctx->GetInputMeta("In_1");
    auto start_index =
        *(std::static_pointer_cast<int>(meta->GetMeta("start_index")).get());
    auto end_index =
        *(std::static_pointer_cast<int>(meta->GetMeta("end_index")).get());
    data_ctx->SetPrivate("end_index", std::make_shared<int>(end_index));
    auto output_meta = std::make_shared<DataMeta>();
    output_meta->SetMeta("start_index", std::make_shared<int>(start_index));
    output_meta->SetMeta("end_index", std::make_shared<int>(end_index));
    data_ctx->SetOutputMeta("Out_1", output_meta);

    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_mid": STREAM flowunit that forwards every value divisible
// by "interval"; returns STATUS_OK when end_index - 1 is seen, otherwise
// STATUS_CONTINUE to keep the stream alive.
void MockFlow::Register_Stream_Mid_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_mid", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto output_bufs = data_ctx->Output("Out_1");
    auto input_bufs = data_ctx->Input("In_1");

    auto device = mock_flowunit->GetBindDevice();

    auto interval = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("interval")).get());
    auto end_index = *(
        std::static_pointer_cast<int>(data_ctx->GetPrivate("end_index")).get());

    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)(*input_bufs)[i]->ConstData();
      if (input_data[0] % interval == 0) {
        auto buffer_ptr = std::make_shared<Buffer>(device);
        buffer_ptr->Build(1 * sizeof(int));
        auto *output_data = (int *)buffer_ptr->MutableData();
        output_data[0] = input_data[0];
        output_bufs->PushBack(buffer_ptr);
      }

      if (input_data[0] == end_index - 1) {
        return modelbox::STATUS_OK;
      }
    }
    return modelbox::STATUS_CONTINUE;
  };

  // DataPre: pull interval/end_index from input stream meta into private
  // state; also logs the session content set by the stream source.
  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto session_ctx = data_ctx->GetSessionContext();
    auto *session_content = (int *)(session_ctx->GetPrivate("session").get());
    auto meta = data_ctx->GetInputMeta("In_1");

    std::shared_ptr<Device> device;
    auto interval =
        *(std::static_pointer_cast<int>(meta->GetMeta("interval")).get());
    auto end_index =
        *(std::static_pointer_cast<int>(meta->GetMeta("end_index")).get());
    data_ctx->SetPrivate("interval", std::make_shared<int>(interval));
    data_ctx->SetPrivate("end_index", std::make_shared<int>(end_index));

    MBLOG_INFO << "session_content is " << session_content[0];
    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_end": STREAM flowunit with COLLAPSE output (not collapse
// all). It accumulates the running sum of its inputs in private state and
// emits the total as a single buffer when the sentinel value 12 arrives.
void MockFlow::Register_Stream_End_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("stream_end", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetOutputType(COLLAPSE);
  mock_desc->SetCollapseAll(false);

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto input_bufs = data_ctx->Input("In_1");
    auto data_meta = data_ctx->GetInputGroupMeta("In_1");
    if (data_meta != nullptr) {
      auto magic_num =
          *(std::static_pointer_cast<int>(data_meta->GetMeta("magic_num")));
      MBLOG_INFO << "Data Process magic_num " << magic_num;
    }

    // Lazily initialize the running total on the first call.
    if (data_ctx->GetPrivate("total_count") == nullptr) {
      auto total_count = std::make_shared<int>(0);
      data_ctx->SetPrivate("total_count", total_count);
    }
    auto total_count =
        *(std::static_pointer_cast<int>(data_ctx->GetPrivate("total_count"))
              .get());

    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)(*input_bufs)[i]->ConstData();
      total_count += input_data[0];
      // Sentinel value 12 terminates the stream: emit the accumulated sum.
      if (input_data[0] == 12) {
        auto output_bufs = data_ctx->Output("Out_1");
        auto device = mock_flowunit->GetBindDevice();
        auto buffer_ptr = std::make_shared<Buffer>(device);
        buffer_ptr->Build(1 * sizeof(int));
        auto *output_data = (int *)buffer_ptr->MutableData();
        output_data[0] = total_count;
        output_bufs->PushBack(buffer_ptr);
        return modelbox::STATUS_OK;
      }
    }
    auto new_total_count = std::make_shared<int>(total_count);
    data_ctx->SetPrivate("total_count", new_total_count);

    return modelbox::STATUS_OK;
  };

  // DataPre: log session content and group meta when present.
  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    MBLOG_INFO << "stream_end "
               << "DataGroupPre";
    auto session_ctx = data_ctx->GetSessionContext();
    if (session_ctx->GetPrivate("session") != nullptr) {
      auto *session_content = (int *)(session_ctx->GetPrivate("session").get());
      MBLOG_INFO << "session_content is " << session_content[0];
    }
    auto data_meta = data_ctx->GetInputGroupMeta("In_1");
    if (data_meta != nullptr) {
      auto magic_num =
          *(std::static_pointer_cast<int>(data_meta->GetMeta("magic_num")));
      MBLOG_INFO << "DataGroupPre magic_num " << magic_num;
    }

    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "add_1": STREAM flowunit that outputs input + 1 per element.
void MockFlow::Register_Add_1_Flowunit() {
  auto
mock_desc = GenerateFlowunitDesc("add_1", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    const auto input_bufs = data_ctx->Input("In_1");
    auto output_bufs = data_ctx->Output("Out_1");

    // An empty batch is treated as a fault for this test unit.
    if (input_bufs->Size() <= 0) {
      return STATUS_FAULT;
    }

    std::vector<size_t> shape(input_bufs->Size(), 1 * sizeof(int));
    output_bufs->Build(shape);
    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)input_bufs->At(i)->ConstData();
      auto *output_data = (int *)output_bufs->At(i)->MutableData();
      *output_data = *input_data + 1;
    }
    return modelbox::STATUS_OK;
  };

  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "iflow_add_1": functionally the same +1 pass as "add_1", kept as
// a separate name for the iflow test graphs.
void MockFlow::Register_Iflow_Add_1_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("iflow_add_1", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto input_bufs = data_ctx->Input("In_1");
    auto output_bufs = data_ctx->Output("Out_1");

    if (input_bufs->Size() <= 0) {
      return STATUS_FAULT;
    }

    std::vector<size_t> shape(input_bufs->Size(), 1 * sizeof(int));
    output_bufs->Build(shape);
    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)((*input_bufs)[i]->ConstData());
      auto *output_data = (int *)output_bufs->At(i)->MutableData();
      *output_data = *input_data + 1;
    }

    return modelbox::STATUS_OK;
  };

  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "add_1_and_error": adds 1 per element but aborts the whole batch
// with STATUS_INVALID when it encounters the sentinel input value 10.
void MockFlow::Register_Add_1_And_Error_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("add_1_and_error", {"In_1"}, {"Out_1"});

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    const auto input_bufs = data_ctx->Input("In_1");
    auto output_bufs = data_ctx->Output("Out_1");

    std::vector<size_t> shape(input_bufs->Size(), 1 * sizeof(int));
    output_bufs->Build(shape);
    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)input_bufs->At(i)->ConstData();
      if (*input_data == 10) {
        return modelbox::STATUS_INVALID;
      }

      auto *output_data = (int *)output_bufs->At(i)->MutableData();
      *output_data = *input_data + 1;
    }

    return modelbox::STATUS_SUCCESS;
  };

  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "test_condition": IF_ELSE flowunit routing even values to Out_1
// and odd values to Out_2; the sentinel value 10 triggers STATUS_INVALID.
void MockFlow::Register_Test_Condition_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("test_condition", {"In_1"}, {"Out_1", "Out_2"});
  mock_desc->SetConditionType(IF_ELSE);
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto input_bufs = data_ctx->Input("In_1");
    auto output_bufs_1 = data_ctx->Output("Out_1");
    auto output_bufs_2 = data_ctx->Output("Out_2");

    auto device = mock_flowunit->GetBindDevice();
    for (size_t i = 0; i < input_bufs->Size(); ++i) {
      auto *input_data = (int *)input_bufs->At(i)->ConstData();
      auto buffer = std::make_shared<Buffer>(device);
      buffer->Build(1 * sizeof(int));
      auto *output_data = (int *)buffer->MutableData();
      *output_data = *input_data;

      if (*input_data == 10) {
        return STATUS_INVALID;
      }

      // Even values go to the "if" branch, odd values to the "else" branch.
      if (*input_data % 2 == 0) {
        output_bufs_1->PushBack(buffer);
      } else {
        output_bufs_2->PushBack(buffer);
      }
    }

    return modelbox::STATUS_SUCCESS;
  };

  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "get_priority": STREAM flowunit with no callbacks; exists only so
// the graph can query its description.
void MockFlow::Register_Get_Priority_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("get_priority", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "error_start": STREAM source whose Open sends one (uninitialized)
// int buffer and whose process always fails with STATUS_INVALID.
void MockFlow::Register_Error_Start_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("error_start", {}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto open_func = [=](const std::shared_ptr<Configuration> &opts,
                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
    auto ext_data = mock_flowunit->CreateExternalData();
    if (!ext_data) {
      MBLOG_ERROR << "can not get external data.";
    }

    auto buffer_list = ext_data->CreateBufferList();
    buffer_list->Build({1 * sizeof(int)});
    auto status = ext_data->Send(buffer_list);
    if (!status) {
      MBLOG_ERROR << "external data send buffer list failed:" << status;
    }
    status = ext_data->Close();
    if (!status) {
      MBLOG_ERROR << "external data close failed:" << status;
    }

    MBLOG_INFO << "error start";
    return modelbox::STATUS_OK;
  };

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    return modelbox::STATUS_INVALID;
  };
  auto mock_funcitons =
std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterOpenFunc(open_func);
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "error_start_normal": NORMAL variant of error_start; Open sends
// one int buffer and process always returns STATUS_INVALID.
void MockFlow::Register_Error_Start_Normal_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("error_start_normal", {}, {"Out_1"});
  auto open_func = [=](const std::shared_ptr<Configuration> &opts,
                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
    auto ext_data = mock_flowunit->CreateExternalData();
    if (!ext_data) {
      MBLOG_ERROR << "can not get external data.";
    }

    auto buffer_list = ext_data->CreateBufferList();
    buffer_list->Build({1 * sizeof(int)});
    auto status = ext_data->Send(buffer_list);
    if (!status) {
      MBLOG_ERROR << "external data send buffer list failed:" << status;
    }
    status = ext_data->Close();
    if (!status) {
      MBLOG_ERROR << "external data close failed:" << status;
    }

    MBLOG_INFO << "error start";
    return modelbox::STATUS_OK;
  };

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    MBLOG_INFO << "error start process.";
    return modelbox::STATUS_INVALID;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterOpenFunc(open_func);
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "normal_start": STREAM source whose Open sends the uint32 pair
// {0, 16} and whose process expands that range into four output buffers
// filled with the indices 0..end-start-1.
void MockFlow::Register_Normal_Start_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("normal_start", {}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto open_func = [=](const std::shared_ptr<Configuration> &opts,
                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
    auto ext_data = mock_flowunit->CreateExternalData();
    if (!ext_data) {
      MBLOG_ERROR << "can not get external data.";
    }

    auto buffer_list = ext_data->CreateBufferList();
    buffer_list->Build({2 * sizeof(int)});
    auto *data = (int *)buffer_list->MutableData();
    data[0] = 0;
    data[1] = 16;

    auto status = ext_data->Send(buffer_list);
    if (!status) {
      MBLOG_ERROR << "external data send buffer list failed:" << status;
    }

    status = ext_data->Close();
    if (!status) {
      MBLOG_ERROR << "external data close failed:" << status;
    }

    MBLOG_INFO << "expand start";
    return modelbox::STATUS_OK;
  };

  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto output_bufs = data_ctx->Output("Out_1");
    auto external = data_ctx->External();
    auto *external_data_1 = (uint32_t *)(*external)[0]->ConstData();

    auto start = external_data_1[0];
    auto end = external_data_1[1];

    // Four equal-size buffers that together hold (end - start) uint32 values.
    std::vector<size_t> data_1_shape(4, {(end - start) * sizeof(uint32_t) / 4});
    output_bufs->Build(data_1_shape);
    auto *dev_data_1 = (int *)(output_bufs->MutableData());
    for (size_t i = 0; i < (end - start); ++i) {
      dev_data_1[i] = i;
    }

    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterOpenFunc(open_func);
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "expand_datapre_error": STREAM + EXPAND flowunit whose DataPre
// always fails, for testing error propagation out of DataPre.
void MockFlow::Register_Expand_Datapre_Error_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("expand_datapre_error", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetOutputType(EXPAND);

  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    return modelbox::STATUS_INVALID;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Expand process callback that fails on demand: expands the first input
// buffer's ints into one-int output buffers, but returns STATUS_INVALID when
// the first value equals 4.
Status Expand_Process_Error(
    const std::shared_ptr<DataContext> &data_ctx,
    const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  auto input_bufs_1 = data_ctx->Input("In_1");
  auto output_bufs_1 = data_ctx->Output("Out_1");
  auto device = mock_flowunit->GetBindDevice();
  auto *input_data = (int *)(input_bufs_1->At(0)->ConstData());
  auto input_count = input_bufs_1->At(0)->GetBytes() / sizeof(int);

  // Sentinel: the value 4 simulates a processing failure.
  if (input_data[0] == 4) {
    MBLOG_ERROR << "expand process return invalid.";
    return modelbox::STATUS_INVALID;
  }
  for (uint32_t i = 0; i < input_count; i++) {
    auto buffer_ptr = std::make_shared<Buffer>(device);
    buffer_ptr->Build(1 * sizeof(int));
    auto *output_data = (int *)buffer_ptr->MutableData();
    output_data[0] = input_data[i];
    output_bufs_1->PushBack(buffer_ptr);
  }
  return modelbox::STATUS_OK;
}

// Registers "normal_expand_process_error": NORMAL + EXPAND wrapper around
// Expand_Process_Error.
void MockFlow::Register_Normal_Expand_Process_Error_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("normal_expand_process_error", {"In_1"}, {"Out_1"});
  mock_desc->SetOutputType(EXPAND);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Expand_Process_Error);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "expand_process_error": STREAM + EXPAND wrapper around
// Expand_Process_Error.
void MockFlow::Register_Expand_Process_Error_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("expand_process_error", {"In_1"}, {"Out_1"});
  mock_desc->SetOutputType(EXPAND);
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Expand_Process_Error);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Expand process callback: forwards an error marker if the input buffer
// carries one, otherwise expands its ints into one-int output buffers.
Status Expand_Process(const std::shared_ptr<DataContext>
&data_ctx,
                      const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  auto input_buffer = data_ctx->Input("In_1")->At(0);
  // Propagate upstream errors as a single error-marked output buffer.
  if (input_buffer->HasError()) {
    auto output_bufs_1 = data_ctx->Output("Out_1");
    auto buffer_ptr = std::make_shared<Buffer>();
    buffer_ptr->SetError(input_buffer->GetErrorMsg(),
                         input_buffer->GetErrorCode());
    output_bufs_1->PushBack(buffer_ptr);
    return modelbox::STATUS_OK;
  }
  auto output_bufs_1 = data_ctx->Output("Out_1");
  auto device = mock_flowunit->GetBindDevice();

  auto *input_data = (int *)(input_buffer->ConstData());
  auto input_count = input_buffer->GetBytes() / sizeof(int);

  for (uint32_t i = 0; i < input_count; i++) {
    auto buffer_ptr = std::make_shared<Buffer>(device);
    buffer_ptr->Build(1 * sizeof(int));
    auto *output_data = (int *)buffer_ptr->MutableData();
    output_data[0] = input_data[i];
    output_bufs_1->PushBack(buffer_ptr);
  }
  return modelbox::STATUS_OK;
}

// Registers "normal_expand_process": NORMAL + EXPAND wrapper around
// Expand_Process.
void MockFlow::Register_Normal_Expand_Process_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("normal_expand_process", {"In_1"}, {"Out_1"});
  mock_desc->SetOutputType(EXPAND);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Expand_Process);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "expand_process": STREAM + EXPAND wrapper around Expand_Process.
void MockFlow::Register_Expand_Process_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("expand_process", {"In_1"}, {"Out_1"});
  mock_desc->SetOutputType(EXPAND);
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Expand_Process);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Pass-through process callback: forwards each input buffer untouched;
// error-marked buffers are replaced by fresh buffers that carry the same
// error marker.
Status Simple_Pass(const std::shared_ptr<DataContext> &data_ctx,
                   const std::shared_ptr<MockFlowUnit> &mock_flowunit) {
  auto input_bufs_1 = data_ctx->Input("In_1");
  auto output_bufs_1 = data_ctx->Output("Out_1");
  auto device = mock_flowunit->GetBindDevice();
  for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {
    auto input_buffer = (*input_bufs_1)[i];
    if (input_buffer->HasError()) {
      auto buffer_ptr = std::make_shared<Buffer>(device);
      buffer_ptr->Build(1 * sizeof(int));
      buffer_ptr->SetError(input_buffer->GetErrorMsg(),
                           input_buffer->GetErrorCode());
      output_bufs_1->PushBack(buffer_ptr);
      MBLOG_INFO << "simple pass recive error buffer, return valid buffer";
    } else {
      output_bufs_1->PushBack(input_buffer);
    }
  }
  return modelbox::STATUS_OK;
}

// Registers "simple_pass": NORMAL flowunit (batch size 1) around Simple_Pass.
void MockFlow::Register_Simple_Pass_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("simple_pass", {"In_1"}, {"Out_1"});
  mock_desc->SetDefaultBatchSize(1);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Simple_Pass);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "httpserver": STREAM flowunit with no callbacks; description
// only (note the different port names IN1/OUT1).
void MockFlow::Register_HttpServer_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("httpserver", {"IN1"}, {"OUT1"});
  mock_desc->SetFlowType(STREAM);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "stream_simple_pass": STREAM pass-through that may change the
// per-stream buffer count (SetStreamSameCount(false)).
void MockFlow::Register_Stream_Simple_Pass_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("stream_simple_pass", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetStreamSameCount(false);
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(Simple_Pass);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());
}

// Registers "simple_error": batch-size-1 flowunit that fails with
// STATUS_INVALID when its first input value is below 2, else copies inputs.
void MockFlow::Register_Simple_Error_Flowunit() {
  auto mock_desc = GenerateFlowunitDesc("simple_error", {"In_1"}, {"Out_1"});
  mock_desc->SetDefaultBatchSize(1);
  auto process_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    auto input_bufs_1 = data_ctx->Input("In_1");
    auto output_bufs_1 = data_ctx->Output("Out_1");
    auto device = mock_flowunit->GetBindDevice();
    auto *input_data = (int *)(*input_bufs_1)[0]->ConstData();
    if (input_data[0] < 2) {
      MBLOG_ERROR << "return invalid";
      return modelbox::STATUS_INVALID;
    }
    for (uint32_t i = 0; i < input_bufs_1->Size(); i++) {
      auto *input_data = (int *)(*input_bufs_1)[i]->ConstData();
      auto buffer_ptr = std::make_shared<Buffer>(device);
      buffer_ptr->Build(1 * sizeof(int));
      auto *output_data = (int *)buffer_ptr->MutableData();
      output_data[0] = input_data[0];
      output_bufs_1->PushBack(buffer_ptr);
    }
    return modelbox::STATUS_OK;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterProcessFunc(process_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_datapre_error": STREAM flowunit whose DataPre always
// fails, for testing error propagation out of DataPre.
void MockFlow::Register_Stream_Datapre_Error_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("stream_datapre_error", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  auto data_pre_func =
      [=](const std::shared_ptr<DataContext> &data_ctx,
          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {
    return modelbox::STATUS_INVALID;
  };
  auto mock_funcitons = std::make_shared<MockFunctionCollection>();
  mock_funcitons->RegisterDataPreFunc(data_pre_func);
  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));
}

// Registers "stream_in_process_error": STREAM flowunit (max batch 16) that
// succeeds on the first process call and fails from the second call onward.
void MockFlow::Register_Stream_In_Process_Error_Flowunit() {
  auto mock_desc =
      GenerateFlowunitDesc("stream_in_process_error", {"In_1"}, {"Out_1"});
  mock_desc->SetFlowType(STREAM);
  mock_desc->SetMaxBatchSize(16);
  auto
data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto start_index_content = std::make_shared<int>(0);\n    data_ctx->SetPrivate(\"error_index\", start_index_content);\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto error_index =\n        *(std::static_pointer_cast<int>(data_ctx->GetPrivate(\"error_index\"))\n              .get());\n    error_index++;\n    data_ctx->SetPrivate(\"error_index\", std::make_shared<int>(error_index));\n    if (error_index < 2) {\n      auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n      auto device = mock_flowunit->GetBindDevice();\n      for (int i = 0; i < 5; i++) {\n        auto buffer_ptr = std::make_shared<Buffer>(device);\n        buffer_ptr->Build(1 * sizeof(int));\n        auto *output_data = (int *)buffer_ptr->MutableData();\n        output_data[0] = 0;\n        output_bufs_1->PushBack(buffer_ptr);\n      }\n\n      return modelbox::STATUS_OK;\n    }\n    return modelbox::STATUS_INVALID;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Stream_Process_Error_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"stream_process_error\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetMaxBatchSize(16);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_INVALID;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  
mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Error_End_Normal_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"error_end_normal\", {\"In_1\"}, {});\n  mock_desc->SetDefaultBatchSize(1);\n  mock_desc->SetExceptionVisible(true);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    MBLOG_INFO << \"error_end_normal process\";\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"error_end process has error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Error_End_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"error_end\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetExceptionVisible(true);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"error_end process has error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Stream_Process_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"stream_process\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetMaxBatchSize(16);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << 
\"stream_process process has error.\";\n    }\n\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto device = mock_flowunit->GetBindDevice();\n    auto buffer_ptr = std::make_shared<Buffer>(device);\n    buffer_ptr->Build(1 * sizeof(int));\n    auto *output_data = (int *)buffer_ptr->MutableData();\n    output_data[0] = 0;\n    output_bufs_1->PushBack(buffer_ptr);\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"stream_process DataPre has error.\";\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_post_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"stream_process DataPost has error\";\n    }\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  mock_funcitons->RegisterDataPostFunc(data_post_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Collapse_Recieve_Error_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"collapse_recieve_error\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n\n  auto data_group_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error DataGroupPre recive error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> 
&data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error DataPre recive error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error Process recive error\";\n    } else {\n      auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataGroupPreFunc(data_group_pre_func);\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Normal_Collapse_Recieve_Error_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"normal_collapse_recieve_error\",\n                                        {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n\n  auto data_group_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error DataGroupPre recive error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error DataPre recive error.\";\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> 
Status {\n    if (data_ctx->HasError()) {\n      MBLOG_INFO << \"collapse_recieve_error Process recive error.\";\n    } else {\n      auto input_bufs_1 = data_ctx->Input(\"In_1\");\n    }\n\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataGroupPreFunc(data_group_pre_func);\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Collapse_Datagrouppre_Error_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"collapse_datagrouppre_error\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n\n  auto data_group_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_INVALID;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataGroupPreFunc(data_group_pre_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Normal_Collapse_Datagrouppre_Error_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"normal_collapse_datapre_error\",\n                                        {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_INVALID;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid 
MockFlow::Register_Normal_Collapse_Process_Error_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"normal_collapse_process_error\",\n                                        {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_INVALID;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Collapse_Datapre_Error_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"collapse_datapre_error\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    MBLOG_INFO << \"collapse_datapre_error\";\n    return modelbox::STATUS_INVALID;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Collapse_Process_Error_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"collapse_process_error\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    return modelbox::STATUS_INVALID;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  
mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nStatus Collapse_Process(const std::shared_ptr<DataContext> &data_ctx,\n                        const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n  auto device = mock_flowunit->GetBindDevice();\n  if (data_ctx->HasError()) {\n    MBLOG_INFO << \"collapse_process recive error buffer, return valid buffer.\";\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto buffer_ptr = std::make_shared<Buffer>(device);\n    buffer_ptr->Build(1 * sizeof(int));\n    auto *output_data = (int *)buffer_ptr->MutableData();\n    output_data[0] = 0;\n    output_bufs_1->PushBack(buffer_ptr);\n  } else {\n    MBLOG_INFO << \"collapse_process recive valid buffer, return error buffer.\";\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto output_bufs_1 = data_ctx->Output(\"Out_1\");\n    auto buffer_ptr = std::make_shared<Buffer>(device);\n    buffer_ptr->Build(1 * sizeof(int));\n    auto *output_data = (int *)buffer_ptr->MutableData();\n    output_data[0] = 0;\n\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto input_buffer = input_bufs->At(i);\n      if (input_buffer->HasError()) {\n        buffer_ptr->SetError(input_buffer->GetErrorMsg(),\n                             input_buffer->GetErrorCode());\n        break;\n      }\n      auto *input_data = (int *)(*input_bufs)[i]->ConstData();\n      auto buffer_ptr = std::make_shared<Buffer>(device);\n      output_data[0] += input_data[0];\n    }\n    output_bufs_1->PushBack(buffer_ptr);\n  }\n\n  return modelbox::STATUS_OK;\n}\n\nvoid MockFlow::Register_Normal_Collapse_Process_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"normal_collapse_process\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  
mock_funcitons->RegisterProcessFunc(Collapse_Process);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Collapse_Process_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"collapse_process\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(COLLAPSE);\n  mock_desc->SetCollapseAll(true);\n  mock_desc->SetFlowType(STREAM);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(Collapse_Process);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc(true));\n}\n\nvoid MockFlow::Register_Virtual_Stream_Start_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"virtual_stream_start\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  mock_desc->SetOutputType(EXPAND);\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto event = data_ctx->Event();\n\n    if (event == nullptr) {\n      auto output_meta = std::make_shared<DataMeta>();\n      auto *input_data = (int *)(*input_bufs)[0]->ConstData();\n      auto start_index = input_data[0];\n      auto end_index = input_data[1];\n      auto interval = input_data[2];\n      auto start_index_content = std::make_shared<int>(start_index);\n      data_ctx->SetPrivate(\"now_index\", start_index_content);\n      auto end_index_content = std::make_shared<int>(end_index);\n      data_ctx->SetPrivate(\"end_index\", end_index_content);\n      auto interval_content = std::make_shared<int>(interval);\n      output_meta->SetMeta(\"start_index\", start_index_content);\n      output_meta->SetMeta(\"end_index\", end_index_content);\n      output_meta->SetMeta(\"interval\", interval_content);\n      data_ctx->SetOutputMeta(\"Out_1\", output_meta);\n    }\n\n    auto now_index = 
*(\n        std::static_pointer_cast<int>(data_ctx->GetPrivate(\"now_index\")).get());\n    auto end_index = *(\n        std::static_pointer_cast<int>(data_ctx->GetPrivate(\"end_index\")).get());\n\n    std::vector<size_t> shape(5, sizeof(int));\n    output_bufs->Build(shape);\n    for (size_t i = 0; i < 5; ++i) {\n      auto *output_data = (int *)(*output_bufs)[i]->MutableData();\n      output_data[0] = now_index + i;\n    }\n    now_index = now_index + 5;\n    auto now_index_content = std::make_shared<int>(now_index);\n    data_ctx->SetPrivate(\"now_index\", now_index_content);\n    if (now_index + 5 <= end_index) {\n      auto event = std::make_shared<FlowUnitEvent>();\n      data_ctx->SendEvent(event);\n      return modelbox::STATUS_CONTINUE;\n    }\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Virtual_Stream_Mid_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"virtual_stream_mid\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n    auto input_bufs = data_ctx->Input(\"In_1\");\n\n    auto device = mock_flowunit->GetBindDevice();\n    auto interval = *(\n        std::static_pointer_cast<int>(data_ctx->GetPrivate(\"interval\")).get());\n    auto end_index = *(\n        std::static_pointer_cast<int>(data_ctx->GetPrivate(\"end_index\")).get());\n\n    for (size_t i = 0; i < input_bufs->Size(); ++i) {\n      auto *input_data = (int *)(*input_bufs)[i]->ConstData();\n      if (input_data[0] % interval == 0) {\n        auto buffer_ptr = std::make_shared<Buffer>(device);\n        buffer_ptr->Build(1 * sizeof(int));\n        
auto *output_data = (int *)buffer_ptr->MutableData();\n        output_data[0] = input_data[0];\n        output_bufs->PushBack(buffer_ptr);\n      }\n\n      if (input_data[0] == end_index - 1) {\n        return modelbox::STATUS_OK;\n      }\n    }\n    return modelbox::STATUS_CONTINUE;\n  };\n\n  auto data_pre_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto meta = data_ctx->GetInputMeta(\"In_1\");\n    std::shared_ptr<Device> device;\n    auto interval =\n        *(std::static_pointer_cast<int>(meta->GetMeta(\"interval\")).get());\n    auto end_index =\n        *(std::static_pointer_cast<int>(meta->GetMeta(\"end_index\")).get());\n    data_ctx->SetPrivate(\"interval\", std::make_shared<int>(interval));\n    data_ctx->SetPrivate(\"end_index\", std::make_shared<int>(end_index));\n    return modelbox::STATUS_OK;\n  };\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterDataPreFunc(data_pre_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Virtual_Stream_End_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"virtual_stream_end\", {\"In_1\"}, {});\n  mock_desc->SetFlowType(STREAM);\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Virtual_Expand_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"virtual_expand\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetOutputType(EXPAND);\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto event = data_ctx->Event();\n    auto *input_data = (int 
*)(*input_bufs)[0]->ConstData();\n    std::vector<size_t> shape(1, 3 * sizeof(int));\n    output_bufs->Build(shape);\n    auto *output_data = (int *)(*output_bufs)[0]->MutableData();\n    for (size_t i = 0; i < 3; ++i) {\n      output_data[i] = input_data[i];\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Virtual_Stream_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"virtual_stream\", {\"In_1\"}, {\"Out_1\"});\n  mock_desc->SetFlowType(STREAM);\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto output_bufs = data_ctx->Output(\"Out_1\");\n    auto input_bufs = data_ctx->Input(\"In_1\");\n    auto event = data_ctx->Event();\n\n    if (event == nullptr) {\n      auto output_meta = std::make_shared<DataMeta>();\n      auto *input_data = (int *)(*input_bufs)[0]->ConstData();\n      auto start_index = input_data[0];\n      auto end_index = input_data[1];\n      auto interval = input_data[2];\n      auto start_index_content = std::make_shared<int>(start_index);\n      data_ctx->SetPrivate(\"now_index\", start_index_content);\n      auto end_index_content = std::make_shared<int>(end_index);\n      data_ctx->SetPrivate(\"end_index\", end_index_content);\n      auto interval_content = std::make_shared<int>(interval);\n      output_meta->SetMeta(\"start_index\", start_index_content);\n      output_meta->SetMeta(\"end_index\", end_index_content);\n      output_meta->SetMeta(\"interval\", interval_content);\n      data_ctx->SetOutputMeta(\"Out_1\", output_meta);\n    }\n\n    auto now_index = *(\n        std::static_pointer_cast<int>(data_ctx->GetPrivate(\"now_index\")).get());\n    auto end_index = *(\n        
std::static_pointer_cast<int>(data_ctx->GetPrivate(\"end_index\")).get());\n\n    std::vector<size_t> shape(5, sizeof(int));\n    output_bufs->Build(shape);\n    for (size_t i = 0; i < 5; ++i) {\n      auto *output_data = (int *)(*output_bufs)[i]->MutableData();\n      output_data[0] = now_index + i;\n    }\n    now_index = now_index + 5;\n    auto now_index_content = std::make_shared<int>(now_index);\n    data_ctx->SetPrivate(\"now_index\", now_index_content);\n    if (now_index + 5 <= end_index) {\n      auto event = std::make_shared<FlowUnitEvent>();\n      data_ctx->SendEvent(event);\n      return modelbox::STATUS_CONTINUE;\n    }\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Tensorlist_Test_1_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"tensorlist_test_1\", {\"IN1\"}, {\"OUT1\"});\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs = data_ctx->Input(\"IN1\");\n    auto output_bufs = data_ctx->Output(\"OUT1\");\n\n    TensorList in_tl(input_bufs);\n    TensorList out_tl(output_bufs);\n\n    if (in_tl.Size() == 0) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    out_tl.Build<int>(in_tl.GetShape());\n    for (size_t i = 0; i < in_tl.Size(); ++i) {\n      auto tensor = in_tl[i];\n      const auto *const in_data = in_tl.ConstBufferData<int>(i);\n      auto *out_data = out_tl.MutableBufferData<int>(i);\n      out_data[0] = in_data[0] + 10;\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid 
MockFlow::Register_Tensorlist_Test_2_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"tensorlist_test_2\", {\"IN1\", \"IN2\"}, {\"OUT1\"});\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto input_bufs_1 = data_ctx->Input(\"IN1\");\n    auto input_bufs_2 = data_ctx->Input(\"IN2\");\n    auto output_bufs_1 = data_ctx->Output(\"OUT1\");\n\n    TensorList in_tl_1(input_bufs_1);\n    TensorList in_tl_2(input_bufs_2);\n    TensorList out_tl_1(output_bufs_1);\n\n    if (in_tl_1.Size() == 0 || in_tl_2.Size() == 0 ||\n        (in_tl_1.Size() != in_tl_2.Size())) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    out_tl_1.Build<int>(in_tl_1.GetShape());\n    for (size_t i = 0; i < in_tl_1.Size(); ++i) {\n      const auto *const in_data_1 = in_tl_1.ConstBufferData<int>(i);\n      const auto *const in_data_2 = in_tl_2.ConstBufferData<int>(i);\n      auto *out_data_1 = out_tl_1.MutableBufferData<int>(i);\n      out_data_1[0] = in_data_1[0] + in_data_2[0];\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Check_Tensorlist_Test_1_Flowunit() {\n  auto mock_desc =\n      GenerateFlowunitDesc(\"check_tensorlist_test_1\", {\"IN1\", \"IN2\"}, {});\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    const auto input_tl_1 = data_ctx->Input(\"IN1\");\n    const auto input_tl_2 = data_ctx->Input(\"IN2\");\n\n    TensorList in_tl_1(input_tl_1);\n    TensorList in_tl_2(input_tl_2);\n\n    if (in_tl_1.Size() == 0 || in_tl_1.Size() != in_tl_2.Size()) {\n    
  return modelbox::STATUS_FAULT;\n    }\n\n    for (size_t i = 0; i < in_tl_1.Size(); ++i) {\n      const auto *const in_data_1 = in_tl_1.ConstBufferData<int>(i);\n      const auto *const in_data_2 = in_tl_2.ConstBufferData<int>(i);\n      if (in_data_2[0] != in_data_1[0]) {\n        return modelbox::STATUS_FAULT;\n      }\n    }\n\n    if (MAX_COUNT < run_count++) {\n      MBLOG_DEBUG << \"check reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Slow_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"slow\", {\"IN1\", \"IN2\"}, {});\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    const auto input_tl_1 = data_ctx->Input(\"IN1\");\n    const auto input_tl_2 = data_ctx->Input(\"IN2\");\n\n    TensorList in_tl_1(input_tl_1);\n    TensorList in_tl_2(input_tl_2);\n\n    if (in_tl_1.Size() == 0 || in_tl_1.Size() != in_tl_2.Size()) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    MBLOG_INFO << \"slow flow get data\";\n    sleep(5);\n    MBLOG_INFO << \"slow flow unit sleep 3s, run_count:\" << run_count;\n\n    for (size_t i = 0; i < in_tl_1.Size(); ++i) {\n      const auto *const in_data_1 = in_tl_1.ConstBufferData<int>(i);\n      const auto *const in_data_2 = 
in_tl_2.ConstBufferData<int>(i);\n      if (in_data_2[0] != in_data_1[0]) {\n        return modelbox::STATUS_FAULT;\n      }\n    }\n\n    if (MAX_COUNT < run_count++) {\n      MBLOG_DEBUG << \"check reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Check_Tensorlist_Test_2_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"check_tensorlist_test_2\",\n                                        {\"IN1\", \"IN2\", \"IN3\"}, {});\n\n  static std::atomic<int64_t> run_count(0);\n  static int64_t MAX_COUNT = 0;\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    const auto input_tl_1 = data_ctx->Input(\"IN1\");\n    const auto input_tl_2 = data_ctx->Input(\"IN2\");\n    const auto input_tl_3 = data_ctx->Input(\"IN3\");\n\n    TensorList in_tl_1(input_tl_1);\n    TensorList in_tl_2(input_tl_2);\n    TensorList in_tl_3(input_tl_3);\n\n    if (in_tl_1.Size() == 0 || in_tl_2.Size() == 0 || in_tl_3.Size() == 0 ||\n        in_tl_1.Size() != in_tl_2.Size() || in_tl_2.Size() != in_tl_3.Size()) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    for (size_t i = 0; i < in_tl_1.Size(); ++i) {\n      const auto *const in_data_1 = in_tl_1.ConstBufferData<int>(i);\n      const auto *const in_data_2 = in_tl_2.ConstBufferData<int>(i);\n      const auto *const 
in_data_3 = in_tl_3.ConstBufferData<int>(i);\n      if (in_data_3[0] != (in_data_1[0] + in_data_2[0])) {\n        return modelbox::STATUS_FAULT;\n      }\n    }\n\n    if (MAX_COUNT < run_count++) {\n      MBLOG_DEBUG << \"check reach max running times, should stop.\";\n      return modelbox::STATUS_STOP;\n    }\n\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto open_func = [=](const std::shared_ptr<Configuration> &flow_option,\n                       const std::shared_ptr<MockFlowUnit> &mock_flowunit) {\n    MAX_COUNT = flow_option->GetInt64(\"max_count\", 50);\n    run_count = 0;\n    return modelbox::STATUS_OK;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterOpenFunc(open_func);\n  mock_funcitons->RegisterProcessFunc(process_func);\n  AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nvoid MockFlow::Register_Statistic_Test_Flowunit() {\n  auto mock_desc = GenerateFlowunitDesc(\"statistic_test\", {\"IN1\"}, {\"OUT1\"});\n\n  auto process_func =\n      [=](const std::shared_ptr<DataContext> &data_ctx,\n          const std::shared_ptr<MockFlowUnit> &mock_flowunit) -> Status {\n    auto stats = data_ctx->GetStatistics();\n    EXPECT_NE(stats, nullptr);\n    if (stats == nullptr) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    int32_t test_val = 1;\n    auto test_stats = stats->AddItem(\"test_key\", test_val);\n    EXPECT_NE(test_stats, nullptr);\n    if (test_stats == nullptr) {\n      return modelbox::STATUS_FAULT;\n    }\n\n    std::this_thread::sleep_for(std::chrono::seconds(1));\n    test_stats->SetValue(test_val);  // notify cooldown test\n    test_stats->SetValue(test_val);\n    test_stats->SetValue(test_val);\n    auto output = data_ctx->Output(\"OUT1\");\n    output->Build({1});\n    return modelbox::STATUS_SUCCESS;\n  };\n\n  auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n  mock_funcitons->RegisterProcessFunc(process_func);\n  
AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n}\n\nbool MockFlow::Init(bool with_default_flowunit) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n\n  // generate cpu driver\n  modelbox::DriverDesc desc;\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl_->AddMockDriverDevice(\"cpu\", desc);\n  drivers->Add(file_path_device);\n\n  // generate graphmanager\n  desc.SetClass(\"DRIVER-GRAPHCONF\");\n  desc.SetType(\"GRAPHVIZ\");\n  desc.SetName(\"GRAPHCONF-GRAPHVIZ\");\n  desc.SetDescription(\"graph config parse graphviz\");\n  desc.SetVersion(\"0.1.0\");\n  file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-graphconf-graphviz.so\";\n  desc.SetFilePath(file_path_device);\n  ctl_->AddMockDriverGraphConf(\"graphviz\", \"\", desc);\n  drivers->Add(file_path_device);\n\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  Status status1 = device_mgr->Initialize(drivers, config);\n\n  auto device = device_mgr->CreateDevice(\"cpu\", \"0\");\n  if (device == nullptr) {\n    MBLOG_ERROR << \"create device failed, \" << StatusError;\n    return false;\n  }\n  device->SetMemQuota(102400000);\n  if (with_default_flowunit) {\n    Register_Test_0_1_Flowunit();\n    Register_Test_0_1_Batch_Thread_Flowunit();\n    Register_Test_0_1_Batch_Flowunit();\n    Register_Test_0_2_Flowunit();\n    Register_Test_1_0_Flowunit();\n    Register_Test_1_0_Batch_Flowunit();\n    Register_Test_1_0_Batch_Thread_Flowunit();\n    Register_Test_2_0_Flowunit();\n    Register_Test_Orgin_0_2_Flowunit();\n    Register_Listen_Flowunit();\n    Register_ExternData_Flowunit();\n    
Register_Test_2_inputs_2_outputs_Flowunit();\n    Register_Condition_Flowunit();\n    Register_Switch_Case_Flowunit();\n    Register_Loop_Flowunit();\n    Register_Loop_End_Flowunit();\n    Register_Half_Condition_Flowunit();\n    Register_Normal_Condition_Flowunit();\n    Register_Expand_Normal_Flowunit();\n    Register_Collapse_Normal_Flowunit();\n    Register_Stream_Add_Flowunit();\n    Register_Add_Flowunit();\n    Register_Wrong_Add_Flowunit();\n    Register_Wrong_Add_2_Flowunit();\n    Register_Scatter_Flowunit();\n    Register_Garther_Flowunit();\n    Register_Garther_Gen_Flowunit();\n    Register_Print_Flowunit();\n    Register_Check_Print_Flowunit();\n    Register_Dynamic_Config_Flowunit();\n    Register_Dynamic_Get_Config_Flowunit();\n    Register_Dynamic_Get_Config_Other_Flowunit();\n    Register_Stream_Info_Flowunit();\n    Register_Stream_Normal_Info_Flowunit();\n    Register_Stream_Normal_Info_2_Flowunit();\n    Register_Stream_Start_Flowunit();\n    Register_Normal_Expand_Start_Flowunit();\n    Register_Stream_Tail_Filter_Flowunit();\n    Register_Stream_Mid_Flowunit();\n    Register_Stream_End_Flowunit();\n    Register_Add_1_Flowunit();\n    Register_Iflow_Add_1_Flowunit();\n    Register_Add_1_And_Error_Flowunit();\n    Register_Test_Condition_Flowunit();\n    Register_Get_Priority_Flowunit();\n    Register_Error_Start_Flowunit();\n    Register_Error_Start_Normal_Flowunit();\n    Register_Error_End_Flowunit();\n    Register_Error_End_Normal_Flowunit();\n    Register_Normal_Start_Flowunit();\n    Register_Expand_Datapre_Error_Flowunit();\n    Register_Normal_Expand_Process_Error_Flowunit();\n    Register_Expand_Process_Error_Flowunit();\n    Register_Normal_Expand_Process_Flowunit();\n    Register_Expand_Process_Flowunit();\n    Register_Simple_Pass_Flowunit();\n    Register_Stream_Simple_Pass_Flowunit();\n    Register_Simple_Error_Flowunit();\n    Register_Stream_Datapre_Error_Flowunit();\n    Register_Stream_In_Process_Error_Flowunit();\n    
Register_Stream_Process_Error_Flowunit();\n    Register_Stream_Process_Flowunit();\n    Register_Collapse_Recieve_Error_Flowunit();\n    Register_Normal_Collapse_Recieve_Error_Flowunit();\n    Register_Collapse_Datagrouppre_Error_Flowunit();\n    Register_Normal_Collapse_Datagrouppre_Error_Flowunit();\n    Register_Normal_Collapse_Process_Error_Flowunit();\n    Register_Normal_Collapse_Process_Flowunit();\n    Register_Collapse_Datapre_Error_Flowunit();\n    Register_Collapse_Process_Error_Flowunit();\n    Register_Collapse_Process_Flowunit();\n    Register_Virtual_Stream_Start_Flowunit();\n    Register_Virtual_Stream_Mid_Flowunit();\n    Register_Virtual_Stream_End_Flowunit();\n    Register_Virtual_Expand_Flowunit();\n    Register_Virtual_Stream_Flowunit();\n    Register_Tensorlist_Test_1_Flowunit();\n    Register_Tensorlist_Test_2_Flowunit();\n    Register_Check_Tensorlist_Test_1_Flowunit();\n    Register_Check_Tensorlist_Test_2_Flowunit();\n    Register_Slow_Flowunit();\n    Register_Statistic_Test_Flowunit();\n    Register_HttpServer_Flowunit();\n    Register_Collapse_Stream_Flowunit();\n    Register_Expand_Stream_Flowunit();\n  }\n\n  drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-unit-*\");\n\n  std::shared_ptr<FlowUnitManager> flowunit_mgr =\n      FlowUnitManager::GetInstance();\n  auto result = flowunit_mgr->Initialize(drivers, device_mgr, config);\n\n  return result;\n}\n\nStatus MockFlow::InitFlow(const std::string &name, const std::string &graph) {\n  flow_ = std::make_shared<Flow>();\n  return flow_->Init(name, graph);\n}\n\nStatus MockFlow::BuildAndRun(const std::string &name, const std::string &graph,\n                             int timeout) {\n  auto ret = InitFlow(name, graph);\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->Build();\n  if (!ret) {\n    return ret;\n  }\n\n  ret = flow_->RunAsync();\n  if (!ret) {\n    return ret;\n  }\n\n  if (timeout < 0) {\n    return ret;\n  }\n\n  Status retval;\n  flow_->Wait(timeout, &retval);\n  return 
retval;\n}\n\nstd::shared_ptr<MockDriverCtl> MockFlow::GetMockFlowCtl() { return ctl_; }\n\nstd::shared_ptr<Flow> MockFlow::GetFlow() { return flow_; }\n\nvoid MockFlow::Destroy() {\n  std::shared_ptr<FlowUnitManager> flowunit_mgr =\n      FlowUnitManager::GetInstance();\n  flowunit_mgr->Clear();\n  flowunit_mgr = nullptr;\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  device_mgr->Clear();\n  device_mgr = nullptr;\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  drivers->Clear();\n  drivers = nullptr;\n  ctl_ = nullptr;\n  flow_ = nullptr;\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/mock/minimodelbox/mockflow.h",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#ifndef MODELBOX_MOCKFLOW_H_\n#define MODELBOX_MOCKFLOW_H_\n\n#include <iostream>\n#include <string>\n#include <utility>\n\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nstd::shared_ptr<FlowUnitDesc> GenerateFlowunitDesc(\n    const std::string &name, const std::set<std::string> &inputs,\n    const std::set<std::string> &outputs);\nclass MockFlow {\n public:\n  MockFlow() { ctl_ = std::make_shared<MockDriverCtl>(); };\n  virtual ~MockFlow() { Destroy(); };\n\n  bool Init(bool with_default_flowunit = true);\n  void Destroy();\n  Status BuildAndRun(const std::string &name, const std::string &graph,\n                     int timeout = 15 * 1000);\n  std::shared_ptr<Flow> GetFlow();\n  std::shared_ptr<Device> GetDevice() {\n    auto device_mgr = DeviceManager::GetInstance();\n    auto device = device_mgr->GetDevice(\"cpu\", \"0\");\n    if (device == nullptr) {\n      MBLOG_ERROR << \"create device failed, \" << StatusError;\n      return nullptr;\n    }\n    return device;\n  }\n  std::shared_ptr<MockDriverCtl> GetMockFlowCtl();\n\n  void AddFlowUnitDesc(const std::shared_ptr<FlowUnitDesc> &flow_desc,\n                       std::function<std::shared_ptr<modelbox::FlowUnit>(\n                           const std::string &, const std::string &)>\n                           create_func,\n                 
      const std::string &lib_path = TEST_LIB_DIR);\n\n  void Register_Test_0_1_Flowunit();\n  void Register_Test_1_0_Flowunit();\n  void Register_Test_0_1_Batch_Flowunit();\n  void Register_Test_1_0_Batch_Flowunit();\n  void Register_Test_0_1_Batch_Thread_Flowunit();\n  void Register_Test_1_0_Batch_Thread_Flowunit();\n  void Register_Test_0_2_Flowunit();\n  void Register_Test_2_0_Flowunit();\n  void Register_Test_OK_2_0_Flowunit();\n  void Register_Test_Orgin_0_2_Flowunit();\n  void Register_Listen_Flowunit();\n  void Register_ExternData_Flowunit();\n  void Register_Test_2_inputs_2_outputs_Flowunit();\n  void Register_Condition_Flowunit();\n  void Register_Switch_Case_Flowunit();\n  void Register_Loop_Flowunit();\n  void Register_Loop_End_Flowunit();\n  void Register_Half_Condition_Flowunit();\n  void Register_Normal_Condition_Flowunit();\n  void Register_Expand_Normal_Flowunit();\n  void Register_Collapse_Normal_Flowunit();\n  void Register_Stream_Add_Flowunit();\n  void Register_Add_Flowunit();\n  void Register_Wrong_Add_Flowunit();\n  void Register_Wrong_Add_2_Flowunit();\n  void Register_Scatter_Flowunit();\n  void Register_Garther_Flowunit();\n  void Register_Garther_Gen_Flowunit();\n  void Register_Print_Flowunit();\n  void Register_Check_Print_Flowunit();\n  void Register_Dynamic_Config_Flowunit();\n  void Register_Dynamic_Get_Config_Flowunit();\n  void Register_Dynamic_Get_Config_Other_Flowunit();\n  void Register_Stream_Info_Flowunit();\n  void Register_Stream_Normal_Info_Flowunit();\n  void Register_Stream_Normal_Info_2_Flowunit();\n  void Register_Stream_Start_Flowunit();\n  void Register_Normal_Expand_Start_Flowunit();\n  void Register_Stream_Tail_Filter_Flowunit();\n  void Register_Stream_Mid_Flowunit();\n  void Register_Stream_End_Flowunit();\n  void Register_Add_1_Flowunit();\n  void Register_Iflow_Add_1_Flowunit();\n  void Register_Add_1_And_Error_Flowunit();\n  void Register_Test_Condition_Flowunit();\n  void Register_Get_Priority_Flowunit();\n  
void Register_Error_Start_Flowunit();\n  void Register_Error_Start_Normal_Flowunit();\n  void Register_Error_End_Flowunit();\n  void Register_Error_End_Normal_Flowunit();\n  void Register_Normal_Start_Flowunit();\n  void Register_Expand_Datapre_Error_Flowunit();\n  void Register_Normal_Expand_Process_Error_Flowunit();\n  void Register_Expand_Process_Error_Flowunit();\n  void Register_Normal_Expand_Process_Flowunit();\n  void Register_HttpServer_Flowunit();\n  void Register_Expand_Process_Flowunit();\n  void Register_Simple_Pass_Flowunit();\n  void Register_Stream_Simple_Pass_Flowunit();\n  void Register_Simple_Error_Flowunit();\n  void Register_Stream_Datapre_Error_Flowunit();\n  void Register_Stream_In_Process_Error_Flowunit();\n  void Register_Stream_Process_Error_Flowunit();\n  void Register_Stream_Process_Flowunit();\n  void Register_Collapse_Recieve_Error_Flowunit();\n  void Register_Normal_Collapse_Recieve_Error_Flowunit();\n  void Register_Collapse_Datagrouppre_Error_Flowunit();\n  void Register_Normal_Collapse_Datagrouppre_Error_Flowunit();\n  void Register_Normal_Collapse_Process_Error_Flowunit();\n  void Register_Normal_Collapse_Process_Flowunit();\n  void Register_Collapse_Datapre_Error_Flowunit();\n  void Register_Collapse_Process_Error_Flowunit();\n  void Register_Collapse_Process_Flowunit();\n  void Register_Virtual_Stream_Start_Flowunit();\n  void Register_Virtual_Stream_Mid_Flowunit();\n  void Register_Virtual_Stream_End_Flowunit();\n  void Register_Virtual_Expand_Flowunit();\n  void Register_Virtual_Stream_Flowunit();\n  void Register_Tensorlist_Test_1_Flowunit();\n  void Register_Tensorlist_Test_2_Flowunit();\n  void Register_Check_Tensorlist_Test_1_Flowunit();\n  void Register_Check_Tensorlist_Test_2_Flowunit();\n  void Register_Statistic_Test_Flowunit();\n  void Register_Slow_Flowunit();\n  void Register_Expand_Stream_Flowunit();\n  void Register_Collapse_Stream_Flowunit();\n\n  Status InitFlow(const std::string &name, const std::string 
&graph);\n\n  std::shared_ptr<MockDriverCtl> ctl_;\n  std::shared_ptr<Flow> flow_;\n};\n\nclass MockFunctionCollection\n    : public std::enable_shared_from_this<MockFunctionCollection> {\n public:\n  MockFunctionCollection() = default;\n  virtual ~MockFunctionCollection() = default;\n\n  void RegisterOpenFunc(\n      std::function<Status(const std::shared_ptr<Configuration> &,\n                           std::shared_ptr<MockFlowUnit>)>\n          open_func) {\n    open_func_ = std::move(open_func);\n  };\n  void RegisterCloseFunc(\n      std::function<Status(std::shared_ptr<MockFlowUnit>)> close_func) {\n    close_func_ = std::move(close_func);\n  };\n  void RegisterDataGroupPreFunc(\n      std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                           std::shared_ptr<MockFlowUnit>)>\n          data_group_pre_func) {\n    data_group_pre_func_ = std::move(data_group_pre_func);\n  };\n  void RegisterDataPreFunc(\n      std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                           std::shared_ptr<MockFlowUnit>)>\n          data_pre_func) {\n    data_pre_func_ = std::move(data_pre_func);\n  };\n  void RegisterProcessFunc(\n      std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                           std::shared_ptr<MockFlowUnit>)>\n          process_func) {\n    process_func_ = std::move(process_func);\n  };\n  void RegisterDataPostFunc(\n      std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                           std::shared_ptr<MockFlowUnit>)>\n          data_post_func) {\n    data_post_func_ = std::move(data_post_func);\n  };\n  void RegisterDataGroupPostFunc(\n      std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                           std::shared_ptr<MockFlowUnit>)>\n          data_group_post_func) {\n    data_group_post_func_ = std::move(data_group_post_func);\n  };\n\n  std::function<std::shared_ptr<modelbox::FlowUnit>(const std::string &,\n                      
                              const std::string &)>\n  GenerateCreateFunc(bool need_sequence = false);\n\n private:\n  std::function<Status(const std::shared_ptr<Configuration> &,\n                       std::shared_ptr<MockFlowUnit>)>\n      open_func_;\n  std::function<Status(std::shared_ptr<MockFlowUnit>)> close_func_;\n  std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit>)>\n      data_group_pre_func_;\n  std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit>)>\n      data_pre_func_;\n  std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit>)>\n      process_func_;\n  std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit>)>\n      data_post_func_;\n  std::function<Status(std::shared_ptr<DataContext> data_ctx,\n                       std::shared_ptr<MockFlowUnit>)>\n      data_group_post_func_;\n};\n\n}  // namespace modelbox\n#endif  // MODELBOX_MOCKFLOW_H_\n"
  },
  {
    "path": "test/test_config.h.in",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#ifndef MODELBOX_TEST_CONFIG_H_\n#define MODELBOX_TEST_CONFIG_H_\n\nnamespace modelbox {\n\n// test working dir\n#define TEST_WORKING_DIR            \"@TEST_WORKING_DIR@\"\n\n// test lib dir\n#define TEST_LIB_DIR                \"@TEST_WORKING_LIB_DIR@\"\n\n// test bin dir\n#define TEST_BIN_DIR                \"@TEST_WORKING_BIN_DIR@\"\n\n// test data dir\n#define TEST_DATA_DIR                \"@TEST_WORKING_DATA_DIR@\"\n\n// test driver dir\n#define TEST_DRIVER_DIR             \"@TEST_WORKING_DRIVERS_DIR@\"\n\n// test demo dir\n#define TEST_DEMO_DRIVERS_DIR       \"@TEST_DEMO_DRIVERS_DIR@\"\n\n// test demo video\n#define TEST_DEMO_VIDEO_DIR         \"@MODELBOX_DEMO_DIR@/video\"\n\n// test asserts file\n#define TEST_ASSETS                 \"@TEST_ASSETS@\"\n\n// test source code dir\n#define TEST_SOURCE_DIR             \"@TEST_SOURCE_DIR@\"\n\n// python flow unit so path\n#define PYTHON_PATH                 \"@LIBMODELBOX_FLOWUNIT_PYTHON_SO_PATH@\"\n#define DEVICE_CPU_SO_PATH          \"@LIBMODELBOX_DEVICE_CPU_SO_PATH@\"\n#define DEVICE_CUDA_SO_PATH         \"@LIBMODELBOX_DEVICE_CUDA_SO_PATH@\"\n#define DEVICE_ASCEND_SO_PATH       \"@LIBMODELBOX_DEVICE_ASCEND_SO_PATH@\"\n#define INFERENCE_PATH              \"@LIBMODELBOX_FLOWUNIT_INFERENCE_SO_PATH@\"\n#define VIRTUAL_PYTHON_PATH         
\"@LIBMODELBOX_VIRTUALDRIVER_PYTHON_SO_PATH@\"\n#define VIRTUAL_INFERENCE_PATH      \"@LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SO_PATH@\"\n\n#define MODELBOX_PLUGIN_SO_PATH        \"@MODELBOX_PLUGIN_SO_PATH@\"\n#define MODELBOX_PLUGIN_EDITOR_SO_PATH \"@MODELBOX_PLUGIN_EDITOR_SO_PATH@\"\n#define MODELBOX_TF_SO_PATH            \"@TENSORFLOW_LIBRARIES@\"\n\n#define MODELBOX_TEMPLATE_BIN_DIR \"@MODELBOX_TEMPLATE_BIN_DIR@\"\n#define MODELBOX_TEMPLATE_CMD_PATH \"@MODELBOX_TEMPLATE_CMD_PATH@\"\n}  // namespace modelbox\n\n#endif  // MODELBOX_TEST_CONFIG_H_\n"
  },
  {
    "path": "test/test_main.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <signal.h>\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n\nstatic int exit_signal;\n\nstatic int g_sig_list[] = {\n    SIGIO,   SIGPWR,    SIGSTKFLT, SIGPROF, SIGINT,  SIGTERM,\n    SIGBUS,  SIGVTALRM, SIGTRAP,   SIGXCPU, SIGXFSZ, SIGILL,\n    SIGABRT, SIGFPE,    SIGSEGV,   SIGQUIT, SIGSYS,\n};\nstatic int g_sig_num = sizeof(g_sig_list) / sizeof(g_sig_list[0]);\nstatic constexpr int SIG_SKIP_STACK = 2;\n\nstatic void test_sig_handler(int volatile sig_no, siginfo_t *sig_info,\n                             void *volatile ptr) {\n  switch (sig_no) {\n    case SIGINT:\n    case SIGTERM:\n      exit_signal = sig_no;\n      exit(1);\n      break;\n    case SIGQUIT:\n      return;\n      break;\n    case SIGSEGV:\n    case SIGPIPE:\n    case SIGFPE:\n    case SIGABRT:\n    case SIGBUS:\n    case SIGILL:\n      std::cout << \"Segment fault\"\n                << \", Signal: \" << sig_no << \", Addr: \" << sig_info->si_addr\n                << \", Code: \" << sig_info->si_code\n                << \", Caused by: \" << std::endl\n                << modelbox::GetStackTrace(SIG_SKIP_STACK) << std::endl;\n      usleep(300);\n      break;\n    default:\n      break;\n  }\n\n  _exit(1);\n}\n\nstatic int test_sig_register() {\n  int i = 0;\n  struct sigaction 
sig_act;\n\n  for (i = 0; i < g_sig_num; i++) {\n    sig_act.sa_handler = nullptr;\n    (void)sigemptyset(&sig_act.sa_mask);\n    sig_act.sa_restorer = nullptr;\n    sig_act.sa_sigaction = test_sig_handler;\n    sig_act.sa_flags = SA_SIGINFO | SA_RESTART;\n\n    if (sigaction(g_sig_list[i], &sig_act, nullptr) < 0) {\n      fprintf(stderr, \"Register signal %d failed.\", g_sig_list[i]);\n    }\n  }\n\n  return 0;\n}\n\nstatic int test_init() {\n  if (test_sig_register() != 0) {\n    fprintf(stderr, \"register signal failed.\\n\");\n    return 1;\n  }\n\n  return 0;\n}\n\nstatic void test_exit() {}\n\nint main(int argc, char **argv) {\n  int ret = 0;\n\n  Defer { test_exit(); };\n  /* Run init test */\n  if (test_init() != 0) {\n    fprintf(stderr, \"init test failed.\\n\");\n    return -1;\n  }\n\n  if (getenv(\"MODELBOX_CONSOLE_LOGLEVEL\") == nullptr) {\n    ModelBoxLogger.GetLogger()->SetLogLevel(modelbox::LOG_INFO);\n  }\n\n  ::testing::InitGoogleTest(&argc, argv);\n  ret |= RUN_ALL_TESTS();\n\n  return ret;\n}\n"
  },
  {
    "path": "test/unit/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n \ncmake_minimum_required(VERSION 3.10)\n\nset(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} -rdynamic\")\n\nfile(GLOB_RECURSE UNIT_TEST_SOURCE *.cpp *.cc *.c)\n \ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\ninclude_directories(${CMAKE_CURRENT_BINARY_DIR})\n \ninclude_directories(${TEST_INCLUDE})\ninclude_directories(${LIBMODELBOX_DEVICE_MOCKDEVICE_INCLUDE})\ninclude_directories(${LIBMODELBOX_FLOWUNIT_MOCKFLOWUNIT_INCLUDE})\ninclude_directories(${LIBMODELBOX_GRAPHCONF_MOCKGRAPHCONF_INCLUDE})\ninclude_directories(${LIBMODELBOX_VIRTUALDRIVER_PYTHON_INCLUDE})\ninclude_directories(${MOCKFLOW_INCLUDE})\n \nadd_executable(unit EXCLUDE_FROM_ALL\n    ${UNIT_TEST_SOURCE}\n    ${TEST_SOURCE}\n    ${TEST_MAIN_SOURCE}\n)\n\nset_target_properties(unit PROPERTIES ENABLE_EXPORTS 1)\n\n\nif (TARGET ${MODELBOX_SERVER_PLUGIN_EDITOR})\nadd_dependencies(unit ${MODELBOX_SERVER_PLUGIN_EDITOR})\nendif()\n\nif (TARGET ${MODELBOX_SERVER_PLUGIN})\n    add_dependencies(unit ${MODELBOX_SERVER_PLUGIN})\n    add_custom_command(TARGET unit POST_BUILD\n        COMMAND rm -fr ${TEST_WORKING_LIB_DIR}/*\n        COMMAND cp $<TARGET_FILE:${MODELBOX_SERVER_PLUGIN}> ${TEST_WORKING_LIB_DIR}/\n    )\nendif()\n\nif (${LIBMODELBOX_VIRTUALDRIVER_PYTHON_SHARED})\nadd_dependencies(unit ${LIBMODELBOX_VIRTUALDRIVER_PYTHON_SHARED})\nendif()\n\nadd_dependencies(unit 
${LIBMODELBOX_VIRTUALDRIVER_INFERENCE_SHARED})\nadd_dependencies(unit ${LIBMODELBOX_DEVICE_CPU_SHARED})\n\nif(${PYTHONLIBS_FOUND})\n    add_dependencies(unit ${LIBMODELBOX_FLOWUNIT_PYTHON_SHARED})\nendif()\n\ntarget_link_libraries(unit pthread)\ntarget_link_libraries(unit rt)\ntarget_link_libraries(unit dl)\ntarget_link_libraries(unit gtest_main)\ntarget_link_libraries(unit gmock_main)\ntarget_link_libraries(unit ${MOCKFLOW_LIB})\ntarget_link_libraries(unit ${TEST_LINK_LIBRARIES})\ntarget_link_libraries(unit ${LIBMODELBOX_SHARED})\ntarget_link_libraries(unit ${DUKTAPE_LIBRARIES})\n\nadd_custom_target(unittest-modelbox\n\tCOMMAND ${TEST_RUNNER_LIST} ${CMAKE_CURRENT_BINARY_DIR}/unit\n\tDEPENDS  unit\n\tWORKING_DIRECTORY ${TEST_WORKING_DIR}\n\tCOMMENT \"Run modelbox-unit Test...\"\n)\n\nlist(APPEND MODELBOX_UNIT_TEST_TARGETS unit)\nset(MODELBOX_UNIT_TEST_TARGETS ${MODELBOX_UNIT_TEST_TARGETS} CACHE INTERNAL \"\")\n\nlist(APPEND MODELBOX_UNIT_TEST_RUN_TARGETS unittest-modelbox)\nset(MODELBOX_UNIT_TEST_RUN_TARGETS ${MODELBOX_UNIT_TEST_RUN_TARGETS} CACHE INTERNAL \"\")\n\n \n"
  },
  {
    "path": "test/unit/libmodelbox/base/blocking_queue_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/blocking_queue.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <future>\n#include <string>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\nnamespace modelbox {\nclass BlockingQueueTest : public testing::Test {\n public:\n  BlockingQueueTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nclass PriorityBlockingQueueTest : public testing::Test {\n public:\n  PriorityBlockingQueueTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nclass TestNumber {\n public:\n  TestNumber() = default;\n  TestNumber(int n) { num_ = n; };\n  virtual ~TestNumber() = default;\n  int Get() { return num_; }\n  TestNumber& operator=(int n) {\n    num_ = n;\n    return *this;\n  };\n  bool operator==(int n) const { return n == num_; }\n  bool operator==(const TestNumber& t) const { return t.num_ == num_; }\n  bool operator<(const TestNumber& t) const { return num_ < t.num_; }\n  bool operator>(const TestNumber& t) const { return num_ > t.num_; }\n  bool operator<=(const TestNumber& t) const {\n    if (num_ == t.num_) {\n      return private_num_ >= t.private_num_;\n    }\n    return num_ <= t.num_;\n  }\n  std::string ToString() const {\n    
std::ostringstream oss;\n    oss << num_;\n    return oss.str();\n  }\n  void SetPrivate(int n) { private_num_ = n; };\n  int GetPrivate() { return private_num_; };\n\n private:\n  int num_ = 0;\n  int private_num_ = 0;\n};\n\nstd::ostream& operator<<(std::ostream& os, const TestNumber& t) {\n  os << t.ToString();\n  return os;\n}\n\nTEST_F(BlockingQueueTest, EnqueueDequeue) {\n  const int queue_size = 12;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    queue.Push(value);\n  }\n\n  EXPECT_EQ(queue_size, queue.Size());\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 0);\n\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = -1;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, EnqueueDequeueSequence) {\n  const int queue_size = 12;\n  const int push_size = 6;\n  std::vector<TestNumber> nums;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  for (int i = 0; i < push_size; i++) {\n    TestNumber value = i * i;\n    nums.push_back(value);\n  }\n\n  auto ret = queue.Push(&nums);\n  EXPECT_EQ(ret, push_size);\n  nums.clear();\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    nums.push_back(value);\n  }\n\n  ret = queue.Push(&nums);\n  EXPECT_EQ(queue_size, push_size + ret);\n  EXPECT_EQ(push_size + ret, queue.Size());\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 0);\n\n  std::vector<TestNumber> out_nums;\n  queue.Pop(&out_nums);\n  EXPECT_EQ(out_nums.size(), queue_size);\n\n  for (size_t i = 0; i < out_nums.size(); i++) {\n    int v = i % push_size;\n    EXPECT_EQ(out_nums[i], v * v);\n  }\n}\n\nTEST_F(BlockingQueueTest, EnqueueDequeueSequenceBatchTimeout) {\n  const int queue_size = 12;\n  std::vector<TestNumber> second;\n  std::vector<TestNumber> first;\n  const int first_data_size = 6;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  for (int i = 0; i < 
first_data_size; i++) {\n    TestNumber value = i * i;\n    first.push_back(value);\n  }\n\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    second.push_back(value);\n  }\n\n  queue.Push(&first);\n  auto ret = queue.PushBatch(&second, 10);\n  EXPECT_FALSE(ret);\n\n  EXPECT_EQ(first_data_size, queue.Size());\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 0);\n\n  std::vector<TestNumber> out_nums;\n  queue.Pop(&out_nums);\n  EXPECT_EQ(out_nums.size(), first_data_size);\n\n  for (size_t i = 0; i < out_nums.size(); i++) {\n    EXPECT_EQ(out_nums[i], i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, EnqueueDequeueSequenceBatchBlock) {\n  const int queue_size = 12;\n  std::vector<TestNumber> second;\n  std::vector<TestNumber> first;\n  const int first_data_size = 6;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  for (int i = 0; i < first_data_size; i++) {\n    TestNumber value = i * i;\n    first.push_back(value);\n  }\n\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    second.push_back(value);\n  }\n\n  queue.Push(&first);\n  EXPECT_EQ(first_data_size, queue.Size());\n\n  auto start = std::chrono::high_resolution_clock::now();\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    std::vector<TestNumber> first_out;\n    queue.Pop(&first_out);\n    EXPECT_EQ(first_data_size, first_out.size());\n    for (size_t i = 0; i < first_out.size(); i++) {\n      EXPECT_EQ(first_out[i], i * i);\n    }\n  });\n\n  auto ret = queue.PushBatch(&second);\n  EXPECT_TRUE(ret);\n  auto finish = std::chrono::high_resolution_clock::now();\n  auto elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 0);\n\n  std::vector<TestNumber> out_nums;\n  queue.Pop(&out_nums);\n  EXPECT_EQ(out_nums.size(), 
queue_size);\n\n  for (size_t i = 0; i < out_nums.size(); i++) {\n    EXPECT_EQ(out_nums[i], i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, QueueSize) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(SIZE_MAX);\n\n  for (int i = 0; i < queue_size; i++) {\n    int value = i * i;\n    queue.Push(value);\n  }\n\n  EXPECT_EQ(queue_size, queue.Size());\n\n  int pop_num = 3;\n  for (int i = 0; i < pop_num; i++) {\n    int value = -1;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n\n  EXPECT_EQ(queue_size - pop_num, queue.Size());\n}\n\nTEST_F(BlockingQueueTest, QueueRemain) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  int push_num = 6;\n  for (int i = 0; i < push_num; i++) {\n    int value = i * i;\n    queue.Push(value);\n  }\n\n  EXPECT_EQ(queue_size - push_num, queue.RemainCapacity());\n}\n\nTEST_F(BlockingQueueTest, QueueClear) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  int push_num = 6;\n  for (int i = 0; i < push_num; i++) {\n    int value = i * i;\n    queue.Push(value);\n  }\n\n  queue.Clear();\n  EXPECT_EQ(queue_size, queue.RemainCapacity());\n  EXPECT_EQ(0, queue.Size());\n}\n\nTEST_F(BlockingQueueTest, QueueBlockClear) {\n  const int queue_size = 12;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  int push_num = 6;\n  for (int i = 0; i < push_num; i++) {\n    TestNumber value = i * i;\n    queue.Push(value);\n  }\n\n  EXPECT_EQ(push_num, queue.Size());\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    queue.Close();\n  });\n\n  std::vector<TestNumber> in_nums;\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    in_nums.push_back(value);\n  }\n\n  auto ret = queue.PushBatch(&in_nums);\n  EXPECT_LE(ret, 0);\n  EXPECT_EQ(queue_size, queue.RemainCapacity());\n  EXPECT_EQ(0, queue.Size());\n}\n\nTEST_F(BlockingQueueTest, PopBatch) {\n  const int queue_size = 12;\n 
 BlockingQueue<TestNumber> queue(queue_size);\n\n  std::vector<TestNumber> in_nums;\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    in_nums.push_back(value);\n  }\n\n  std::vector<TestNumber> out_nums;\n  auto ret_num = queue.PopBatch(&out_nums, -1);\n  EXPECT_EQ(ret_num, 0);\n  EXPECT_EQ(out_nums.size(), 0);\n\n  auto ret = queue.PushBatch(&in_nums);\n  EXPECT_EQ(ret, queue_size);\n  EXPECT_EQ(queue_size, queue.Size());\n\n  ret_num = queue.PopBatch(&out_nums);\n  EXPECT_EQ(ret_num, ret);\n  EXPECT_EQ(out_nums.size(), ret_num);\n}\n\nTEST_F(BlockingQueueTest, PopBatchMaxNum) {\n  const int queue_size = 12;\n  const int first_pop = 3;\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  for (int i = 0; i < queue_size; i++) {\n    TestNumber value = i * i;\n    queue.Push(value);\n  }\n\n  std::vector<TestNumber> out_nums;\n\n  auto ret_num = queue.PopBatch(&out_nums, -1, first_pop);\n  EXPECT_EQ(ret_num, first_pop);\n  EXPECT_EQ(out_nums.size(), ret_num);\n\n  out_nums.clear();\n  ret_num = queue.PopBatch(&out_nums, -1);\n  EXPECT_EQ(queue_size - first_pop, ret_num);\n  EXPECT_EQ(out_nums.size(), ret_num);\n}\n\nTEST_F(BlockingQueueTest, QueuePoll) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  int push_num = 6;\n  for (int i = 0; i < push_num; i++) {\n    int value = i * i;\n    queue.Push(value);\n  }\n\n  for (int i = 0; i < queue_size; i++) {\n    int value = -1;\n    bool ret = queue.Poll(&value);\n    if (i < push_num) {\n      EXPECT_EQ(value, i * i);\n    } else {\n      EXPECT_FALSE(ret);\n    }\n  }\n\n  queue.Clear();\n  EXPECT_EQ(queue_size, queue.RemainCapacity());\n  EXPECT_EQ(0, queue.Size());\n}\n\nTEST_F(BlockingQueueTest, QueueCapacity) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(SIZE_MAX);\n\n  EXPECT_EQ(SIZE_MAX, queue.GetCapacity());\n  queue.SetCapacity(queue_size);\n  EXPECT_EQ(queue_size, queue.GetCapacity());\n}\n\nTEST_F(BlockingQueueTest, QueueFull) {\n  const 
int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  for (int i = 0; i < queue_size; i++) {\n    int value = i * i;\n    queue.Push(value);\n  }\n\n  /* None block */\n  int value = 0;\n  EXPECT_FALSE(queue.Push(value, -1));\n\n  /* wait for 100ms */\n  value = queue_size + 1;\n  auto start = std::chrono::high_resolution_clock::now();\n  EXPECT_FALSE(queue.Push(value, 100));\n  auto finish = std::chrono::high_resolution_clock::now();\n  auto elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n\n  /* wait until wakeup */\n  start = std::chrono::high_resolution_clock::now();\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    int value = -1;\n    queue.Pop(&value);\n    EXPECT_EQ(0, value);\n  });\n\n  /* check wait time */\n  EXPECT_TRUE(queue.Push(value, 0));\n  finish = std::chrono::high_resolution_clock::now();\n  elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n}\n\nTEST_F(BlockingQueueTest, QueueFront) {\n  const int queue_size = 12;\n  BlockingQueue<std::shared_ptr<TestNumber>> queue(queue_size);\n\n  std::shared_ptr<TestNumber> value = nullptr;\n  EXPECT_FALSE(queue.Front(&value));\n  EXPECT_EQ(value, nullptr);\n\n  int push_num = 6;\n  for (int i = 0; i < push_num; i++) {\n    std::shared_ptr<TestNumber> value = std::make_shared<TestNumber>(i * i);\n    queue.Push(value);\n  }\n\n  value = nullptr;\n  EXPECT_EQ(push_num, queue.Size());\n  EXPECT_TRUE(queue.Front(&value));\n  EXPECT_EQ(*value, 0);\n  EXPECT_EQ(push_num, queue.Size());\n\n  for (int i = 0; i < push_num; i++) {\n    std::shared_ptr<TestNumber> value_front = nullptr;\n    std::shared_ptr<TestNumber> value_pop = nullptr;\n    EXPECT_TRUE(queue.Front(&value_front));\n    EXPECT_EQ(*value_front, i * i);\n    queue.Pop(&value_pop);\n    EXPECT_EQ(*value_pop, 
i * i);\n    EXPECT_EQ(*value_front, *value_pop);\n    EXPECT_EQ(*value, 0);\n  }\n}\n\nTEST_F(BlockingQueueTest, QueueEmpty) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  /* none blocking */\n  int value = -1;\n  bool ret = queue.Pop(&value, -1);\n  EXPECT_FALSE(ret);\n  EXPECT_TRUE(queue.Empty());\n\n  /* wait until wakeup */\n  auto start = std::chrono::high_resolution_clock::now();\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    int value = 1;\n    queue.Push(value);\n  });\n  EXPECT_TRUE(queue.Pop(&value, 0));\n  EXPECT_EQ(value, 1);\n  EXPECT_TRUE(queue.Empty());\n  auto finish = std::chrono::high_resolution_clock::now();\n  auto elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n\n  /* wait timeout */\n  value = -1;\n  start = std::chrono::high_resolution_clock::now();\n  EXPECT_FALSE(queue.Pop(&value, 100));\n  EXPECT_EQ(errno, ETIMEDOUT);\n  EXPECT_EQ(value, -1);\n  EXPECT_TRUE(queue.Empty());\n  finish = std::chrono::high_resolution_clock::now();\n  elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n}\n\nTEST_F(BlockingQueueTest, ForcePush) {\n  const int queue_size = 1;\n  const int push_count = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  for (int i = 0; i < push_count; i++) {\n    int value = i * i;\n    queue.PushForce(value);\n  }\n\n  EXPECT_EQ(queue.Size(), push_count);\n  EXPECT_TRUE(queue.Full());\n\n  for (int i = 0; i < push_count; i++) {\n    int value;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, ForcePushBatch) {\n  const int queue_size = 1;\n  const int push_count = 12;\n  BlockingQueue<int> queue(queue_size);\n  std::vector<int> numbers;\n\n  for (int i = 0; i < push_count; i++) {\n    int value = i * i;\n    
numbers.push_back(value);\n  }\n\n  queue.PushBatchForce(&numbers);\n\n  EXPECT_EQ(queue.Size(), push_count);\n\n  for (int i = 0; i < push_count; i++) {\n    int value;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, ForcePushBatchWait) {\n  const int queue_size = 1;\n  const int push_count = 12;\n  BlockingQueue<int> queue(queue_size);\n  std::vector<int> numbers;\n\n  for (int i = 0; i < push_count; i++) {\n    int value = i * i;\n    numbers.push_back(value);\n  }\n\n  queue.PushBatchForce(&numbers);\n\n  for (int i = push_count; i < push_count * 2; i++) {\n    int value = i * i;\n    numbers.push_back(value);\n  }\n\n  auto start = std::chrono::high_resolution_clock::now();\n  queue.PushBatchForce(&numbers, true, 20);\n  auto finish = std::chrono::high_resolution_clock::now();\n  auto elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 20);\n  EXPECT_EQ(queue.Size(), push_count);\n\n  for (int i = 0; i < push_count; i++) {\n    int value;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n}\n\nTEST_F(BlockingQueueTest, QueueShutdown) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  /* none blocking */\n  int value = -1;\n  bool ret = queue.Pop(&value, -1);\n  EXPECT_FALSE(ret);\n\n  /* wait until wakeup */\n  auto start = std::chrono::high_resolution_clock::now();\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    queue.Shutdown();\n  });\n\n  /* wakeup */\n  EXPECT_FALSE(queue.Pop(&value, 0));\n  EXPECT_EQ(value, -1);\n  EXPECT_TRUE(queue.IsShutdown());\n  auto finish = std::chrono::high_resolution_clock::now();\n  auto elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n\n  BlockingQueue<int> queueTwo(queue_size);\n  for (int i = 0; i < queue_size; i++) {\n    
int value = i * i;\n    queueTwo.Push(value);\n  }\n\n  /* wait until wakeup */\n  start = std::chrono::high_resolution_clock::now();\n  result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    queueTwo.Shutdown();\n  });\n\n  /* wakeup */\n  value = queue_size + 1;\n  EXPECT_FALSE(queueTwo.Push(value, 0));\n  EXPECT_TRUE(queueTwo.IsShutdown());\n  finish = std::chrono::high_resolution_clock::now();\n  elapsed =\n      std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);\n  EXPECT_GE(elapsed.count(), 100);\n\n  for (int i = 0; i < queue_size; i++) {\n    int value = i * i;\n    queueTwo.Pop(&value);\n    EXPECT_EQ(value, i * i);\n  }\n\n  value = -1;\n  EXPECT_FALSE(queueTwo.Pop(&value));\n  EXPECT_EQ(value, -1);\n}\n\nTEST_F(BlockingQueueTest, Wakeup) {\n  const int queue_size = 12;\n  BlockingQueue<int> queue(queue_size);\n\n  /* wait until wakeup */\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    queue.Wakeup();\n  });\n\n  auto func = [&]() {\n    int value = -1;\n    auto ret = queue.Pop(&value);\n    EXPECT_FALSE(ret);\n    EXPECT_EQ(errno, EINTR);\n    EXPECT_EQ(value, -1);\n  };\n\n  std::thread Wait(func);\n\n  func();\n  Wait.join();\n}\n\nTEST_F(BlockingQueueTest, ConsumerProducer) {\n  int queue_size = 10;\n  int loop = 10000;\n  int total_sum1 = 0;\n  int total_sum2 = 0;\n  int expect_sum1 = 0;\n  int expect_sum2 = 0;\n\n  BlockingQueue<TestNumber> queue(queue_size);\n\n  std::thread producer1([&]() {\n    for (int i = 0; i < loop; i++) {\n      TestNumber value = i;\n      queue.Push(value);\n      expect_sum1 += i;\n    }\n  });\n\n  std::thread producer2([&]() {\n    for (int i = 0; i < loop; i++) {\n      TestNumber value = i;\n      queue.Push(value);\n      expect_sum2 += i;\n    }\n  });\n\n  std::thread consumer1([&]() {\n    while (true) {\n      TestNumber value = -1;\n      
auto ret = queue.Pop(&value);\n      if (ret == false) {\n        break;\n      }\n\n      total_sum1 += value.Get();\n    }\n  });\n\n  std::thread consumer2([&]() {\n    while (true) {\n      TestNumber value = -1;\n      auto ret = queue.Pop(&value);\n      if (ret == false) {\n        break;\n      }\n\n      total_sum2 += value.Get();\n    }\n  });\n\n  producer1.join();\n  producer2.join();\n  queue.Shutdown();\n  consumer1.join();\n  consumer2.join();\n  EXPECT_EQ(expect_sum1 + expect_sum2, total_sum1 + total_sum2);\n}\n\nTEST_F(PriorityBlockingQueueTest, PushPriorityCheck) {\n  const int queue_size = 12;\n  PriorityBlockingQueue<int> queue(queue_size);\n\n  for (int i = 1; i <= queue_size; i++) {\n    queue.Push(i);\n  }\n  int value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, queue_size);\n\n  EXPECT_EQ(queue_size, queue.Size());\n  for (int i = queue_size; i > 0; i--) {\n    int value = -1;\n    queue.Pop(&value);\n    EXPECT_EQ(i, value);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, PopBatch) {\n  const int queue_size = 12;\n  const int loop = 8;\n\n  PriorityBlockingQueue<int> queue(queue_size * loop);\n\n  for (int i = 1; i <= queue_size; i++) {\n    for (int j = 0; j < loop; j++) {\n      queue.Push(i);\n    }\n  }\n  int value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 12);\n\n  EXPECT_EQ(queue_size * loop, queue.Size());\n  for (int i = queue_size; i > 0; i--) {\n    std::vector<int> out_nums;\n    queue.PopBatch(&out_nums);\n    EXPECT_EQ(loop, out_nums.size());\n    EXPECT_EQ(i, out_nums[0]);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, PopBatchMaxNum) {\n  const int queue_size = 12;\n  const int loop = 8;\n  const int first_pop_num = 3;\n\n  PriorityBlockingQueue<int> queue(queue_size * loop);\n\n  for (int i = 1; i <= queue_size; i++) {\n    for (int j = 0; j < loop; j++) {\n      queue.Push(i);\n    }\n  }\n  int value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, 12);\n\n  EXPECT_EQ(queue_size * loop, queue.Size());\n  for 
(int i = queue_size; i > 0; i--) {\n    std::vector<int> out_nums;\n    auto ret = queue.PopBatch(&out_nums, -1, first_pop_num);\n    EXPECT_EQ(first_pop_num, ret);\n    ret = queue.PopBatch(&out_nums, -1);\n    EXPECT_EQ(loop - first_pop_num, ret);\n    EXPECT_EQ(loop, out_nums.size());\n    EXPECT_EQ(i, out_nums[0]);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, PushPriorityCustomCompare) {\n  const int queue_size = 12;\n  struct CustomCompare {\n    auto operator()(TestNumber const& a, TestNumber const& b) const -> bool {\n      return a <= b;\n    }\n  };\n\n  PriorityBlockingQueue<TestNumber, CustomCompare> queue(queue_size);\n\n  for (int i = 1; i <= queue_size; i++) {\n    TestNumber value = i;\n    queue.Push(value);\n  }\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, queue_size);\n\n  EXPECT_EQ(queue_size, queue.Size());\n  for (int i = queue_size; i > 0; i--) {\n    TestNumber value = -1;\n    queue.Pop(&value);\n    EXPECT_EQ(value, i);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, PopBatchCheckOrder) {\n  const int queue_size = 2;\n  const int loop = 4;\n  PriorityBlockingQueue<TestNumber> queue(queue_size * loop);\n\n  for (int i = 1; i <= queue_size; i++) {\n    for (int j = 0; j < loop; j++) {\n      TestNumber value = i;\n      value.SetPrivate(j);\n      queue.Push(value);\n    }\n  }\n  TestNumber value = -1;\n  queue.Front(&value);\n  EXPECT_EQ(value, queue_size);\n\n  EXPECT_EQ(queue_size * loop, queue.Size());\n  for (int i = queue_size; i > 0; i--) {\n    std::vector<TestNumber> out_nums;\n    queue.PopBatch(&out_nums);\n    EXPECT_EQ(loop, out_nums.size());\n    for (int j = 0; j < loop; j++) {\n      EXPECT_EQ(out_nums[j].GetPrivate(), j);\n    }\n    EXPECT_EQ(out_nums[0], i);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, SharedPtrPopBatchCheckOrder) {\n  const int queue_size = 2;\n  const int loop = 4;\n\n  struct CustomCompare {\n    auto operator()(std::shared_ptr<TestNumber> const& a,\n                    
std::shared_ptr<TestNumber> const& b) const -> bool {\n      return a->Get() < b->Get();\n    }\n  };\n\n  PriorityBlockingQueue<std::shared_ptr<TestNumber>, CustomCompare> queue(\n      queue_size * loop);\n\n  for (int i = 1; i <= queue_size; i++) {\n    for (int j = 0; j < loop; j++) {\n      std::shared_ptr<TestNumber> value = std::make_shared<TestNumber>(i);\n      value->SetPrivate(j);\n      queue.Push(value);\n    }\n  }\n\n  std::shared_ptr<TestNumber> value = std::make_shared<TestNumber>(-1);\n  queue.Front(&value);\n  EXPECT_EQ(*value, queue_size);\n\n  EXPECT_EQ(queue_size * loop, queue.Size());\n  for (int i = queue_size; i > 0; i--) {\n    std::vector<std::shared_ptr<TestNumber>> out_nums;\n    queue.PopBatch(&out_nums);\n    EXPECT_EQ(loop, out_nums.size());\n    for (int j = 0; j < loop; j++) {\n      EXPECT_EQ(out_nums[j]->GetPrivate(), j);\n    }\n    EXPECT_EQ(*out_nums[0], i);\n  }\n}\n\nTEST_F(PriorityBlockingQueueTest, ConsumerProducerBatch) {\n  int queue_size = 5;\n  int loop = 10;\n  int total_sum1 = 0;\n  int total_sum2 = 0;\n  int expect_sum1 = 0;\n  int expect_sum2 = 0;\n\n  PriorityBlockingQueue<TestNumber> queue(queue_size);\n\n  std::thread producer1([&]() {\n    std::vector<TestNumber> in_nums;\n    for (int i = 0; i < loop; i++) {\n      TestNumber value = i;\n      in_nums.push_back(value);\n      expect_sum1 += i;\n      if (i % queue_size == 0) {\n        queue.PushBatch(&in_nums);\n      }\n    }\n    queue.PushBatch(&in_nums);\n  });\n\n  std::thread producer2([&]() {\n    for (int i = 0; i < loop; i++) {\n      TestNumber value = i;\n      queue.Push(value);\n      expect_sum2 += i;\n    }\n  });\n\n  std::thread consumer1([&]() {\n    while (true) {\n      TestNumber value = -1;\n      auto ret = queue.Pop(&value);\n      if (ret == false) {\n        break;\n      }\n\n      total_sum1 += value.Get();\n    }\n  });\n\n  std::thread consumer2([&]() {\n    while (true) {\n      std::vector<TestNumber> out_nums;\n      auto ret 
= queue.PopBatch(&out_nums);\n      if (ret == false) {\n        break;\n      }\n\n      for (auto& out_num : out_nums) {\n        total_sum2 += out_num.Get();\n      }\n    }\n  });\n\n  producer1.join();\n  producer2.join();\n  queue.Shutdown();\n  consumer1.join();\n  consumer2.join();\n  EXPECT_EQ(expect_sum1 + expect_sum2, total_sum1 + total_sum2);\n}\n\nTEST_F(PriorityBlockingQueueTest, Perf) {\n  int total_count = 0;\n  int expect_count = 0;\n  unsigned long begin;\n  unsigned long end;\n  bool stop = false;\n\n  PriorityBlockingQueue<TestNumber> queue(8192);\n\n  begin = GetTickCount();\n  std::thread producer([&]() {\n    std::vector<TestNumber> in_nums;\n    int i = 0;\n    while (stop == false) { // NOLINT\n      TestNumber value = i++;\n      in_nums.push_back(value);\n      expect_count += 1;\n      queue.PushBatch(&in_nums);\n    }\n  });\n\n  std::thread consumer([&]() {\n    while (true) {\n      TestNumber value = -1;\n      auto ret = queue.Pop(&value);\n      if (ret == false) {\n        break;\n      }\n\n      total_count += 1;\n    }\n  });\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(500));\n  stop = true;\n  producer.join();\n  queue.Shutdown();\n  consumer.join();\n  end = GetTickCount();\n  EXPECT_EQ(expect_count, total_count);\n\n  MBLOG_INFO << \"total: \" << total_count;\n  MBLOG_INFO << \"ops: \" << 1.0 * total_count / (end - begin) * 1000.0;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/configuration_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/configuration.h\"\n\n#include <fstream>\n\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\nclass ConfigurationTest : public testing::Test {\n public:\n  ConfigurationTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nTEST_F(ConfigurationTest, AddPropertyTest) {\n  ConfigurationBuilder builder;\n  std::map<std::string, std::string> values = {{\"1\", \"aaa\"}, {\"2\", \"bbb\"}};\n\n  builder.AddProperty(\"graph.name\", \"111111\");\n  builder.AddProperty(\"graph.name\", \"123123\");\n  builder.AddProperty(\"graph.node.index\", \"abcabc\");\n  builder.AddProperties(values);\n  auto config = builder.Build();\n\n  EXPECT_TRUE(config->Contain(\"graph.name\"));\n  EXPECT_FALSE(config->Contain(\"graph.nameX\"));\n  EXPECT_EQ(config->GetString(\"graph.name\"), \"123123\");\n  EXPECT_EQ(config->GetString(\"graph.node.index\"), \"abcabc\");\n  EXPECT_EQ(config->GetString(\"graph.nokey\"), \"\");\n  EXPECT_EQ(config->GetString(\"1\"), \"aaa\");\n  EXPECT_EQ(config->GetString(\"2\"), \"bbb\");\n  EXPECT_EQ(config->GetString(\"3\", \"cc\"), \"cc\");\n}\n\nTEST_F(ConfigurationTest, GetBoolTest) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(\"1\", \"false\");\n  builder.AddProperty(\"2\", \"true\");\n  
builder.AddProperty(\"3\", \"0\");\n  builder.AddProperty(\"4\", \"1\");\n  builder.AddProperty(\"5\", \"!D!DSA\");\n  builder.AddProperty(\"6\", \"01s12\");\n  builder.AddProperty(\"7\", \"\");\n  auto config = builder.Build();\n\n  EXPECT_FALSE(config->GetBool(\"1\"));\n  EXPECT_TRUE(config->GetBool(\"2\"));\n  EXPECT_FALSE(config->GetBool(\"3\"));\n  EXPECT_TRUE(config->GetBool(\"4\"));\n  EXPECT_FALSE(config->GetBool(\"5\"));\n  EXPECT_TRUE(config->GetBool(\"6\", true));\n  EXPECT_FALSE(config->GetBool(\"7\"));\n  EXPECT_FALSE(config->GetBool(\"8\"));\n  EXPECT_TRUE(config->GetBool(\"9\", true));\n}\n\nTEST_F(ConfigurationTest, GetIntTest) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(\"1\", \"12aa12\");\n  builder.AddProperty(\"2\", \"123a\");\n  builder.AddProperty(\"3\", \"123.0\");\n  builder.AddProperty(\"4\", \"123-0\");\n  builder.AddProperty(\"5\", \"0x123\");\n  builder.AddProperty(\"6\", \"123b\");\n  builder.AddProperty(\"7\", \"\");\n  builder.AddProperty(\"8\", \"a!@\");\n  builder.AddProperty(\"9\", \"123.123.123\");\n  builder.AddProperty(\"10\", \"99999999999999999999\");\n\n  auto invalidConfig = builder.Build();\n\n  for (size_t i = 0; i <= invalidConfig->Size(); ++i) {\n    EXPECT_EQ(invalidConfig->GetInt8(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetUint8(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetInt16(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetUint16(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetInt32(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetUint32(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetInt64(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetUint64(std::to_string(i)), 0);\n  }\n\n  std::map<std::string, std::string> range_test = {\n      {\"0\", \"0\"},\n      {\"1\", \"+1\"},\n      {\"2\", \"-1\"},\n      {\"3\", \"127\"},\n      {\"4\", \"-128\"},\n      {\"5\", \"255\"},\n      {\"6\", \"32767\"},\n      {\"7\", \"-32768\"},\n      
{\"8\", \"65535\"},\n      {\"9\", \"2147483647\"},\n      {\"10\", \"-2147483648\"},\n      {\"11\", \"4294967295\"},\n      {\"12\", \"9223372036854775807\"},\n      {\"13\", \"-9223372036854775808\"},\n      {\"14\", \"18446744073709551615\"}};\n  builder.AddProperties(range_test);\n  auto range_test_config = builder.Build();\n\n  std::vector<int8_t> int8_result = {0, 1, -1, INT8_MAX, INT8_MIN, 0, 0, 0,\n                                     0, 0, 0,  0,        0,        0, 0};\n  std::vector<uint8_t> uint8_result = {0, 1, 0, INT8_MAX, 0, UINT8_MAX, 0, 0,\n                                       0, 0, 0, 0,        0, 0,         0};\n  std::vector<int16_t> int16_result = {\n      0, 1, -1, INT8_MAX, INT8_MIN, UINT8_MAX, INT16_MAX, INT16_MIN,\n      0, 0, 0,  0,        0,        0,         0};\n  std::vector<uint16_t> uint16_result = {\n      0,          1, 0, INT8_MAX, 0, UINT8_MAX, INT16_MAX, 0,\n      UINT16_MAX, 0, 0, 0,        0, 0,         0};\n  std::vector<int32_t> int32_result = {\n      0,         1,         -1,        INT8_MAX,   INT8_MIN,\n      UINT8_MAX, INT16_MAX, INT16_MIN, UINT16_MAX, INT32_MAX,\n      INT32_MIN, 0,         0,         0,          0};\n  std::vector<uint32_t> uint32_result = {\n      0,          1,         0, INT8_MAX,   0, UINT8_MAX, INT16_MAX, 0,\n      UINT16_MAX, INT32_MAX, 0, UINT32_MAX, 0, 0,         0};\n  std::vector<int64_t> int64_result = {\n      0,         1,          -1,        INT8_MAX,   INT8_MIN,\n      UINT8_MAX, INT16_MAX,  INT16_MIN, UINT16_MAX, INT32_MAX,\n      INT32_MIN, UINT32_MAX, INT64_MAX, INT64_MIN,  0};\n  std::vector<uint64_t> uint64_result = {\n      0,          1,         0, INT8_MAX,   0,         UINT8_MAX, INT16_MAX, 0,\n      UINT16_MAX, INT32_MAX, 0, UINT32_MAX, INT64_MAX, 0,         UINT64_MAX};\n\n  for (size_t i = 0; i < range_test_config->Size(); ++i) {\n    EXPECT_EQ(range_test_config->GetInt8(std::to_string(i)), int8_result[i]);\n    EXPECT_EQ(range_test_config->GetUint8(std::to_string(i)), 
uint8_result[i]);\n    EXPECT_EQ(range_test_config->GetInt16(std::to_string(i)), int16_result[i]);\n    EXPECT_EQ(range_test_config->GetUint16(std::to_string(i)),\n              uint16_result[i]);\n    EXPECT_EQ(range_test_config->GetInt32(std::to_string(i)), int32_result[i]);\n    EXPECT_EQ(range_test_config->GetUint32(std::to_string(i)),\n              uint32_result[i]);\n    EXPECT_EQ(range_test_config->GetInt64(std::to_string(i)), int64_result[i]);\n    EXPECT_EQ(range_test_config->GetUint64(std::to_string(i)),\n              uint64_result[i]);\n  }\n}\n\nTEST_F(ConfigurationTest, GetFloatTest) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(\"1\", \"12aa12\");\n  builder.AddProperty(\"2\", \"123a\");\n  builder.AddProperty(\"3\", \"123.0+2\");\n  builder.AddProperty(\"4\", \"1.23-0\");\n  builder.AddProperty(\"5\", \"0.x123\");\n  builder.AddProperty(\"6\", \"123.b\");\n  builder.AddProperty(\"7\", \"\");\n  builder.AddProperty(\"8\", \"a!@\");\n  builder.AddProperty(\"9\", \"123.123.123\");\n  auto invalidConfig = builder.Build();\n\n  for (size_t i = 0; i <= invalidConfig->Size(); ++i) {\n    EXPECT_EQ(invalidConfig->GetFloat(std::to_string(i)), 0);\n    EXPECT_EQ(invalidConfig->GetDouble(std::to_string(i)), 0);\n  }\n\n  builder.AddProperty(\"0\", \"0\");\n  builder.AddProperty(\"1\", \"1\");\n  builder.AddProperty(\"2\", \"-1\");\n  builder.AddProperty(\"3\", \"123456789\");\n  builder.AddProperty(\"4\", \"-123456789\");\n  builder.AddProperty(\"5\", \"12345.6789\");\n  builder.AddProperty(\"6\", \"-12345.6789\");\n  builder.AddProperty(\"7\", \"1.7e+30\");\n  builder.AddProperty(\"8\", \"-1.7e+30\");\n  builder.AddProperty(\"9\", \"1.7e-30\");\n  builder.AddProperty(\"10\", \"-1.7e-30\");\n  auto config = builder.Build();\n\n  std::vector<float> float_result = {\n      0,           1,       -1,       123456789.0, -123456789.0, 12345.6789,\n      -12345.6789, 1.7e+30, -1.7e+30, 1.7e-30,     -1.7e-30};\n  std::vector<double> double_result = {\n    
  0,           1,       -1,       123456789.0, -123456789.0, 12345.6789,\n      -12345.6789, 1.7e+30, -1.7e+30, 1.7e-30,     -1.7e-30};\n\n  for (size_t i = 0; i < config->Size(); ++i) {\n    EXPECT_EQ(config->GetFloat(std::to_string(i)), float_result[i]);\n    EXPECT_EQ(config->GetDouble(std::to_string(i)), double_result[i]);\n  }\n}\n\nTEST_F(ConfigurationTest, GetVectorTest) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(\"1\", std::string(\"1\") + LIST_DELIMITER + \"0\" +\n                               LIST_DELIMITER + \"true\" + LIST_DELIMITER +\n                               \"false\");\n  builder.AddProperty(\"2\", std::string(\"1\") + LIST_DELIMITER + \"0\" +\n                               LIST_DELIMITER + \"true\" + LIST_DELIMITER +\n                               \"false\" + LIST_DELIMITER + \"g\");\n  builder.AddProperty(\"3\", std::string(\"1\") + LIST_DELIMITER + \"0\" +\n                               LIST_DELIMITER + \"-3\" + LIST_DELIMITER + \"5\" +\n                               LIST_DELIMITER + \"-9\");\n  std::vector<int8_t> int_result = {1, 0, -3, 5, -9};\n  builder.AddProperty(\"4\", std::string(\"1\") + LIST_DELIMITER + \"0\" +\n                               LIST_DELIMITER + \"3\" + LIST_DELIMITER + \"5\" +\n                               LIST_DELIMITER + \"22\");\n  std::vector<uint8_t> uint_result = {1, 0, 3, 5, 22};\n  builder.AddProperty(\"5\", std::string(\"1.0\") + LIST_DELIMITER + \"0.0\" +\n                               LIST_DELIMITER + \"3.45645641\" + LIST_DELIMITER +\n                               \"551631.13124\" + LIST_DELIMITER + \"-22e+10\");\n  std::vector<float> float_result = {1.0, 0, 3.45645641, 551631.13124, -22e+10};\n  std::vector<double> double_result = {1.0, 0, 3.45645641, 551631.13124,\n                                       -22e+10};\n\n  auto config = builder.Build();\n\n  auto strings = config->GetStrings(\"1\");\n  EXPECT_EQ(strings.size(), 4);\n  EXPECT_EQ(strings[0], \"1\");\n  
EXPECT_EQ(strings[1], \"0\");\n  EXPECT_EQ(strings[2], \"true\");\n  EXPECT_EQ(strings[3], \"false\");\n\n  auto bools = config->GetBools(\"1\");\n  EXPECT_EQ(bools.size(), 4);\n  EXPECT_TRUE(bools[0]);\n  EXPECT_FALSE(bools[1]);\n  EXPECT_TRUE(bools[2]);\n  EXPECT_FALSE(bools[3]);\n  EXPECT_EQ(config->GetBools(\"2\").size(), 0);\n\n  auto int8s = config->GetInt8s(\"3\");\n  auto uint8s = config->GetUint8s(\"4\");\n  auto int16s = config->GetInt16s(\"3\");\n  auto uint16s = config->GetUint16s(\"4\");\n  auto int32s = config->GetInt32s(\"3\");\n  auto uint32s = config->GetUint32s(\"4\");\n  auto int64s = config->GetInt64s(\"3\");\n  auto uint64s = config->GetUint64s(\"4\");\n  auto floats = config->GetFloats(\"5\");\n  auto doubles = config->GetDoubles(\"5\");\n  EXPECT_EQ(int8s.size(), 5);\n  EXPECT_EQ(uint8s.size(), 5);\n  EXPECT_EQ(int16s.size(), 5);\n  EXPECT_EQ(uint16s.size(), 5);\n  EXPECT_EQ(int32s.size(), 5);\n  EXPECT_EQ(uint32s.size(), 5);\n  EXPECT_EQ(int64s.size(), 5);\n  EXPECT_EQ(uint64s.size(), 5);\n  EXPECT_EQ(floats.size(), 5);\n  EXPECT_EQ(doubles.size(), 5);\n  for (size_t i = 0; i < int8s.size(); ++i) {\n    EXPECT_EQ(int8s[i], int_result[i]);\n    EXPECT_EQ(uint8s[i], uint_result[i]);\n    EXPECT_EQ(int16s[i], int_result[i]);\n    EXPECT_EQ(uint16s[i], uint_result[i]);\n    EXPECT_EQ(int32s[i], int_result[i]);\n    EXPECT_EQ(uint32s[i], uint_result[i]);\n    EXPECT_EQ(int64s[i], int_result[i]);\n    EXPECT_EQ(uint64s[i], uint_result[i]);\n    EXPECT_EQ(floats[i], float_result[i]);\n    EXPECT_EQ(doubles[i], double_result[i]);\n  }\n\n  EXPECT_EQ(config->GetInt8s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetUint8s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetInt16s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetUint16s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetInt32s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetUint32s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetInt64s(\"1\").size(), 0);\n  EXPECT_EQ(config->GetUint64s(\"1\").size(), 0);\n  
EXPECT_EQ(config->GetFloats(\"1\").size(), 0);\n  EXPECT_EQ(config->GetDoubles(\"1\").size(), 0);\n}\n\nTEST_F(ConfigurationTest, SetPropertyTest) {\n  ConfigurationBuilder builder;\n  auto config = builder.Build();\n  config->SetProperty(\"1\", 1);\n  config->SetProperty(\"2\", 1.2F);\n  config->SetProperty(\"3\", 1.3);\n  config->SetProperty(\"4\", false);\n  config->SetProperty(\"5\", true);\n  config->SetProperty(\"6\", \"test\");\n\n  EXPECT_EQ(config->GetString(\"1\"), \"1\");\n  EXPECT_FLOAT_EQ(config->GetFloat(\"2\"), 1.2F);\n  EXPECT_FLOAT_EQ(config->GetDouble(\"3\"), 1.3);\n  EXPECT_EQ(config->GetString(\"4\"), \"0\");\n  EXPECT_EQ(config->GetString(\"5\"), \"1\");\n  EXPECT_EQ(config->GetString(\"6\"), \"test\");\n\n  config->SetProperty(\"5\", std::vector<int32_t>{1, 2, 3});\n  auto float_list = std::vector<float>{1.1F, 2.2F, 3.3F};\n  config->SetProperty(\"6\", float_list);\n\n  EXPECT_EQ(config->GetString(\"5\"),\n            std::string(\"1\") + LIST_DELIMITER + \"2\" + LIST_DELIMITER + \"3\");\n  auto res = config->GetFloats(\"6\");\n  EXPECT_EQ(res.size(), float_list.size());\n  for (size_t i = 0; i < float_list.size(); ++i) {\n    EXPECT_FLOAT_EQ(res[i], float_list[i]);\n  }\n}\n\nTEST_F(ConfigurationTest, SetPropertyWithoutBuilderTest) {\n  Configuration config;\n  config.SetProperty(\"1\", 1);\n  config.SetProperty(\"2\", 1.2F);\n  config.SetProperty(\"3\", 1.3);\n  config.SetProperty(\"4\", false);\n  config.SetProperty(\"5\", true);\n  config.SetProperty(\"6\", \"test\");\n\n  EXPECT_EQ(config.GetString(\"1\"), \"1\");\n  EXPECT_FLOAT_EQ(config.GetFloat(\"2\"), 1.2F);\n  EXPECT_FLOAT_EQ(config.GetDouble(\"3\"), 1.3);\n  EXPECT_EQ(config.GetString(\"4\"), \"0\");\n  EXPECT_EQ(config.GetString(\"5\"), \"1\");\n  EXPECT_EQ(config.GetString(\"6\"), \"test\");\n}\n\nTEST_F(ConfigurationTest, GetSubKeysTest) {\n  ConfigurationBuilder builder;\n  auto config = builder.Build();\n  EXPECT_EQ(StatusError, STATUS_SUCCESS);\n\n  
config->SetProperty(\"graph.node.1\", 1);\n  config->SetProperty(\"graph.node.2\", 1.2F);\n  config->SetProperty(\"device.gpu.0\", 1.3);\n  config->SetProperty(\"device.gpu.1\", false);\n  config->SetProperty(\"graph.edge.in.1\", true);\n  config->SetProperty(\"graph.edge.in.2\", \"test\");\n  config->SetProperty(\"graph.edge.out.1\", \"test\");\n  config->SetProperty(\"graph.edge.out.2\", \"test\");\n  config->SetProperty(\"graph.edge.out.3\", \"test\");\n\n  auto res = config->GetSubKeys(\"graph\");\n  EXPECT_EQ(res.size(), 2);\n  std::set<std::string> expect_res = {\"node\", \"edge\"};\n  auto is_equal = std::equal(res.begin(), res.end(), expect_res.begin());\n  EXPECT_TRUE(is_equal);\n\n  res = config->GetSubKeys(\"graph.edge\");\n  EXPECT_EQ(res.size(), 2);\n  expect_res = {\"in\", \"out\"};\n  is_equal = std::equal(res.begin(), res.end(), expect_res.begin());\n  EXPECT_TRUE(is_equal);\n\n  res = config->GetSubKeys(\"graph.edge.out\");\n  EXPECT_EQ(res.size(), 3);\n  expect_res = {\"1\", \"2\", \"3\"};\n  is_equal = std::equal(res.begin(), res.end(), expect_res.begin());\n  EXPECT_TRUE(is_equal);\n\n  res = config->GetSubKeys(\"graph.\");\n  EXPECT_EQ(res.size(), 0);\n\n  res = config->GetSubKeys(\"graph.nod\");\n  EXPECT_EQ(res.size(), 0);\n\n  res = config->GetSubKeys(\"graph.node.1\");\n  EXPECT_EQ(res.size(), 0);\n\n  res = config->GetSubKeys(\"graph.node.1.2\");\n  EXPECT_EQ(res.size(), 0);\n\n  res = config->GetSubKeys(\"\");\n  EXPECT_EQ(res.size(), 0);\n\n  res = config->GetSubKeys(\"node\");\n  EXPECT_EQ(res.size(), 0);\n}\n\nTEST_F(ConfigurationTest, GetConfigKeysTest) {\n  ConfigurationBuilder builder;\n  builder.AddProperties({{\"1\", \"1\"}, {\"2\", \"2\"}, {\"3\", \"3\"}});\n  auto config = builder.Build();\n  EXPECT_EQ(StatusError, STATUS_SUCCESS);\n\n  auto keys = config->GetKeys();\n  EXPECT_EQ(keys.size(), 3);\n  EXPECT_NE(keys.find(\"1\"), keys.end());\n  EXPECT_NE(keys.find(\"2\"), keys.end());\n  EXPECT_NE(keys.find(\"3\"), keys.end());\n  
EXPECT_EQ(keys.find(\"4\"), keys.end());\n}\n\nTEST_F(ConfigurationTest, GetSubConfigTest) {\n  ConfigurationBuilder builder;\n  auto config = builder.Build();\n  EXPECT_EQ(StatusError, STATUS_SUCCESS);\n\n  config->SetProperty(\"graph.node.1\", 1);\n  config->SetProperty(\"graph.node.2\", 1.2F);\n  config->SetProperty(\"device.gpu.0\", 1.3);\n  config->SetProperty(\"device.gpu.1\", false);\n  config->SetProperty(\"graph.edge.in.1\", true);\n  config->SetProperty(\"graph.edge.in.2\", \"test\");\n  config->SetProperty(\"graph.edge.out.1\", \"test\");\n  config->SetProperty(\"graph.edge.out.2\", \"test\");\n  config->SetProperty(\"graph.edge.out.3\", \"test\");\n\n  auto sub_config = config->GetSubConfig(\"device\");\n  EXPECT_EQ(sub_config->Size(), 2);\n  EXPECT_EQ(sub_config->GetString(\"gpu.0\"), \"1.3\");\n  EXPECT_EQ(sub_config->GetString(\"gpu.1\"), \"0\");\n  EXPECT_EQ(sub_config->GetString(\"node.1\"), \"\");\n\n  sub_config = config->GetSubConfig(\"graph.edge.out\");\n  EXPECT_EQ(sub_config->Size(), 3);\n  EXPECT_EQ(sub_config->GetString(\"1\"), \"test\");\n  EXPECT_EQ(sub_config->GetString(\"2\"), \"test\");\n  EXPECT_EQ(sub_config->GetString(\"3\"), \"test\");\n  EXPECT_EQ(sub_config->GetString(\"in.1\"), \"\");\n\n  sub_config = config->GetSubConfig(\"graph.node.1\");\n  EXPECT_EQ(sub_config->Size(), 0);\n  EXPECT_EQ(sub_config->GetString(\"graph.node.1\"), \"\");\n  EXPECT_EQ(sub_config->GetString(\"graph.node.2\"), \"\");\n\n  sub_config = config->GetSubConfig(\"graph.nothing\");\n  EXPECT_EQ(sub_config->Size(), 0);\n}\n\nTEST_F(ConfigurationTest, BuildFromTomlTest) {\n  std::string toml_content = R\"(\n    [device]\n    cpu = \"x86\"\n    freq = \"3.5GHZ\"\n    [device.cpu1]\n    cap = 123123\n    [device.cpu1.detail]\n    vendor=1.3\n    sec=false\n    [graph]\n    data = \"123123\"\n    type = \"graphviz\"\n  )\";\n  std::string toml_file_path =\n      std::string(TEST_DATA_DIR) + \"/configure_test.toml\";\n  std::ofstream ofs(toml_file_path);\n  
EXPECT_TRUE(ofs.is_open());\n  ofs.write(toml_content.data(), toml_content.size());\n  ofs.flush();\n  ofs.close();\n  Defer { remove(toml_file_path.c_str()); };\n\n  ConfigurationBuilder builder;\n  auto config = builder.Build(toml_file_path);\n  EXPECT_EQ(StatusError, STATUS_SUCCESS);\n\n  EXPECT_EQ(config->Size(), 7);\n  EXPECT_EQ(config->GetString(\"device.cpu\"), \"x86\");\n  EXPECT_EQ(config->GetString(\"device.freq\"), \"3.5GHZ\");\n  EXPECT_EQ(config->GetString(\"device.cpu1.cap\"), \"123123\");\n  EXPECT_EQ(config->GetString(\"device.cpu1.detail.vendor\"), \"1.3\");\n  EXPECT_EQ(config->GetString(\"device.cpu1.detail.sec\"), \"false\");\n  EXPECT_EQ(config->GetString(\"graph.data\"), \"123123\");\n  EXPECT_EQ(config->GetString(\"graph.type\"), \"graphviz\");\n\n  std::set<std::string> expect_value{\"cpu\", \"freq\", \"cpu1\"};\n  EXPECT_EQ(config->GetSubKeys(\"device\"), expect_value);\n\n  std::set<std::string> expect_value2{\"data\", \"type\"};\n  EXPECT_EQ(config->GetSubKeys(\"graph\"), expect_value2);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/crypto_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/crypto.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <mutex>\n#include <string>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nclass CryptoTest : public testing::Test {\n public:\n  CryptoTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nTEST_F(CryptoTest, Base64) {\n  char data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};\n  int len = sizeof(data);\n  std::string base64_text;\n  std::vector<unsigned char> in(&data[0], &data[len]);\n  std::vector<unsigned char> out;\n  EXPECT_TRUE(Base64Encode(in, &base64_text));\n  EXPECT_TRUE(Base64Decode(base64_text, &out));\n  MBLOG_INFO << \"Base64: \" << base64_text;\n  for (unsigned int i = 0; i < out.size(); i++) {\n    EXPECT_EQ(data[i], out[i]);\n  }\n}\nTEST_F(CryptoTest, AesEncryptPass) {\n  std::string str = \"password\";\n  std::vector<char> pass(str.begin(), str.end());\n  std::string rootkey;\n  std::string enpass;\n\n  EXPECT_EQ(PassEncrypt(pass, true, &rootkey, &enpass), STATUS_OK);\n\n  std::vector<char> outpass;\n  EXPECT_EQ(PassDecrypt(enpass, rootkey, &outpass), STATUS_OK);\n  EXPECT_EQ(pass, outpass);\n}\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/device_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/device.h\"\n\n#include <chrono>\n#include <memory>\n#include <string>\n#include <thread>\n\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/status.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\n\nclass DeviceManagerTest : public testing::Test {\n public:\n  DeviceManagerTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    drivers->Clear();\n  };\n};\n\nclass DeviceMemoryTest : public testing::Test {\n public:\n  DeviceMemoryTest() = default;\n\n protected:\n  void SetUp() override {\n    auto drivers = Drivers::GetInstance();\n    ConfigurationBuilder config_builder;\n\n    auto device_cpu_src_path = std::string(DEVICE_CPU_SO_PATH);\n    auto device_cpu_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n    CopyFile(device_cpu_src_path, device_cpu_dest_path, 0, 
true);\n\n    drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_mgr->Initialize(drivers, config_builder.Build());\n    device_ = device_mgr->CreateDevice(\"cpu\", \"0\");\n\n    auto device_cuda_src_path = std::string(DEVICE_CUDA_SO_PATH);\n    device_cuda_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cuda.so\";\n    CopyFile(device_cuda_src_path, device_cuda_dest_path, 0, true);\n\n    auto device_ascend_src_path = std::string(DEVICE_ASCEND_SO_PATH);\n    device_ascend_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-ascend.so\";\n    CopyFile(device_ascend_src_path, device_ascend_dest_path, 0, true);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_ = nullptr;\n    device_mgr->Clear();\n    drivers->Clear();\n\n    remove(device_cuda_dest_path.c_str());\n  };\n\n  std::shared_ptr<Device> device_;\n  std::string device_cuda_dest_path;\n  std::string device_ascend_dest_path;\n};\n\nTEST_F(DeviceManagerTest, CheckInit) {\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  auto device = device_mgr->CreateDevice(\"cpu\", \"0\");\n  EXPECT_EQ(device, nullptr);\n}\n\nTEST_F(DeviceManagerTest, InitDeviceFactory) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, 
\"libmodelbox-device-cpu.so\");\n\n  EXPECT_TRUE(result);\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  Status result1 = device_mgr->InitDeviceFactory(drivers);\n  auto factory_list = device_mgr->GetDeviceFactoryList();\n  for (auto iter = factory_list.begin(); iter != factory_list.end(); iter++) {\n    EXPECT_EQ(iter->first, \"cpu\");\n    EXPECT_NE(iter->second, nullptr);\n  }\n}\n\nTEST_F(DeviceManagerTest, Probe) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n\n  EXPECT_TRUE(result);\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  Status status1 = device_mgr->InitDeviceFactory(drivers);\n  auto cpu_factory = device_mgr->GetDeviceFactoryList().begin();\n  auto mock_factory =\n      std::dynamic_pointer_cast<MockDeviceFactory>(cpu_factory->second);\n  EXPECT_CALL(*mock_factory, DeviceProbe())\n      .WillRepeatedly(testing::Invoke([&]() {\n        std::map<std::string, std::shared_ptr<DeviceDesc>> tmp_map;\n        std::shared_ptr<DeviceDesc> device_desc =\n            std::make_shared<DeviceDesc>();\n        device_desc->SetDeviceId(\"0\");\n        device_desc->SetDeviceDesc(\"test desc\");\n        device_desc->SetDeviceMemory(\"8Gi\");\n        device_desc->SetDeviceVersion(\"xxxx\");\n        device_desc->SetDeviceType(\"CPU\");\n        tmp_map.insert(std::make_pair(\"0\", device_desc));\n        return tmp_map;\n      }));\n\n  Status status2 = device_mgr->DeviceProbe();\n  
EXPECT_EQ(status1, STATUS_OK);\n  EXPECT_EQ(status2, STATUS_OK);\n\n  auto desc_list = device_mgr->GetDeviceDescList();\n  auto iter1 = desc_list.find(\"cpu\");\n  EXPECT_EQ(iter1->first, \"cpu\");\n\n  auto iter2 = iter1->second.find(\"0\");\n  EXPECT_EQ(iter2->first, \"0\");\n  auto device_desc = iter2->second;\n  EXPECT_EQ(device_desc->GetDeviceDesc(), \"test desc\");\n  EXPECT_EQ(device_desc->GetDeviceId(), \"0\");\n  EXPECT_EQ(device_desc->GetDeviceMemory(), \"8Gi\");\n  EXPECT_EQ(device_desc->GetDeviceType(), \"CPU\");\n}\n\nTEST_F(DeviceManagerTest, CreateDevice) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n  EXPECT_TRUE(result);\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  Status status1 = device_mgr->InitDeviceFactory(drivers);\n\n  auto cpu_factory = device_mgr->GetDeviceFactoryList().begin();\n  auto mock_factory =\n      std::dynamic_pointer_cast<MockDeviceFactory>(cpu_factory->second);\n  EXPECT_CALL(*mock_factory, DeviceProbe())\n      .WillRepeatedly(testing::Invoke([&]() {\n        std::map<std::string, std::shared_ptr<DeviceDesc>> tmp_map;\n        std::shared_ptr<DeviceDesc> device_desc =\n            std::make_shared<DeviceDesc>();\n        device_desc->SetDeviceId(\"0\");\n        device_desc->SetDeviceDesc(\"test desc\");\n        device_desc->SetDeviceMemory(\"8Gi\");\n        device_desc->SetDeviceVersion(\"xxxx\");\n        device_desc->SetDeviceType(\"CPU\");\n        
tmp_map.insert(std::make_pair(\"0\", device_desc));\n        return tmp_map;\n      }));\n\n  Status status2 = device_mgr->DeviceProbe();\n  auto ss = device_mgr->GetDevicesTypes();\n  EXPECT_EQ(ss[0], \"cpu\");\n\n  auto device_null = device_mgr->GetDevice(\"cpu\", \"0\");\n  EXPECT_EQ(device_null, nullptr);\n\n  EXPECT_CALL(*mock_factory, CreateDevice(_))\n      .WillRepeatedly(testing::Invoke([&](const std::string &device_id) {\n        std::shared_ptr<MockDevice> temp_mockdevice =\n            std::make_shared<MockDevice>();\n        return temp_mockdevice;\n      }));\n\n  auto device = device_mgr->CreateDevice(\"cpu\", \"0\");\n  EXPECT_NE(device->GetDeviceManager(), nullptr);\n  auto device_desc = device->GetDeviceDesc();\n  EXPECT_EQ(device_desc->GetDeviceDesc(), \"test desc\");\n  EXPECT_EQ(device_desc->GetDeviceId(), \"0\");\n  EXPECT_EQ(device_desc->GetDeviceMemory(), \"8Gi\");\n  EXPECT_EQ(device_desc->GetDeviceVersion(), \"xxxx\");\n  EXPECT_EQ(device_desc->GetDeviceType(), \"CPU\");\n\n  auto device_sec = device_mgr->CreateDevice(\"cpu\", \"0\");\n  EXPECT_EQ(device, device_sec);\n\n  auto device_get = device_mgr->GetDevice(\"cpu\", \"0\");\n  EXPECT_EQ(device, device_get);\n\n  auto test = device_mgr->GetDevicesIdList(\"cpu\");\n  EXPECT_EQ(test[0], \"0\");\n}\n\nTEST_F(DeviceManagerTest, CreateDeviceMemory) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n\n  EXPECT_TRUE(result);\n  
std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  device_mgr->Initialize(drivers, configbuilder.Build());\n  EXPECT_EQ(device_mgr->GetDrivers(), drivers);\n  auto device = device_mgr->CreateDevice(\"cpu\", \"0\");\n  device->SetMemQuota(1024);\n  auto device_memory = device->MemAlloc(100);\n\n  EXPECT_EQ(*((uint64_t *)(device_memory->GetConstPtr<uint8_t>().get() + 100)),\n            DeviceMemory::MEM_MAGIC_CODE);\n}\n\nTEST_F(DeviceMemoryTest, MemAlloc) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(1024);\n  EXPECT_NE(mem1, nullptr);\n  EXPECT_EQ(mem1->GetSize(), 1024);\n  EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n  EXPECT_NE(mem1->GetPtr<void>().get(), nullptr);\n  EXPECT_NE(mem1->GetConstPtr<void>().get(), nullptr);\n\n  EXPECT_EQ(device_->GetAllocatedMemSize(), 1024);\n\n  auto mem2 = device_->MemAlloc(0);\n  EXPECT_NE(mem2, nullptr);\n  EXPECT_EQ(mem2->GetSize(), 0);\n  EXPECT_EQ(mem2->Verify(), STATUS_SUCCESS);\n  EXPECT_EQ(mem2->GetPtr<void>().get(), nullptr);\n  EXPECT_EQ(mem2->GetConstPtr<void>().get(), nullptr);\n\n  auto mem3 = device_->MemAlloc(1);\n  EXPECT_EQ(mem3.get(), nullptr);\n\n  EXPECT_EQ(device_->GetAllocatedMemSize(), 1024);\n}\n\nTEST_F(DeviceMemoryTest, MemWrite) {\n  device_->SetMemQuota(1024);\n\n  std::shared_ptr<uint32_t> data(new uint32_t[100],\n                                 [](const uint32_t *ptr) { delete[] ptr; });\n  data.get()[13] = 111333;\n  data.get()[33] = 333333;\n  auto mem1 = device_->MemWrite(data.get(), 100 * sizeof(uint32_t));\n  EXPECT_NE(mem1, nullptr);\n  EXPECT_EQ(mem1->GetSize(), 100 * sizeof(uint32_t));\n  EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n  EXPECT_EQ(mem1->GetPtr<uint32_t>().get()[13], data.get()[13]);\n  EXPECT_EQ(mem1->GetConstPtr<uint32_t>().get()[33], data.get()[33]);\n\n  std::shared_ptr<uint32_t> data2(new uint32_t[300],\n                                  [](const uint32_t *ptr) { delete[] ptr; });\n  auto mem2 = device_->MemWrite(data.get(), 
300 * sizeof(uint32_t));\n  EXPECT_EQ(mem2.get(), nullptr);\n}\n\nTEST_F(DeviceMemoryTest, MemClone) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  auto ptr1 = mem1->GetPtr<uint32_t>();\n  EXPECT_NE(mem1.get(), nullptr);\n  ptr1.get()[13] = 111333;\n  ptr1.get()[17] = 111777;\n\n  auto mem2 = device_->MemClone(mem1);\n  EXPECT_NE(mem2.get(), nullptr);\n  auto ptr2 = mem2->GetPtr<uint32_t>();\n  EXPECT_EQ(ptr2.get()[13], 111333);\n  EXPECT_EQ(ptr2.get()[17], 111777);\n\n  mem1->SetContentMutable(false);\n  auto mem3 = device_->MemClone(mem1);\n  EXPECT_EQ(mem3.get(), mem1.get());\n}\n\n// Read & Write\nTEST_F(DeviceMemoryTest, DeviceMemoryReadWrite) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1.get(), nullptr);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr.get(), nullptr);\n  ptr.get()[0] = 1;\n  ptr.get()[1] = 2;\n  auto ptr2 = mem1->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr2.get(), nullptr);\n  EXPECT_EQ(ptr2.get()[0], 1);\n  EXPECT_EQ(ptr2.get()[1], 2);\n}\n\n// Copy\nTEST_F(DeviceMemoryTest, DeviceMemoryCopy) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1.get(), nullptr);\n\n  auto mem2 = device_->MemAlloc(100);\n  EXPECT_NE(mem2.get(), nullptr);\n\n  auto ptr1 = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr1.get(), nullptr);\n  ptr1.get()[3] = 3;\n  ptr1.get()[15] = 15;\n  ptr1.get()[27] = 27;\n\n  auto ret = mem2->ReadFrom(mem1, 2, 97, 3);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  auto ptr2 = mem2->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr2.get(), nullptr);\n  EXPECT_EQ(ptr2.get()[4], 3);\n  EXPECT_EQ(ptr2.get()[16], 15);\n  EXPECT_EQ(ptr2.get()[28], 27);\n\n  ret = mem1->WriteTo(mem2, 3, 30, 1);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n\n  auto ptr3 = mem2->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr3.get(), nullptr);\n  EXPECT_EQ(ptr3.get()[1], 3);\n  EXPECT_EQ(ptr3.get()[13], 15);\n  EXPECT_EQ(ptr3.get()[25], 27);\n}\n\n// 
Mutale\nTEST_F(DeviceMemoryTest, DeviceMemoryMutable) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1.get(), nullptr);\n\n  auto ptr1 = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr1.get(), nullptr);\n\n  ptr1.get()[13] = 3;\n  ptr1.get()[25] = 5;\n  ptr1.get()[37] = 7;\n\n  mem1->SetContentMutable(false);\n  EXPECT_EQ(mem1->IsContentMutable(), false);\n\n  auto ptr2 = mem1->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr2.get(), nullptr);\n\n  auto ptr3 = mem1->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr3.get(), nullptr);\n\n  EXPECT_EQ(ptr3.get()[13], 3);\n  EXPECT_EQ(ptr3.get()[25], 5);\n  EXPECT_EQ(ptr3.get()[37], 7);\n}\n\n// Resize\nTEST_F(DeviceMemoryTest, DeviceMemoryResize) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1.get(), nullptr);\n  EXPECT_EQ(mem1->GetSize(), 100);\n\n  auto ptr1 = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr1.get(), nullptr);\n  ptr1.get()[25] = 3;\n  ptr1.get()[13] = 5;\n  ptr1.get()[1] = 7;\n\n  auto ret = mem1->Resize(50);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  EXPECT_EQ(mem1->GetSize(), 50);\n\n  ret = mem1->Resize(150);\n  EXPECT_NE(ret, STATUS_SUCCESS);\n  EXPECT_EQ(mem1->GetSize(), 50);\n\n  auto ptr2 = mem1->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr2.get(), nullptr);\n  EXPECT_EQ(ptr2.get()[25], 3);\n  EXPECT_EQ(ptr2.get()[13], 5);\n  EXPECT_EQ(ptr2.get()[1], 7);\n}\n\n// Realloc\nTEST_F(DeviceMemoryTest, DeviceMemoryRealloc) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1.get(), nullptr);\n  EXPECT_EQ(mem1->GetSize(), 100);\n  EXPECT_EQ(mem1->GetCapacity(), 100);\n\n  auto ptr1 = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr1.get(), nullptr);\n  ptr1.get()[25] = 3;\n  ptr1.get()[13] = 5;\n  ptr1.get()[1] = 7;\n\n  auto ret = mem1->Realloc(50);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  EXPECT_EQ(mem1->GetSize(), 100);\n  EXPECT_EQ(mem1->GetCapacity(), 100);\n\n  ret = mem1->Realloc(150);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  
EXPECT_EQ(mem1->GetSize(), 100);\n  EXPECT_EQ(mem1->GetCapacity(), 150);\n\n  auto ptr2 = mem1->GetConstPtr<uint8_t>();\n  EXPECT_NE(ptr2.get(), nullptr);\n  EXPECT_EQ(ptr2.get()[25], 3);\n  EXPECT_EQ(ptr2.get()[13], 5);\n  EXPECT_EQ(ptr2.get()[1], 7);\n\n  ret = mem1->Realloc(1024);\n  EXPECT_NE(ret, STATUS_SUCCESS);\n  EXPECT_EQ(mem1->GetSize(), 100);\n  EXPECT_EQ(mem1->GetCapacity(), 150);\n}\n\n// Memory size\nTEST_F(DeviceMemoryTest, DeviceMemorySize) {\n  size_t free;\n  size_t total;\n  auto ret = device_->GetMemInfo(&free, &total);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  EXPECT_NE(free, 0);\n  EXPECT_NE(total, 0);\n\n  EXPECT_EQ(device_->GetMemQuota(), total);\n}\n\nTEST_F(DeviceMemoryTest, CudaMemoryTest) {\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cuda.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto cuda_device = dev_mgr->CreateDevice(\"cuda\", \"0\");\n  if (cuda_device == nullptr) {\n    GTEST_SKIP();\n  }\n\n  cuda_device->SetMemQuota(1024);\n  device_->SetMemQuota(1024);\n  auto cpu_mem = device_->MemAlloc(1024);\n  {\n    // Malloc\n    auto mem1 = cuda_device->MemAlloc(1024);\n    EXPECT_NE(mem1, nullptr);\n    EXPECT_EQ(mem1->GetSize(), 1024);\n    EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n    EXPECT_NE(mem1->GetPtr<void>().get(), nullptr);\n    EXPECT_NE(mem1->GetConstPtr<void>().get(), nullptr);\n    EXPECT_EQ(cuda_device->GetAllocatedMemSize(), 1024);\n  }\n  {\n    // MemWrite\n    std::shared_ptr<uint32_t> data(new uint32_t[100],\n                                   [](const uint32_t *ptr) { delete[] ptr; });\n    data.get()[35] = 333555;\n    data.get()[53] = 555333;\n    auto mem1 = cuda_device->MemWrite(data.get(), 100 * sizeof(uint32_t));\n    EXPECT_NE(mem1, nullptr);\n    EXPECT_EQ(mem1->GetSize(), 100 * sizeof(uint32_t));\n    EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n    
EXPECT_NE(mem1->GetPtr<uint8_t>(), nullptr);\n    EXPECT_NE(mem1->GetConstPtr<uint8_t>(), nullptr);\n\n    mem1->WriteTo(cpu_mem, 0, 100 * sizeof(uint32_t), 0);\n    auto cpu_data = cpu_mem->GetConstPtr<uint32_t>();\n    EXPECT_NE(cpu_data, nullptr);\n    EXPECT_EQ(cpu_data.get()[35], 333555);\n    EXPECT_EQ(cpu_data.get()[53], 555333);\n  }\n  {\n    // MemClone & Copy\n    auto ptr1 = cpu_mem->GetPtr<uint32_t>();\n    ptr1.get()[17] = 111777;\n    ptr1.get()[13] = 333111;\n    auto mem1 = cuda_device->MemAlloc(100);\n    auto ret = mem1->ReadFrom(cpu_mem, 0, 100, 0);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto mem2 = cuda_device->MemClone(mem1);\n    ret = mem2->WriteTo(cpu_mem, 0, 100, 100);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto ptr2 = cpu_mem->GetPtr<uint32_t>();\n    EXPECT_EQ(ptr2.get()[25 + 17], 111777);\n    EXPECT_EQ(ptr2.get()[25 + 13], 333111);\n  }\n  {\n    // Mutable\n    auto mem1 = cuda_device->MemAlloc(100);\n    mem1->SetContentMutable(false);\n    auto ptr = mem1->GetPtr<uint8_t>();\n    EXPECT_EQ(ptr, nullptr);\n    auto ptr2 = mem1->GetConstPtr<uint8_t>();\n    EXPECT_NE(ptr2, nullptr);\n  }\n  {\n    // Realloc\n    auto mem1 = cuda_device->MemAlloc(100);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 100);\n\n    mem1->Realloc(50);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 100);\n\n    mem1->Realloc(200);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 200);\n    mem1->WriteTo(cpu_mem, 0, 100);  // sync stream\n  }\n  {\n    // Memory size\n    size_t free;\n    size_t total;\n    auto ret = cuda_device->GetMemInfo(&free, &total);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    EXPECT_NE(free, 0);\n    EXPECT_NE(total, 0);\n  }\n\n  cuda_device = nullptr;\n}\n\nTEST_F(DeviceMemoryTest, CudaStreamTest) {\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cuda.so\");\n  auto dev_mgr = 
DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto cuda_device = dev_mgr->CreateDevice(\"cuda\", \"0\");\n  if (cuda_device == nullptr) {\n    GTEST_SKIP();\n  }\n\n  cuda_device->SetMemQuota(1024);\n  device_->SetMemQuota(1024);\n  {\n    auto unit3_output = cuda_device->MemAlloc(100);\n    EXPECT_NE(unit3_output, nullptr);\n    {\n      auto unit2_output = cuda_device->MemAlloc(100);\n      EXPECT_NE(unit2_output, nullptr);\n      {\n        auto unit1_output = cuda_device->MemAlloc(100);\n        EXPECT_NE(unit1_output, nullptr);\n        {\n          // unit1\n          uint32_t host_data[5]{1, 2, 3, 4, 5};\n          auto size = 5 * sizeof(uint32_t);\n          auto mem1 = cuda_device->MemWrite(host_data, size);\n          EXPECT_NE(mem1, nullptr);\n          auto ret = unit1_output->ReadFrom(mem1, 0, size);\n          EXPECT_EQ(ret, STATUS_SUCCESS);\n        }\n        // unit2\n        auto ret = unit2_output->ReadFrom(unit1_output, 0, 100);\n        EXPECT_EQ(ret, STATUS_SUCCESS);\n      }\n      // unit3\n      auto ret = unit3_output->ReadFrom(unit2_output, 0, 100);\n      EXPECT_EQ(ret, STATUS_SUCCESS);\n    }\n    // check output\n    auto host_mem = device_->MemAlloc(100);\n    EXPECT_NE(host_mem, nullptr);\n    auto ret = host_mem->ReadFrom(unit3_output, 0, 100);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto ptr = host_mem->GetConstPtr<uint32_t>();\n    EXPECT_EQ(ptr.get()[0], 1);\n    EXPECT_EQ(ptr.get()[1], 2);\n    EXPECT_EQ(ptr.get()[2], 3);\n    EXPECT_EQ(ptr.get()[3], 4);\n    EXPECT_EQ(ptr.get()[4], 5);\n  }\n\n  cuda_device = nullptr;\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryAppend) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(50, (size_t)100, 0);\n  EXPECT_NE(mem1, nullptr);\n  EXPECT_EQ(mem1->GetSize(), 50);\n  EXPECT_EQ(mem1->GetCapacity(), 100);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr, nullptr);\n  ptr.get()[13] = 
13;\n  ptr.get()[23] = 23;\n  ptr.get()[33] = 33;\n  auto mem2 = device_->MemAlloc(50);\n  EXPECT_NE(mem2, nullptr);\n  EXPECT_EQ(mem2->GetSize(), 50);\n  EXPECT_EQ(mem2->GetCapacity(), 50);\n  auto ptr2 = mem2->GetPtr<uint8_t>();\n  EXPECT_NE(ptr2, nullptr);\n  ptr2.get()[13] = 33;\n  ptr2.get()[23] = 32;\n  ptr2.get()[33] = 31;\n\n  auto mem3 = mem1->Append(mem2);\n  EXPECT_NE(mem3, nullptr);\n  EXPECT_EQ(mem3->GetSize(), 100);\n  EXPECT_EQ(mem3->GetCapacity(), 100);\n  EXPECT_EQ(mem3->GetPtr<uint8_t>(), mem1->GetPtr<uint8_t>());\n  auto ptr3 = mem3->GetPtr<uint8_t>();\n  EXPECT_NE(ptr3, nullptr);\n  EXPECT_EQ(ptr3.get()[13], 13);\n  EXPECT_EQ(ptr3.get()[23], 23);\n  EXPECT_EQ(ptr3.get()[33], 33);\n  EXPECT_EQ(ptr3.get()[63], 33);\n  EXPECT_EQ(ptr3.get()[73], 32);\n  EXPECT_EQ(ptr3.get()[83], 31);\n\n  auto mem4 = mem3->Append(mem2);\n  EXPECT_NE(mem4, nullptr);\n  EXPECT_EQ(mem4->GetSize(), 150);\n  EXPECT_EQ(mem4->GetCapacity(), 150);\n  EXPECT_NE(mem4->GetPtr<uint8_t>(), mem3->GetPtr<uint8_t>());\n  auto ptr4 = mem4->GetPtr<uint8_t>();\n  EXPECT_NE(ptr4, nullptr);\n  EXPECT_EQ(ptr4.get()[13], 13);\n  EXPECT_EQ(ptr4.get()[23], 23);\n  EXPECT_EQ(ptr4.get()[33], 33);\n  EXPECT_EQ(ptr4.get()[63], 33);\n  EXPECT_EQ(ptr4.get()[73], 32);\n  EXPECT_EQ(ptr4.get()[83], 31);\n  EXPECT_EQ(ptr4.get()[113], 33);\n  EXPECT_EQ(ptr4.get()[123], 32);\n  EXPECT_EQ(ptr4.get()[133], 31);\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryAppend2) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(50, (size_t)100, 0);\n  mem1->GetPtr<uint8_t>().get()[4] = 14;\n  auto mem2 = device_->MemAlloc(50);\n  mem2->GetPtr<uint8_t>().get()[4] = 24;\n  auto mem3 = device_->MemAlloc(50);\n  mem3->GetPtr<uint8_t>().get()[4] = 34;\n\n  auto mem4 = mem1->Append({mem2, mem3});\n  EXPECT_NE(mem4, nullptr);\n  EXPECT_EQ(mem4->GetSize(), 150);\n  EXPECT_EQ(mem4->GetCapacity(), 150);\n  auto ptr4 = mem4->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr4.get()[4], 14);\n  EXPECT_EQ(ptr4.get()[54], 24);\n  
EXPECT_EQ(ptr4.get()[104], 34);\n\n  auto mem5 = DeviceMemory::Combine({mem1, mem2, mem3});\n  EXPECT_NE(mem5, nullptr);\n  EXPECT_EQ(mem5->GetSize(), 150);\n  EXPECT_EQ(mem5->GetCapacity(), 150);\n  auto ptr5 = mem5->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr5.get()[4], 14);\n  EXPECT_EQ(ptr5.get()[54], 24);\n  EXPECT_EQ(ptr5.get()[104], 34);\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryAppend3) {\n  device_->SetMemQuota(1024);\n  auto mem1 = device_->MemAlloc(100, (size_t)100, 0);\n  auto sub1 = mem1->Cut(0, 10);\n  auto sub2 = mem1->Cut(10, 10);\n  auto sub3 = mem1->Cut(20, 10);\n  auto sub4 = mem1->Cut(29, 10);  // overlap\n  // continuous\n  auto mem2 = DeviceMemory::Combine({sub1, sub3, sub2});\n  EXPECT_NE(mem2, nullptr);\n  EXPECT_NE(mem2->GetConstPtr<void>(), sub1->GetConstPtr<void>());\n  EXPECT_EQ(mem2->GetSize(), 30);\n  EXPECT_EQ(mem2->GetCapacity(), 30);\n  auto mem3 = DeviceMemory::Combine({sub3, sub2});\n  EXPECT_NE(mem3, nullptr);\n  EXPECT_NE(mem3->GetConstPtr<void>(), sub2->GetConstPtr<void>());\n  EXPECT_EQ(mem3->GetSize(), 20);\n  EXPECT_EQ(mem3->GetCapacity(), 20);\n  // fragment\n  auto mem4 = DeviceMemory::Combine({sub1, sub2, sub3, sub4});\n  EXPECT_NE(mem4, nullptr);\n  EXPECT_NE(mem4->GetConstPtr<void>(), sub1->GetConstPtr<void>());\n  EXPECT_EQ(mem4->GetSize(), 40);\n  EXPECT_EQ(mem4->GetCapacity(), 40);\n  auto mem5 = DeviceMemory::Combine({sub1, sub3});\n  EXPECT_NE(mem5, nullptr);\n  EXPECT_NE(mem5->GetConstPtr<void>(), sub1->GetConstPtr<void>());\n  EXPECT_EQ(mem5->GetSize(), 20);\n  EXPECT_EQ(mem5->GetCapacity(), 20);\n}\n\nTEST_F(DeviceMemoryTest, CudaMemoryAppend) {\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cuda.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto cuda_device = dev_mgr->CreateDevice(\"cuda\", \"0\");\n  if (cuda_device == nullptr) {\n    GTEST_SKIP();\n  }\n  // cuda 
meta\n  device_->SetMemQuota(1024);\n  auto mem1 = device_->MemAlloc(100, (size_t)100, 0);\n  auto *ptr = mem1->GetPtr<uint8_t>().get();\n  ptr[1] = 13;\n  ptr[5] = 53;\n  ptr[9] = 93;\n  cuda_device->SetMemQuota(1024);\n  auto cuda_mem1 = cuda_device->MemAlloc(100, (size_t)100, 0);\n  auto cuda_sub1 = cuda_mem1->Cut(0, 10);\n  auto cuda_sub2 = cuda_mem1->Cut(10, 10);\n  // Create different stream\n  auto ret = cuda_sub1->ReadFrom(mem1, 0, 10);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  ret = cuda_sub2->ReadFrom(mem1, 0, 10);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  auto cuda_mem2 = DeviceMemory::Combine({cuda_sub1, cuda_sub2});\n  EXPECT_NE(cuda_mem2, nullptr);\n  EXPECT_EQ(cuda_mem2->GetSize(), 20);\n  EXPECT_EQ(cuda_mem2->GetCapacity(), 100);\n  auto mem2 = device_->MemAlloc(100, (size_t)100, 0);\n  EXPECT_NE(mem2, nullptr);\n  ret = mem2->ReadFrom(cuda_mem2, 0, 20);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  const auto *ptr2 = mem2->GetConstPtr<uint8_t>().get();\n  EXPECT_EQ(ptr2[1], 13);\n  EXPECT_EQ(ptr2[5], 53);\n  EXPECT_EQ(ptr2[9], 93);\n  EXPECT_EQ(ptr2[11], 13);\n  EXPECT_EQ(ptr2[15], 53);\n  EXPECT_EQ(ptr2[19], 93);\n  cuda_device = nullptr;\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryCut) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1, nullptr);\n  EXPECT_EQ(mem1->GetSize(), 100);\n  EXPECT_EQ(mem1->GetCapacity(), 100);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  EXPECT_NE(ptr, nullptr);\n  ptr.get()[13] = 13;\n  ptr.get()[23] = 23;\n  ptr.get()[33] = 33;\n  ptr.get()[43] = 43;\n  ptr.get()[53] = 53;\n  auto mem_part1 = mem1->Cut(10, 10);\n  EXPECT_NE(mem_part1, nullptr);\n  EXPECT_EQ(mem_part1->GetSize(), 10);\n  EXPECT_EQ(mem_part1->GetCapacity(), 90);\n  auto mem_part2 = mem1->Cut(20, 10);\n  EXPECT_NE(mem_part2, nullptr);\n  EXPECT_EQ(mem_part2->GetSize(), 10);\n  EXPECT_EQ(mem_part2->GetCapacity(), 80);\n  EXPECT_EQ(mem_part2->GetPtr<uint8_t>().get()[3], 23);\n  auto mem_part3 = mem1->Cut(30, 10);\n  
EXPECT_NE(mem_part3, nullptr);\n  auto mem_part4 = mem1->Cut(40, 10);\n  EXPECT_NE(mem_part4, nullptr);\n  auto mem_part5 = mem1->Cut(50, 50);\n  EXPECT_NE(mem_part5, nullptr);\n  EXPECT_EQ(mem_part5->GetSize(), 50);\n  EXPECT_EQ(mem_part5->GetCapacity(), 50);\n  EXPECT_EQ(mem_part5->GetPtr<uint8_t>().get()[3], 53);\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryDelete) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1, nullptr);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  ptr.get()[3] = 3;\n  ptr.get()[13] = 13;\n  ptr.get()[23] = 23;\n  auto mem2 = mem1->Delete(0, 10, 100);\n  EXPECT_EQ(mem2->GetSize(), 90);\n  EXPECT_NE(mem2, nullptr);\n  auto ptr2 = mem2->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr2.get()[3], 13);\n  EXPECT_EQ(ptr2.get()[13], 23);\n  auto mem3 = mem1->Delete(10, 10);\n  EXPECT_NE(mem3, nullptr);\n  EXPECT_EQ(mem3->GetSize(), 90);\n  EXPECT_EQ(mem3->GetCapacity(), 90);\n  auto ptr3 = mem3->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr3.get()[3], 3);\n  EXPECT_EQ(ptr3.get()[13], 23);\n  auto mem4 = mem1->Delete(90, 10, 100);\n  EXPECT_NE(mem4, nullptr);\n  EXPECT_EQ(mem4->GetSize(), 90);\n  EXPECT_EQ(mem4->GetCapacity(), 100);\n  auto mem5 = mem1->Delete(90, 20, 100);\n  EXPECT_EQ(mem5, nullptr);\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryCopy2) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1, nullptr);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  ptr.get()[3] = 3;\n  ptr.get()[13] = 13;\n  ptr.get()[23] = 23;\n  auto mem2 = mem1->Copy(0, 10, 100);\n  EXPECT_NE(mem2, nullptr);\n  auto ptr2 = mem2->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr2.get()[3], 3);\n  auto mem3 = mem1->Copy(10, 20);\n  EXPECT_NE(mem3, nullptr);\n  EXPECT_EQ(mem3->GetSize(), 20);\n  EXPECT_EQ(mem3->GetCapacity(), 20);\n  auto ptr3 = mem3->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr3.get()[3], 13);\n  EXPECT_EQ(ptr3.get()[13], 23);\n  auto mem4 = mem1->Copy(50, 60, 100);\n  EXPECT_EQ(mem4, nullptr);\n}\n\nTEST_F(DeviceMemoryTest, 
DeviceMemoryClone2) {\n  device_->SetMemQuota(1024);\n\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1, nullptr);\n  auto ptr = mem1->GetPtr<uint8_t>();\n  ptr.get()[3] = 3;\n  ptr.get()[13] = 13;\n  ptr.get()[23] = 23;\n\n  auto mem2 = mem1->Clone();\n  EXPECT_EQ(mem2->GetPtr<uint8_t>(), mem1->GetPtr<uint8_t>());\n  auto mem3 = mem1->Clone(true);\n  EXPECT_NE(mem3->GetPtr<uint8_t>(), mem1->GetPtr<uint8_t>());\n  auto ptr2 = mem3->GetPtr<uint8_t>();\n  EXPECT_EQ(ptr2.get()[3], 3);\n  EXPECT_EQ(ptr2.get()[13], 13);\n  EXPECT_EQ(ptr2.get()[23], 23);\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryContiguous) {\n  auto mem1 = device_->MemAlloc(100);\n  EXPECT_NE(mem1, nullptr);\n  auto mem2 = mem1->Cut(0, 10);\n  auto mem3 = mem1->Cut(10, 20);\n  auto mem4 = mem1->Cut(30, 50);\n  auto mem5 = mem4->Cut(10, 30);\n  EXPECT_EQ(device_->GetAllocatedMemSize(), 100);\n  std::vector<std::shared_ptr<DeviceMemory>> mem_list = {mem2, mem3, mem4};\n  std::vector<std::shared_ptr<DeviceMemory>> mem_list2 = {mem3, mem2, mem4};\n  std::vector<std::shared_ptr<DeviceMemory>> mem_list3 = {mem3, mem2, mem4,\n                                                          mem5};\n  // Basic test\n  EXPECT_TRUE(DeviceMemory::IsContiguous(mem_list));\n  EXPECT_TRUE(DeviceMemory::IsContiguous(mem_list, false));\n  // Test order\n  EXPECT_FALSE(DeviceMemory::IsContiguous(mem_list2));\n  EXPECT_TRUE(DeviceMemory::IsContiguous(mem_list2, false));\n  // Test mem offset\n  EXPECT_FALSE(DeviceMemory::IsContiguous(mem_list3));\n  EXPECT_FALSE(DeviceMemory::IsContiguous(mem_list3, false));\n  // Test mem block\n  auto mem6 = device_->MemAlloc(100);\n  mem_list.push_back(mem6);\n  EXPECT_FALSE(DeviceMemory::IsContiguous(mem_list));\n  EXPECT_FALSE(DeviceMemory::IsContiguous(mem_list, false));\n}\n\nTEST_F(DeviceMemoryTest, DeviceMemoryAcquire) {\n  device_->SetMemQuota(1024);\n\n  auto *data = new uint8_t[100];\n  data[33] = 33;\n  data[44] = 44;\n  data[55] = 55;\n  data[66] = 66;\n\n  auto dev_mem = 
device_->MemAcquire(\n      (void *)data, 100, [](void *ptr) { delete[](uint8_t *) ptr; });\n  EXPECT_NE(dev_mem, nullptr);\n  const auto *ptr = dev_mem->GetConstPtr<uint8_t>().get();\n  EXPECT_NE(ptr, nullptr);\n  EXPECT_EQ(ptr[33], 33);\n  EXPECT_EQ(ptr[44], 44);\n  EXPECT_EQ(ptr[55], 55);\n  EXPECT_EQ(ptr[66], 66);\n\n  std::shared_ptr<uint8_t> data2(new uint8_t[100],\n                                 [](const uint8_t *ptr) { delete[] ptr; });\n  data2.get()[33] = 33;\n  data2.get()[44] = 44;\n  data2.get()[55] = 55;\n  data2.get()[66] = 66;\n\n  auto dev_mem2 = device_->MemAcquire(data2, 100);\n  EXPECT_NE(dev_mem2, nullptr);\n  const auto *ptr2 = dev_mem2->GetConstPtr<uint8_t>().get();\n  EXPECT_NE(ptr2, nullptr);\n  EXPECT_EQ(ptr2[33], 33);\n  EXPECT_EQ(ptr2[44], 44);\n  EXPECT_EQ(ptr2[55], 55);\n  EXPECT_EQ(ptr2[66], 66);\n}\n\nTEST_F(DeviceMemoryTest, AscendMemoryTest) {\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-ascend.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto ascend_device = dev_mgr->CreateDevice(\"ascend\", \"0\");\n  if (ascend_device == nullptr) {\n    GTEST_SKIP();\n  }\n\n  ascend_device->SetMemQuota(1024);\n  device_->SetMemQuota(1024);\n  auto cpu_mem = device_->MemAlloc(1024);\n  {\n    // Malloc\n    auto mem1 = ascend_device->MemAlloc(1024);\n    EXPECT_NE(mem1, nullptr);\n    EXPECT_EQ(mem1->GetSize(), 1024);\n    EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n    EXPECT_NE(mem1->GetPtr<void>().get(), nullptr);\n    EXPECT_NE(mem1->GetConstPtr<void>().get(), nullptr);\n    EXPECT_EQ(ascend_device->GetAllocatedMemSize(), 1024);\n  }\n  {\n    // MemWrite\n    std::shared_ptr<uint32_t> data(new uint32_t[100],\n                                   [](const uint32_t *ptr) { delete[] ptr; });\n    data.get()[35] = 333555;\n    data.get()[53] = 555333;\n    auto mem1 = 
ascend_device->MemWrite(data.get(), 100 * sizeof(uint32_t));\n    EXPECT_NE(mem1, nullptr);\n    EXPECT_EQ(mem1->GetSize(), 100 * sizeof(uint32_t));\n    EXPECT_EQ(mem1->Verify(), STATUS_SUCCESS);\n    EXPECT_NE(mem1->GetPtr<uint8_t>(), nullptr);\n    EXPECT_NE(mem1->GetConstPtr<uint8_t>(), nullptr);\n\n    mem1->WriteTo(cpu_mem, 0, 100 * sizeof(uint32_t), 0);\n    auto cpu_data = cpu_mem->GetConstPtr<uint32_t>();\n    EXPECT_NE(cpu_data, nullptr);\n    EXPECT_EQ(cpu_data.get()[35], 333555);\n    EXPECT_EQ(cpu_data.get()[53], 555333);\n  }\n  {\n    // MemClone & Copy\n    auto ptr1 = cpu_mem->GetPtr<uint32_t>();\n    ptr1.get()[17] = 111777;\n    ptr1.get()[13] = 333111;\n    auto mem1 = ascend_device->MemAlloc(100);\n    auto ret = mem1->ReadFrom(cpu_mem, 0, 100, 0);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto mem2 = ascend_device->MemClone(mem1);\n    ret = mem2->WriteTo(cpu_mem, 0, 100, 100);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto ptr2 = cpu_mem->GetPtr<uint32_t>();\n    EXPECT_EQ(ptr2.get()[25 + 17], 111777);\n    EXPECT_EQ(ptr2.get()[25 + 13], 333111);\n  }\n  {\n    // Mutable\n    auto mem1 = ascend_device->MemAlloc(100);\n    mem1->SetContentMutable(false);\n    auto ptr = mem1->GetPtr<uint8_t>();\n    EXPECT_EQ(ptr, nullptr);\n    auto ptr2 = mem1->GetConstPtr<uint8_t>();\n    EXPECT_NE(ptr2, nullptr);\n  }\n  {\n    // Realloc\n    auto mem1 = ascend_device->MemAlloc(100);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 100);\n\n    mem1->Realloc(50);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 100);\n\n    mem1->Realloc(200);\n    EXPECT_EQ(mem1->GetSize(), 100);\n    EXPECT_EQ(mem1->GetCapacity(), 200);\n\n    // Sync\n    mem1->WriteTo(cpu_mem, 0, 100);\n  }\n  {\n    // Memory size\n    size_t free;\n    size_t total;\n    auto ret = ascend_device->GetMemInfo(&free, &total);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    EXPECT_NE(free, 0);\n    EXPECT_NE(total, 0);\n  
}\n}\n\nTEST_F(DeviceMemoryTest, AscendStreamTest) {\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-ascend.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto ascend_device = dev_mgr->CreateDevice(\"ascend\", \"0\");\n  if (ascend_device == nullptr) {\n    GTEST_SKIP();\n  }\n\n  ascend_device->SetMemQuota(1024);\n  device_->SetMemQuota(1024);\n  {\n    auto unit3_output = ascend_device->MemAlloc(100);\n    EXPECT_NE(unit3_output, nullptr);\n    {\n      auto unit2_output = ascend_device->MemAlloc(100);\n      EXPECT_NE(unit2_output, nullptr);\n      {\n        auto unit1_output = ascend_device->MemAlloc(100);\n        EXPECT_NE(unit1_output, nullptr);\n        {\n          // unit1\n          auto *host_data = new uint32_t[5]{1, 2, 3, 4, 5};\n          auto size = 5 * sizeof(uint32_t);\n          auto mem1 = ascend_device->MemWrite(host_data, size);\n          delete []host_data;\n          EXPECT_NE(mem1, nullptr);\n          auto ret = unit1_output->ReadFrom(mem1, 0, size);\n          EXPECT_EQ(ret, STATUS_SUCCESS);\n        }\n        // unit2\n        auto ret = unit2_output->ReadFrom(unit1_output, 0, 100);\n        EXPECT_EQ(ret, STATUS_SUCCESS);\n      }\n      // unit3\n      auto ret = unit3_output->ReadFrom(unit2_output, 0, 100);\n      EXPECT_EQ(ret, STATUS_SUCCESS);\n    }\n    // check output\n    auto host_mem = device_->MemAlloc(100);\n    EXPECT_NE(host_mem, nullptr);\n    auto ret = host_mem->ReadFrom(unit3_output, 0, 100);\n    EXPECT_EQ(ret, STATUS_SUCCESS);\n    auto ptr = host_mem->GetConstPtr<uint32_t>();\n    EXPECT_EQ(ptr.get()[0], 1);\n    EXPECT_EQ(ptr.get()[1], 2);\n    EXPECT_EQ(ptr.get()[2], 3);\n    EXPECT_EQ(ptr.get()[3], 4);\n    EXPECT_EQ(ptr.get()[4], 5);\n  }\n}\n\nTEST_F(DeviceMemoryTest, AscendMemoryAppend) {\n  auto drivers = Drivers::GetInstance();\n  
drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-ascend.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  dev_mgr->Initialize(drivers, configbuilder.Build());\n  auto ascend_device = dev_mgr->CreateDevice(\"ascend\", \"0\");\n  if (ascend_device == nullptr) {\n    GTEST_SKIP();\n  }\n\n  // ascend meta\n  device_->SetMemQuota(1024);\n  auto mem1 = device_->MemAlloc(100, (size_t)100, 0);\n  auto *ptr = mem1->GetPtr<uint8_t>().get();\n  ptr[1] = 13;\n  ptr[5] = 53;\n  ptr[9] = 93;\n  ascend_device->SetMemQuota(1024);\n  auto ascend_mem1 = ascend_device->MemAlloc(100, (size_t)100, 0);\n  auto ascend_sub1 = ascend_mem1->Cut(0, 10);\n  auto ascend_sub2 = ascend_mem1->Cut(10, 10);\n  // Create different stream\n  auto ret = ascend_sub1->ReadFrom(mem1, 0, 10);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  ret = ascend_sub2->ReadFrom(mem1, 0, 10);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  auto ascend_mem2 = DeviceMemory::Combine({ascend_sub1, ascend_sub2});\n  EXPECT_NE(ascend_mem2, nullptr);\n  EXPECT_EQ(ascend_mem2->GetSize(), 20);\n  EXPECT_EQ(ascend_mem2->GetCapacity(), 100);\n  auto mem2 = device_->MemAlloc(100, (size_t)100, 0);\n  EXPECT_NE(mem2, nullptr);\n  ret = mem2->ReadFrom(ascend_mem2, 0, 20);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  const auto *ptr2 = mem2->GetConstPtr<uint8_t>().get();\n  EXPECT_EQ(ptr2[1], 13);\n  EXPECT_EQ(ptr2[5], 53);\n  EXPECT_EQ(ptr2[9], 93);\n  EXPECT_EQ(ptr2[11], 13);\n  EXPECT_EQ(ptr2[15], 53);\n  EXPECT_EQ(ptr2[19], 93);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/driver_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/driver.h\"\n\n#include <dlfcn.h>\n#include <poll.h>\n#include <sys/time.h>\n\n#include <algorithm>\n#include <chrono>\n#include <fstream>\n#include <nlohmann/json.hpp>\n#include <string>\n#include <thread>\n\n#include \"flowunit_mockflowunit.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/config.h\"\n#include \"modelbox/base/driver_utils.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n\nnamespace modelbox {\n\nclass DriverTest : public testing::Test {\n public:\n  DriverTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    drivers->Clear();\n  };\n};\nstatic std::string CalCode(const std::vector<std::string> &dirs);\n\nstd::string CalCode(const std::vector<std::string> &dirs) {\n  int64_t check_sum = 0;\n  for (const auto &dir : dirs) {\n    std::vector<std::string> drivers_list;\n    std::string filter = \"libmodelbox-*.so*\";\n    struct stat s;\n    lstat(dir.c_str(), &s);\n\n    if (!S_ISDIR(s.st_mode)) {\n      check_sum += s.st_mtim.tv_sec;\n      continue;\n    }\n\n    Status status = ListFiles(dir, filter, &drivers_list);\n    if (drivers_list.size() == 0) {\n      continue;\n    }\n\n    for (auto 
&driver_file : drivers_list) {\n      struct stat buf;\n      auto ret = lstat(driver_file.c_str(), &buf);\n      if (ret) {\n        continue;\n      }\n\n      if (S_ISLNK(buf.st_mode)) {\n        continue;\n      }\n      check_sum += buf.st_mtim.tv_sec;\n    }\n  }\n\n  return GenerateKey(check_sum);\n}\n\nTEST_F(DriverTest, Factory) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  std::string file_path_flowunit =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-httpserver.so\";\n  desc_flowunit.SetFilePath(file_path_flowunit);\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"ascend\");\n  desc.SetName(\"device-driver-ascend\");\n  desc.SetDescription(\"the ascend device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-ascend.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"ascend\", desc);\n\n  bool result = drivers->Add(file_path_device);\n  EXPECT_TRUE(result);\n  result = drivers->Add(file_path_flowunit);\n  EXPECT_TRUE(result);\n  std::vector<std::shared_ptr<Driver>> driver_list =\n      drivers->GetAllDriverList();\n  EXPECT_EQ(driver_list.size(), 2);\n  std::shared_ptr<DriverDesc> desc_fu = driver_list[1]->GetDriverDesc();\n  EXPECT_EQ(desc_fu->GetFilePath(), file_path_flowunit);\n\n  std::shared_ptr<FlowUnitFactory> flowunit_factory =\n      std::dynamic_pointer_cast<FlowUnitFactory>(\n          driver_list[1]->CreateFactory());\n  std::shared_ptr<FlowUnit> flowunit =\n      flowunit_factory->CreateFlowUnit(desc_fu->GetName(), 
desc_fu->GetType());\n  EXPECT_NE(flowunit.get(), nullptr);\n\n  std::shared_ptr<DriverDesc> desc_de = driver_list[0]->GetDriverDesc();\n  EXPECT_EQ(desc_de->GetFilePath(), file_path_device);\n\n  std::shared_ptr<DeviceFactory> device_factory =\n      std::dynamic_pointer_cast<DeviceFactory>(driver_list[0]->CreateFactory());\n  std::shared_ptr<Device> device =\n      device_factory->CreateDevice(desc_de->GetName());\n  EXPECT_NE(device.get(), nullptr);\n}\n\nTEST_F(DriverTest, ScanFail) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libaifolw-*\");\n  EXPECT_FALSE(result);\n  std::vector<std::shared_ptr<Driver>> driver_list =\n      drivers->GetAllDriverList();\n  EXPECT_EQ(driver_list.size(), 0);\n\n  result = drivers->Scan(\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-httpserver.so\", \"\");\n  EXPECT_TRUE(result);\n}\n\nTEST_F(DriverTest, ScanSuccess) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cuda\");\n  desc_flowunit.SetName(\"resize\");\n  desc_flowunit.SetDescription(\"A resize flowunit on GPU\");\n  
desc_flowunit.SetVersion(\"0.1.2\");\n  ctl.AddMockDriverFlowUnit(\"resize\", \"cuda\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"ascend\");\n  desc_flowunit.SetName(\"inference\");\n  desc_flowunit.SetDescription(\"A inference flowunit on NPU\");\n  desc_flowunit.SetVersion(\"2.0.1\");\n  ctl.AddMockDriverFlowUnit(\"inference\", \"cpu\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"ascend\");\n  desc.SetName(\"device-driver-ascend\");\n  desc.SetDescription(\"the ascend device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"ascend\", desc);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"cuda\");\n  desc.SetName(\"device-driver-cuda\");\n  desc.SetDescription(\"the gpu device\");\n  desc.SetVersion(\"7.0.0\");\n  ctl.AddMockDriverDevice(\"cuda\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-unit-*\");\n  std::vector<std::shared_ptr<Driver>> driver_list1 =\n      drivers->GetAllDriverList();\n  EXPECT_TRUE(result);\n  EXPECT_EQ(driver_list1.size(), 3);\n  for (auto &driver : driver_list1) {\n    std::shared_ptr<DriverDesc> desc_unit = driver->GetDriverDesc();\n    EXPECT_EQ(desc_unit->GetClass(), \"driver-flowunit\");\n    if (desc_unit->GetName() == \"httpserver\") {\n      EXPECT_EQ(desc_unit->GetType(), \"cpu\");\n      EXPECT_EQ(desc_unit->GetDescription(), \"A httpserver flowunit on CPU\");\n      EXPECT_EQ(desc_unit->GetVersion(), \"1.0.0\");\n    } else if (desc_unit->GetName() == \"resize\") {\n      EXPECT_EQ(desc_unit->GetType(), \"cuda\");\n      EXPECT_EQ(desc_unit->GetDescription(), \"A resize flowunit on GPU\");\n      EXPECT_EQ(desc_unit->GetVersion(), \"0.1.2\");\n    } else {\n      EXPECT_EQ(desc_unit->GetType(), \"ascend\");\n      EXPECT_EQ(desc_unit->GetDescription(), \"A inference flowunit on NPU\");\n      EXPECT_EQ(desc_unit->GetVersion(), \"2.0.1\");\n    }\n  }\n}\n\nTEST_F(DriverTest, Add) {\n  
std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"ascend\");\n  desc.SetName(\"device-driver-ascend\");\n  desc.SetDescription(\"the ascend device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"ascend\", desc);\n\n  std::string file_unit =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-httpserver.so\";\n  std::string file_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-ascend.so\";\n  auto result = drivers->Add(file_unit);\n  EXPECT_TRUE(result);\n  result = drivers->Add(file_device);\n  EXPECT_TRUE(result);\n\n  std::vector<std::shared_ptr<Driver>> drivers_list =\n      drivers->GetAllDriverList();\n  EXPECT_EQ(drivers_list.size(), 2);\n  std::shared_ptr<DriverDesc> desc_unit = drivers_list[0]->GetDriverDesc();\n  EXPECT_EQ(desc_unit->GetClass(), \"driver-flowunit\");\n  EXPECT_EQ(desc_unit->GetType(), \"cpu\");\n  EXPECT_EQ(desc_unit->GetName(), \"httpserver\");\n  EXPECT_EQ(desc_unit->GetDescription(), \"A httpserver flowunit on CPU\");\n  EXPECT_EQ(desc_unit->GetVersion(), \"1.0.0\");\n\n  std::shared_ptr<DriverDesc> desc_device = drivers_list[1]->GetDriverDesc();\n  EXPECT_EQ(desc_device->GetClass(), \"driver-device\");\n  EXPECT_EQ(desc_device->GetType(), \"ascend\");\n  EXPECT_EQ(desc_device->GetName(), \"device-driver-ascend\");\n  EXPECT_EQ(desc_device->GetDescription(), \"the ascend device\");\n  EXPECT_EQ(desc_device->GetVersion(), \"8.9.2\");\n\n  result = drivers->Add(file_unit);\n  EXPECT_EQ(result, STATUS_EXIST);\n}\n\nTEST_F(DriverTest, 
GetDriverListByClass) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"resize\");\n  desc_flowunit.SetDescription(\"A resize flowunit on cpu\");\n  desc_flowunit.SetVersion(\"0.1.2\");\n  ctl.AddMockDriverFlowUnit(\"resize\", \"cpu\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"ascend\");\n  desc.SetName(\"device-driver-ascend\");\n  desc.SetDescription(\"the ascend device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"ascend\", desc);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"cuda\");\n  desc.SetName(\"device-driver-cuda\");\n  desc.SetDescription(\"the gpu device\");\n  desc.SetVersion(\"7.0.0\");\n  ctl.AddMockDriverDevice(\"cuda\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"*\");\n  EXPECT_TRUE(result);\n\n  std::vector<std::shared_ptr<Driver>> device_list =\n      drivers->GetDriverListByClass(\"driver-device\");\n  EXPECT_EQ(device_list.size(), 2);\n  for (auto &device_item : device_list) {\n    auto desc_device = device_item->GetDriverDesc();\n    EXPECT_EQ(desc_device->GetClass(), \"driver-device\");\n  }\n\n  std::vector<std::shared_ptr<Driver>> flowunit_list =\n      drivers->GetDriverListByClass(\"driver-flowunit\");\n  EXPECT_EQ(device_list.size(), 2);\n  for (auto &flowunit_item : flowunit_list) {\n    auto desc_flowunit = flowunit_item->GetDriverDesc();\n    EXPECT_EQ(desc_flowunit->GetClass(), \"driver-flowunit\");\n  }\n}\n\nTEST_F(DriverTest, GetDriverTypeList) 
{\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"resize\");\n  desc_flowunit.SetDescription(\"A resize flowunit on cpu\");\n  desc_flowunit.SetVersion(\"0.1.2\");\n  ctl.AddMockDriverFlowUnit(\"resize\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"ascend\");\n  desc_flowunit.SetName(\"inference\");\n  desc_flowunit.SetDescription(\"A inference flowunit on NPU\");\n  desc_flowunit.SetVersion(\"2.0.1\");\n  ctl.AddMockDriverFlowUnit(\"inference\", \"ascend\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::vector<std::string> type_list =\n      drivers->GetDriverTypeList(\"driver-flowunit\");\n  EXPECT_EQ(type_list.size(), 2);\n\n  bool found = false;\n  for (auto &it : type_list) {\n    if (it == \"cuda\") {\n      found = true;\n      break;\n    }\n  }\n  EXPECT_FALSE(found);\n  EXPECT_EQ(*find(type_list.begin(), type_list.end(), \"cpu\"), \"cpu\");\n  EXPECT_EQ(*find(type_list.begin(), type_list.end(), \"ascend\"), \"ascend\");\n}\n\nTEST_F(DriverTest, GetDriverClassList) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  
MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"resize\");\n  desc_flowunit.SetDescription(\"A resize flowunit on cpu\");\n  desc_flowunit.SetVersion(\"0.1.2\");\n  ctl.AddMockDriverFlowUnit(\"resize\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"ascend\");\n  desc_flowunit.SetName(\"inference\");\n  desc_flowunit.SetDescription(\"A inference flowunit on NPU\");\n  desc_flowunit.SetVersion(\"2.0.1\");\n  ctl.AddMockDriverFlowUnit(\"inference\", \"cpu\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::vector<std::string> class_list = drivers->GetDriverClassList();\n  EXPECT_EQ(class_list.size(), 2);\n\n  bool found = false;\n  for (auto &it : class_list) {\n    if (it == \"test\") {\n      found = true;\n      break;\n    }\n  }\n  EXPECT_FALSE(found);\n  EXPECT_EQ(*find(class_list.begin(), class_list.end(), \"driver-flowunit\"),\n            \"driver-flowunit\");\n  EXPECT_EQ(*find(class_list.begin(), class_list.end(), \"driver-device\"),\n            \"driver-device\");\n}\n\nTEST_F(DriverTest, GetDriverNameList) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  
desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"resize\");\n  desc_flowunit.SetDescription(\"A resize flowunit on cpu\");\n  desc_flowunit.SetVersion(\"0.1.2\");\n  ctl.AddMockDriverFlowUnit(\"resize\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"ascend\");\n  desc_flowunit.SetName(\"inference\");\n  desc_flowunit.SetDescription(\"A inference flowunit on NPU\");\n  desc_flowunit.SetVersion(\"2.0.1\");\n  ctl.AddMockDriverFlowUnit(\"inference\", \"ascend\", desc_flowunit);\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::vector<std::string> name_list =\n      drivers->GetDriverNameList(\"driver-flowunit\", \"cpu\");\n  EXPECT_EQ(name_list.size(), 2);\n\n  bool found = false;\n  for (auto &it : name_list) {\n    if (it == \"test\") {\n      found = true;\n      break;\n    }\n  }\n  EXPECT_FALSE(found);\n  EXPECT_EQ(*find(name_list.begin(), name_list.end(), \"httpserver\"),\n            \"httpserver\");\n  EXPECT_EQ(*find(name_list.begin(), name_list.end(), \"resize\"), \"resize\");\n}\n\nTEST_F(DriverTest, GetDriver) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A 
httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.0.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::shared_ptr<Driver> driver_success =\n      drivers->GetDriver(\"driver-flowunit\", \"cpu\", \"httpserver\");\n  std::shared_ptr<Driver> driver_fail =\n      drivers->GetDriver(\"driver-flowunit\", \"cuda\", \"httpserver\");\n  std::shared_ptr<Driver> driver_version =\n      drivers->GetDriver(\"driver-flowunit\", \"cpu\", \"httpserver\", \"1.0.0\");\n\n  std::shared_ptr<DriverDesc> desc_success = driver_success->GetDriverDesc();\n  std::shared_ptr<DriverDesc> desc_version = driver_version->GetDriverDesc();\n  EXPECT_EQ(desc_success->GetDescription(), \"A httpserver flowunit on CPU\");\n  EXPECT_EQ(desc_success->GetVersion(), \"1.0.0\");\n  EXPECT_EQ(driver_fail.get(), nullptr);\n  EXPECT_EQ(desc_version->GetDescription(), \"A httpserver flowunit on CPU\");\n}\n\nTEST_F(DriverTest, VersionTest) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.1.1\");\n  ctl.AddMockDriverFlowUnit(\"httpserver111\", \"cpu\", desc_flowunit);\n\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1.2.0\");\n  ctl.AddMockDriverFlowUnit(\"httpserver120\", \"cpu\", desc_flowunit);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::shared_ptr<Driver> driver_120 =\n      drivers->GetDriver(\"driver-flowunit\", 
\"cpu\", \"httpserver\");\n  std::shared_ptr<Driver> driver_111 =\n      drivers->GetDriver(\"driver-flowunit\", \"cpu\", \"httpserver\", \"1.1.1\");\n  std::shared_ptr<DriverDesc> desc_120 = driver_120->GetDriverDesc();\n  std::shared_ptr<DriverDesc> desc_111 = driver_111->GetDriverDesc();\n  EXPECT_EQ(desc_120->GetVersion(), \"1.2.0\");\n  EXPECT_EQ(desc_111->GetVersion(), \"1.1.1\");\n}\n\nTEST_F(DriverTest, SetVersionFailTest) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  MockDriverCtl ctl;\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n  desc_flowunit.SetClass(\"driver-flowunit\");\n  desc_flowunit.SetType(\"cpu\");\n  desc_flowunit.SetName(\"httpserver\");\n  desc_flowunit.SetDescription(\"A httpserver flowunit on CPU\");\n  desc_flowunit.SetVersion(\"1111\");\n  desc_flowunit.SetVersion(\"1.1\");\n  desc_flowunit.SetVersion(\"a.b.c\");\n  ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n  bool result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-*\");\n  EXPECT_TRUE(result);\n\n  std::shared_ptr<Driver> driver_version =\n      drivers->GetDriver(\"driver-flowunit\", \"cpu\", \"httpserver\");\n\n  std::shared_ptr<DriverDesc> desc_version = driver_version->GetDriverDesc();\n  EXPECT_EQ(desc_version->GetVersion(), \"\");\n}\n\nTEST_F(DriverTest, DoubleScan) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(DRIVER_SKIP_DEFAULT, \"false\");\n  std::shared_ptr<Configuration> config = builder.Build();\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  drivers->Initialize(config);\n  struct stat buffer;\n  if (stat(DEFAULT_SCAN_INFO, &buffer) != -1) {\n    EXPECT_EQ(remove(DEFAULT_SCAN_INFO), 0);\n  }\n  auto status = drivers->Scan();\n  EXPECT_EQ(stat(DEFAULT_SCAN_INFO, &buffer), 0);\n  auto driver_nums = drivers->GetAllDriverList().size();\n  drivers->Clear();\n  drivers->Initialize(config);\n\n  std::ifstream ifs(DEFAULT_SCAN_INFO);\n  nlohmann::json dump_json;\n  ifs >> 
dump_json;\n  std::string read_check_node = dump_json[\"check_code\"];\n\n  std::vector<std::string> dirs{MODELBOX_DEFAULT_DRIVER_PATH};\n  auto check_code = CalCode(dirs);\n  MBLOG_INFO << \"check_code: \" << check_code << \", read_check_node: \" << read_check_node;\n  EXPECT_EQ(check_code, read_check_node);\n  status = drivers->Scan();\n  auto second_driver_nums = drivers->GetAllDriverList().size();\n  EXPECT_EQ(driver_nums, second_driver_nums);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nclass VirtualDriverTest : public testing::Test {\n public:\n  VirtualDriverTest() = default;\n\n protected:\n  void SetUp() override {\n    std::string cpu_python_src_path = std::string(PYTHON_PATH);\n    cpu_python_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-python.so\";\n    CopyFile(cpu_python_src_path, cpu_python_dest_path, 0, true);\n\n    std::string cpu_inference_src_path = std::string(INFERENCE_PATH);\n    cpu_inference_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-inference.so\";\n    CopyFile(cpu_python_src_path, cpu_inference_dest_path, 0, true);\n\n    std::string virtual_python_src_path = std::string(VIRTUAL_PYTHON_PATH);\n    virtual_python_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-virtualdriver-python.so\";\n    CopyFile(virtual_python_src_path, virtual_python_dest_path, 0, true);\n\n    std::string virtual_inference_src_path =\n        std::string(VIRTUAL_INFERENCE_PATH);\n    virtual_inference_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-virtualdriver-inference.so\";\n    CopyFile(virtual_inference_src_path, virtual_inference_dest_path, 0, true);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    drivers->Clear();\n    remove(cpu_python_dest_path.c_str());\n    remove(virtual_python_dest_path.c_str());\n    remove(cpu_inference_dest_path.c_str());\n    remove(virtual_inference_dest_path.c_str());\n  };\n\n private:\n 
 std::string cpu_python_dest_path, cpu_inference_dest_path;\n  std::string virtual_python_dest_path, virtual_inference_dest_path;\n};\n\nTEST_F(VirtualDriverTest, VirtualDriver) {\n  ConfigurationBuilder builder;\n  builder.AddProperty(DRIVER_DIR, TEST_ASSETS);\n  builder.AddProperty(DRIVER_SKIP_DEFAULT, \"true\");\n  std::shared_ptr<Configuration> config = builder.Build();\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  modelbox::DriverDesc desc;\n  MockDriverCtl ctl;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc.SetClass(\"driver-device\");\n  desc.SetType(\"ascend\");\n  desc.SetName(\"device-driver-ascend\");\n  desc.SetDescription(\"the ascend device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-ascend.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"ascend\", desc);\n  bool result = drivers->Initialize(config);\n  EXPECT_TRUE(result);\n\n  result = drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-device-*\");\n  EXPECT_TRUE(result);\n  result = drivers->Add(PYTHON_PATH);\n  EXPECT_TRUE(result);\n  if (access(INFERENCE_PATH, R_OK) == 0) {\n    drivers->Add(INFERENCE_PATH);\n  }\n  \n  result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-virtualdriver-*.so\");\n  drivers->VirtualDriverScan();\n  EXPECT_TRUE(result);\n  std::shared_ptr<Driver> driver_python = drivers->GetDriver(\n      \"DRIVER-FLOWUNIT\", \"cpu\", \"httpserver_python\", \"1.1.1\");\n  std::shared_ptr<DriverDesc> desc_python_test = driver_python->GetDriverDesc();\n  EXPECT_EQ(desc_python_test->GetClass(), \"DRIVER-FLOWUNIT\");\n  EXPECT_EQ(desc_python_test->GetType(), \"cpu\");\n  EXPECT_EQ(desc_python_test->GetName(), \"httpserver_python\");\n  EXPECT_EQ(desc_python_test->GetVersion(), \"1.1.1\");\n  std::string file_path_python =\n      std::string(TEST_ASSETS) + \"/resize_cpu/virtual_python_test.toml\";\n  EXPECT_EQ(desc_python_test->GetFilePath(), file_path_python);\n\n  
std::shared_ptr<Driver> driver_inference =\n      drivers->GetDriver(\"DRIVER-FLOWUNIT\", \"cpu\", \"inference\", \"1.1.2\");\n  std::shared_ptr<DriverDesc> desc_inference_test =\n      driver_inference->GetDriverDesc();\n  EXPECT_EQ(desc_inference_test->GetClass(), \"DRIVER-FLOWUNIT\");\n  EXPECT_EQ(desc_inference_test->GetType(), \"cpu\");\n  EXPECT_EQ(desc_inference_test->GetName(), \"inference\");\n  EXPECT_EQ(desc_inference_test->GetVersion(), \"1.1.2\");\n  std::string file_path_inference =\n      std::string(TEST_ASSETS) + \"/test_inference/virtual_model_test.toml\";\n  EXPECT_EQ(desc_inference_test->GetFilePath(), file_path_inference);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/graph_manager_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <dlfcn.h>\n#include <poll.h>\n#include <sys/time.h>\n\n#include <algorithm>\n#include <chrono>\n#include <fstream>\n#include <string>\n#include <thread>\n\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/graph_manager.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"graph_conf_mockgraphconf/graph_conf_mockgraphconf.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n\nnamespace modelbox {\n\nStatus SaveConfigFile(std::string &name, std::string &value) {\n  std::ofstream fp(name);\n  fp << value;\n  fp << std::endl;\n  fp.close();\n  return STATUS_OK;\n}\n\nStatus RemoveFile(std::string &name) {\n  remove(name.c_str());\n  return STATUS_OK;\n}\n\nclass GraphManagerTest : public testing::Test {\n public:\n  GraphManagerTest() = default;\n  MockDriverCtl ctl_;\n  std::shared_ptr<modelbox::Configuration> config_;\n  std::string conf_file_name_;\n  std::string conf_file_value_;\n\n protected:\n  void SetUp() override {\n    std::shared_ptr<Drivers> drivers_ = Drivers::GetInstance();\n\n    modelbox::DriverDesc desc;\n    desc.SetClass(\"DRIVER-GRAPHCONF\");\n    desc.SetType(\"GRAPHVIZ\");\n    desc.SetName(\"GRAPHCONF-GRAPHVIZ\");\n    desc.SetDescription(\"graph config parse graphviz\");\n    desc.SetVersion(\"0.1.0\");\n    std::string 
file_path_device =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-graphconf-graphviz.so\";\n    desc.SetFilePath(file_path_device);\n\n    ctl_.AddMockDriverGraphConf(\"graphviz\", \"\", desc);\n\n    drivers_->Scan(TEST_LIB_DIR, \"libmodelbox-graphconf-graphviz.so\");\n\n    ConfigurationBuilder builder;\n    config_ = builder.Build();\n\n    conf_file_value_ =R\"(digraph demo { \n          bgcolor=\"beige\"                    \n          node [shape=\"record\", height=.1]   \n          node0[label=\"<f0> | <f1> G | <f2>\" A=\"1,2,3\"]\n          node1[label=\"<f0> | <f1> E | <f2>\" A=\"\\\"1,2,3\\\"\"]\n          node2[label=\"<f0> | <f1> B | <f2>\" A=\"1,2\\,3\"]\n          node0:f0 -> node1:f1                 \n          node0:f2 -> node2:f1                 \n      })\";\n\n    conf_file_name_ = std::string(TEST_DATA_DIR) + \"/test_graph.gv\";\n    SaveConfigFile(conf_file_name_, conf_file_value_);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    drivers->Clear();\n    RemoveFile(conf_file_name_);\n  };\n};\n\nTEST_F(GraphManagerTest, ResolveStr) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconf\", conf_file_value_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n  EXPECT_NE(graphvizconf, nullptr);\n\n  auto gcgraph = graphvizconf->Resolve();\n  EXPECT_NE(gcgraph, nullptr);\n\n  auto graph_configuration = gcgraph->GetConfiguration();\n  auto graph_bgcolor = graph_configuration->GetString(\"bgcolor\");\n  MBLOG_INFO << \"bgcolor : \" << graph_bgcolor;\n  EXPECT_EQ(graph_bgcolor, \"beige\");\n  auto graph_error = graph_configuration->GetString(\"error\");\n  EXPECT_EQ(graph_error, 
\"\");\n}\n\nTEST_F(GraphManagerTest, NodeStr) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconf\", conf_file_value_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n\n  auto gcgraph = graphvizconf->Resolve();\n\n  auto node0 = gcgraph->GetNode(\"node0\");\n  EXPECT_NE(node0, nullptr);\n\n  auto outputPort = node0->GetOutputPorts();\n  std::string node0_output_port0 = *outputPort->begin();\n  MBLOG_INFO << \"node0 outputPort0 : \" << node0_output_port0;\n  EXPECT_EQ(node0_output_port0, \"f0\");\n\n  std::string node0_output_port1 = *(++outputPort->begin());\n  MBLOG_INFO << \"node0 outputPort1 : \" << node0_output_port1;\n  EXPECT_EQ(node0_output_port1, \"f2\");\n\n  auto node1 = gcgraph->GetNode(\"node1\");\n  EXPECT_NE(node1, nullptr);\n\n  auto node1_configuration = node1->GetConfiguration();\n  auto node1_height = node1_configuration->GetString(\"height\");\n  EXPECT_EQ(node1_height, \".1\");\n  auto node1_label = node1_configuration->GetString(\"label\");\n  EXPECT_EQ(node1_label, \"<f0> | <f1> E | <f2>\");\n  auto node1_shape = node1_configuration->GetString(\"shape\");\n  EXPECT_EQ(node1_shape, \"record\");\n  auto node1_error = node1_configuration->GetString(\"error\");\n  EXPECT_EQ(node1_error, \"\");\n\n  auto node2 = gcgraph->GetNode(\"node2\");\n  EXPECT_NE(node2, nullptr);\n\n  auto inputPorts = node2->GetInputPorts();\n  std::string node2_input_port0 = *inputPorts->begin();\n  MBLOG_INFO << \"node2 inputPort0 : \" << node2_input_port0;\n  EXPECT_EQ(node2_input_port0, \"f1\");\n\n  auto err_node = gcgraph->GetNode(\"err_node\");\n  EXPECT_EQ(err_node, nullptr);\n\n  auto all_nodes = gcgraph->GetAllNodes();\n  EXPECT_EQ(all_nodes.size(), 3);\n\n  auto root_graph 
= gcgraph->GetNode(\"node0\")->GetRootGraph();\n  EXPECT_EQ(root_graph, gcgraph);\n}\n\nTEST_F(GraphManagerTest, EdgeStr) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconf\", conf_file_value_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n\n  auto gcgraph = graphvizconf->Resolve();\n\n  auto edge1 = gcgraph->GetEdge(\"node0:f0-node1:f1\");\n  EXPECT_NE(edge1, nullptr);\n\n  auto edge1_configuration = edge1->GetConfiguration();\n  auto edge1_headport = edge1_configuration->GetString(\"headport\");\n  EXPECT_EQ(edge1_headport, \"f1\");\n  auto edge1_tailport = edge1_configuration->GetString(\"tailport\");\n  EXPECT_EQ(edge1_tailport, \"f0\");\n  auto edge1_error = edge1_configuration->GetString(\"error\");\n  EXPECT_EQ(edge1_error, \"\");\n\n  auto head_node = edge1->GetHeadNode();\n  auto node0 = gcgraph->GetNode(\"node0\");\n  EXPECT_EQ(head_node, node0);\n\n  auto tail_node = edge1->GetTailNode();\n  auto node1 = gcgraph->GetNode(\"node1\");\n  EXPECT_EQ(tail_node, node1);\n\n  auto edge2 = gcgraph->GetEdge(\"node0:f2-node2:f1\");\n  EXPECT_NE(edge2, nullptr);\n\n  auto edge3 = gcgraph->GetEdge(\"node1-node2\");\n  EXPECT_EQ(edge3, nullptr);\n\n  auto all_edges = gcgraph->GetAllEdges();\n  EXPECT_EQ(all_edges.size(), 2);\n}\n\nTEST_F(GraphManagerTest, SubgraphStr){\n\n}\n\nTEST_F(GraphManagerTest, ResolveFile) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconffilepath\", conf_file_name_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto 
graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n\n  auto gcgraph = graphvizconf->Resolve();\n}\n\nTEST_F(GraphManagerTest, NodeFile) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconffilepath\", conf_file_name_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n\n  auto gcgraph = graphvizconf->Resolve();\n  ASSERT_NE(gcgraph, nullptr);\n\n  auto node0 = gcgraph->GetNode(\"node0\");\n  EXPECT_NE(node0, nullptr);\n\n  auto node0_config = node0->GetConfiguration();\n  auto values = node0_config->GetStrings(\"A\");\n  ASSERT_EQ(values.size(), 3);\n  EXPECT_EQ(values[0], \"1\");\n  EXPECT_EQ(values[1], \"2\");\n  EXPECT_EQ(values[2], \"3\");\n\n  auto node1 = gcgraph->GetNode(\"node1\");\n  ASSERT_NE(node1, nullptr);\n  auto node1_config = node1->GetConfiguration();\n  values = node1_config->GetStrings(\"A\");\n  ASSERT_EQ(values.size(), 1);\n  EXPECT_EQ(values[0], \"1,2,3\");\n\n  auto node2 = gcgraph->GetNode(\"node2\");\n  EXPECT_NE(node2, nullptr);\n  auto node2_config = node2->GetConfiguration();\n  values = node2_config->GetStrings(\"A\");\n  ASSERT_EQ(values.size(), 2);\n  EXPECT_EQ(values[0], \"1\");\n  EXPECT_EQ(values[1], \"2,3\");\n\n  auto err_node = gcgraph->GetNode(\"err_node\");\n  EXPECT_EQ(err_node, nullptr);\n\n  auto all_nodes = gcgraph->GetAllNodes();\n  EXPECT_EQ(all_nodes.size(), 3);\n\n  auto root_graph = gcgraph->GetNode(\"node0\")->GetRootGraph();\n  EXPECT_EQ(root_graph, gcgraph);\n}\n\nTEST_F(GraphManagerTest, EdgeFile) {\n  config_->SetProperty(\"graph.format\", \"graphviz\");\n  config_->SetProperty(\"graph.graphconffilepath\", conf_file_name_);\n\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  GraphConfigManager graphconf_mgr = 
GraphConfigManager::GetInstance();\n\n  auto ret = graphconf_mgr.Initialize(drivers, config_);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  auto graphvizconf = graphconf_mgr.LoadGraphConfig(config_);\n\n  auto gcgraph = graphvizconf->Resolve();\n\n  auto edge1 = gcgraph->GetEdge(\"node0:f0-node1:f1\");\n  EXPECT_NE(edge1, nullptr);\n\n  auto head_node = edge1->GetHeadNode();\n  auto node0 = gcgraph->GetNode(\"node0\");\n  EXPECT_EQ(head_node, node0);\n\n  auto tail_node = edge1->GetTailNode();\n  auto node1 = gcgraph->GetNode(\"node1\");\n  EXPECT_EQ(tail_node, node1);\n\n  auto edge2 = gcgraph->GetEdge(\"node0:f2-node2:f1\");\n  EXPECT_NE(edge2, nullptr);\n\n  auto edge3 = gcgraph->GetEdge(\"node1-node2\");\n  EXPECT_EQ(edge3, nullptr);\n\n  auto all_edges = gcgraph->GetAllEdges();\n  EXPECT_EQ(all_edges.size(), 2);\n}\n\nTEST_F(GraphManagerTest, SubgraphFile){\n\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/list_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/list.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <string>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nclass ListTest : public testing::Test {\n public:\n  ListTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nstruct Item {\n  ListHead list;\n  int value;\n};\n\nTEST_F(ListTest, ForEach) {\n  ListHead head;\n  struct Item *item;\n  struct Item *tmp;\n  int i = 0;\n  ListInit(&head);\n\n  for (int i = 0; i < 10; i++) {\n    item = (struct Item *)malloc(sizeof(struct Item));\n    ListAddTail(&item->list, &head);\n    item->value = i;\n  }\n\n  i = 0;\n  ListForEachEntrySafe(item, tmp, &head, list) {\n    EXPECT_EQ(item->value, i);\n    i++;\n    ListDel(&item->list);\n    free(item);\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/log_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/log.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <mutex>\n#include <string>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"securec.h\"\nnamespace modelbox {\n\nclass LoggerTest : public Logger {\n public:\n  LoggerTest() = default;\n  ~LoggerTest() override = default;\n  void Vprint(LogLevel level, const char *file, int lineno, const char *func,\n              const char *format, va_list ap) override {\n    char msg[1024];\n    vsnprintf_s(msg, sizeof(msg), sizeof(msg), format, ap);\n    std::unique_lock<std::mutex> lock(mutex_);\n    log_msg_level_ = level;\n    log_msg_file_ = file;\n    log_msg_line_ = lineno;\n    log_msg_ = msg;\n    log_a_msg_ = true;\n  };\n  void SetLogLevel(LogLevel level) override { level_ = level; };\n  LogLevel GetLogLevel() override { return level_; };\n\n  std::string GetLogMsg() { return log_msg_; }\n  void ClearLogMsg() {\n    log_a_msg_ = false;\n    return log_msg_.clear();\n  }\n\n  const char *GetLogMsgFile() { return log_msg_file_; }\n  void ClearLogMsgFile() { log_msg_file_ = nullptr; }\n\n  int GetLogMsgLine() { return log_msg_line_; }\n  void ClearLogMsgLine() { log_msg_line_ = -1; }\n\n  LogLevel GetLogMsgLevel() { return log_msg_level_; }\n  void ClearLogMsgLevel() { log_msg_level_ = LOG_OFF; 
}\n\n  bool IsLogMsg() { return log_a_msg_; }\n\n  void Clear() {\n    ClearLogMsg();\n    ClearLogMsgFile();\n    ClearLogMsgLine();\n    ClearLogMsgLevel();\n  }\n\n private:\n  LogLevel level_ = LOG_DEBUG;\n  std::mutex mutex_;\n  std::string log_msg_;\n  LogLevel log_msg_level_;\n  const char *log_msg_file_;\n  int log_msg_line_;\n  bool log_a_msg_;\n};\n\nclass LogTest : public testing::Test {\n public:\n  LogTest() = default;\n\n protected:\n  void SetUp() override {\n    old_logger_ = ModelBoxLogger.GetLogger();\n    old_level_ = ModelBoxLogger.GetLogger()->GetLogLevel();\n  };\n  void TearDown() override {\n    ModelBoxLogger.SetLogger(old_logger_);\n    ModelBoxLogger.GetLogger()->SetLogLevel(old_level_);\n  };\n\n private:\n  std::shared_ptr<Logger> old_logger_;\n  LogLevel old_level_;\n};\n\nTEST_F(LogTest, LoggerConsole) {\n  ModelBoxLogger.GetLogger()->SetLogLevel(LOG_DEBUG);\n  MODELBOX_DEBUG(\"%s\", \"this is DEBUG\");\n  MODELBOX_INFO(\"%s\", \"this is INFO\");\n  MODELBOX_NOTICE(\"%s\", \"this is NOTICE\");\n  MODELBOX_ERROR(\"%s\", \"this is ERROR\");\n  MODELBOX_FATAL(\"%s\", \"this is FATAL\");\n\n  MBLOG_DEBUG << \"this is DEBUG\";\n  MBLOG_INFO << \"this is INFO\";\n  MBLOG_NOTICE << \"this is NOTICE\";\n  MBLOG_ERROR << \"this is ERROR\";\n  MBLOG_FATAL << \"this is FATAL\";\n  MBLOG_STACKTRACE(LOG_INFO);\n}\n\nTEST_F(LogTest, LoggerWithID) {\n  ModelBoxLogger.GetLogger()->SetLogLevel(LOG_DEBUG);\n  auto logid = modelbox::LogSetLogID(\"LOGID\");\n  MBLOG_DEBUG << \"this is DEBUG\";\n  MBLOG_INFO << \"this is INFO\";\n  MBLOG_NOTICE << \"this is NOTICE\";\n  MBLOG_ERROR << \"this is ERROR\";\n  MBLOG_FATAL << \"this is FATAL\";\n  MBLOG_STACKTRACE(LOG_INFO);\n}\n\nTEST_F(LogTest, LoggerCallBackPrint) {\n  std::string origin_msg = \"this is message\";\n  std::string expect_msg;\n  RegLogPrint([&](LogLevel level, const char *file, int lineno,\n                  const char *func, const char *msg) { expect_msg = msg; });\n  MBLOG_ERROR << 
origin_msg;\n  EXPECT_EQ(origin_msg, expect_msg);\n}\n\nTEST_F(LogTest, LoggerCallBackVprint) {\n  std::string origin_msg = \"this is message\";\n  std::string expect_msg;\n  RegLogVprint([&](LogLevel level, const char *file, int lineno,\n                   const char *func, const char *format, va_list ap) {\n    char buff[4096];\n    vsnprintf_s(buff, sizeof(buff), sizeof(buff), format, ap);\n    expect_msg = buff;\n  });\n  MBLOG_ERROR << origin_msg;\n  EXPECT_EQ(origin_msg, expect_msg);\n}\n\nTEST_F(LogTest, LoggerDataCheck) {\n  std::string expect_msg = \"this is a log\";\n  auto test_logger = std::make_shared<LoggerTest>();\n  ModelBoxLogger.SetLogger(test_logger);\n\n  test_logger->Clear();\n  test_logger->SetLogLevel(LOG_DEBUG);\n  int line = __LINE__ + 1;\n  MBLOG_DEBUG << expect_msg;\n  EXPECT_EQ(expect_msg, test_logger->GetLogMsg());\n  EXPECT_EQ(LOG_DEBUG, test_logger->GetLogMsgLevel());\n  EXPECT_EQ(BASE_FILE_NAME, test_logger->GetLogMsgFile());\n  EXPECT_EQ(line, test_logger->GetLogMsgLine());\n  EXPECT_TRUE(test_logger->IsLogMsg());\n\n  test_logger->Clear();\n  test_logger->SetLogLevel(LOG_OFF);\n\n  MBLOG_DEBUG << expect_msg;\n  EXPECT_EQ(\"\", test_logger->GetLogMsg());\n  EXPECT_EQ(LOG_OFF, test_logger->GetLogMsgLevel());\n  EXPECT_FALSE(test_logger->IsLogMsg());\n}\n\nTEST_F(LogTest, LoggerMultiThread) {\n  std::string expect_msg = \"this is a log\";\n  std::vector<std::thread> threads;\n  int loop = 100;\n  auto test_logger = std::make_shared<LoggerTest>();\n  ModelBoxLogger.SetLogger(test_logger);\n\n  test_logger->Clear();\n  test_logger->SetLogLevel(LOG_DEBUG);\n\n  for (int i = 0; i < 10; i++) {\n    std::thread t([&, i]() {\n      for (int j = 0; j < loop; j++) {\n        MODELBOX_LOGSTREAM(LogLevel(j % LOG_OFF))\n            << \"Thread\" << i << \": Number:\" << j;\n      }\n    });\n\n    threads.emplace_back(std::move(t));\n  }\n\n  for (auto &thread : threads) {\n    thread.join();\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/memory_pool_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/memory_pool.h\"\n\n#include <memory>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/utils.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nclass MemoryPoolTest : public testing::Test {\n public:\n  MemoryPoolTest() = default;\n  ~MemoryPoolTest() override = default;\n\n protected:\n  void SetUp() override{};\n\n  void TearDown() override{};\n};\n\nTEST_F(MemoryPoolTest, MemoryPool) {\n  MemoryPoolBase p;\n  p.InitSlabCache();\n  unsigned int num = 0;\n  for (int i = 0; i < 10; i++) {\n    GetRandom((unsigned char *)&num, sizeof(num));\n    int size = num % (1024 * 512);\n    auto ptr = p.AllocSharedPtr(size);\n    ASSERT_NE(ptr, nullptr);\n    memset_s(ptr.get(), size, 0, size);\n  }\n}\n\nTEST_F(MemoryPoolTest, MemoryPoolShrink) {\n  MemoryPoolBase p;\n  int slab_number = 4;\n  int slab_expand_size = 1024 * 1024;\n  int obj_size = 1024;\n  int obj_number_per_slab = slab_expand_size / obj_size;\n  int obj_number = obj_number_per_slab * slab_number;\n  p.InitSlabCache(10, 10);\n  std::vector<std::shared_ptr<void>> results;\n  for (int i = 0; i < obj_number; i++) {\n    auto size = obj_size;\n    auto ptr = p.AllocSharedPtr(size);\n    ASSERT_NE(ptr, nullptr);\n    memset_s(ptr.get(), size, 0, size);\n    results.push_back(ptr);\n  }\n\n  
EXPECT_EQ(p.GetAllObjectNum(), obj_number);\n  results.clear();\n  EXPECT_EQ(p.GetAllActiveObjectNum(), 0);\n  p.ShrinkSlabCache(3, 0);\n  slab_number = 3;\n  obj_number = obj_number_per_slab * slab_number;\n  EXPECT_EQ(p.GetAllObjectNum(), obj_number);\n  p.ShrinkSlabCache(0, 1);\n  EXPECT_EQ(p.GetAllObjectNum(), obj_number);\n  std::this_thread::sleep_for(std::chrono::milliseconds(1100));\n  p.ShrinkSlabCache(0, 1);\n  EXPECT_EQ(p.GetAllObjectNum(), 0);\n}\n\nTEST_F(MemoryPoolTest, MemoryPoolShrinkExpire) {\n  MemoryPoolBase p;\n  int slab_number = 4;\n  int slab_expand_size = 1024 * 1024;\n  int obj_size = 1024;\n  int obj_number_per_slab = slab_expand_size / obj_size;\n  int obj_number = obj_number_per_slab * slab_number;\n  p.InitSlabCache(10, 10);\n  std::vector<std::shared_ptr<void>> results;\n  for (int i = 0; i < obj_number; i++) {\n    auto size = obj_size;\n    auto ptr = p.AllocSharedPtr(size);\n    ASSERT_NE(ptr, nullptr);\n    memset_s(ptr.get(), size, 0, size);\n    results.push_back(ptr);\n  }\n\n  EXPECT_EQ(p.GetAllObjectNum(), obj_number);\n  results.clear();\n  EXPECT_EQ(p.GetAllActiveObjectNum(), 0);\n  std::this_thread::sleep_for(std::chrono::milliseconds(1100));\n  p.ShrinkSlabCache(4, 0, 1);\n  EXPECT_EQ(p.GetAllObjectNum(), 0);\n}\n\nTEST_F(MemoryPoolTest, GetList) {\n  std::map<std::string, std::shared_ptr<MemoryPoolBase>> pool_list;\n  int num = 10;\n  for (int i = 0; i < num; i++) {\n    auto p = std::make_shared<MemoryPoolBase>(std::to_string(i));\n    p->InitSlabCache();\n    pool_list[std::to_string(i)] = p;\n  }\n\n  EXPECT_EQ(MemoryPoolBase::GetAllPools().size(), num);\n  pool_list.erase(\"1\");\n  EXPECT_EQ(MemoryPoolBase::GetAllPools().size(), num - 1);\n\n  MBLOG_INFO << \"Total Num: \" << MemoryPoolBase::GetAllPools().size();\n  for (auto &p : MemoryPoolBase::GetAllPools()) {\n    EXPECT_EQ(p.get(), pool_list[p->GetName()].get());\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/memory_statistic_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/memory_statistic.h\"\n"
  },
  {
    "path": "test/unit/libmodelbox/base/os_stats_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/os.h\"\n#include \"gtest/gtest.h\"\n\n#include <memory>\n\nnamespace modelbox {\n\nclass StatsTest : public testing::Test {\n public:\n  StatsTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nTEST_F(StatsTest, OSProcess) {\n  EXPECT_EQ(os->Process->GetPid(), getpid());\n  EXPECT_EQ(os->Thread->GetTid(), std::this_thread::get_id());\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/popen_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/popen.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <string>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/status.h\"\n\nnamespace modelbox {\n\nclass PopenTest : public testing::Test {\n public:\n  PopenTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n\nTEST_F(PopenTest, OpenCaptureOutput) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/bash\");\n  args.emplace_back(\"-c\");\n  args.emplace_back(\n      \"N=0;\"\n      \"while [ $N -lt 1000 ]; do echo msg $N; echo err $N >&2; ((N=N+1)); \"\n      \"done\");\n  p.Open(args, 1000, \"re\");\n\n  int expect = 1000;\n  int ret = 0;\n  int stdoutcount = 0;\n  int stderrcount = 0;\n  while (true) {\n    ret = p.WaitForLineRead(1000);\n    if (ret < 0) {\n      break;\n    }\n\n    if (ret == 0) {\n      continue;\n    }\n\n    std::string line;\n    p.ReadOutLine(line);\n    if (line.length() > 0) {\n      stdoutcount++;\n    }\n\n    line.clear();\n    p.ReadErrLine(line);\n    if (line.length() > 0) {\n      stderrcount++;\n    }\n  }\n  ret = p.Close();\n  EXPECT_EQ(ret, 0);\n  EXPECT_EQ(stdoutcount, expect);\n  
EXPECT_EQ(stderrcount, expect);\n}\n\nTEST_F(PopenTest, OpenTimeout) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/bash\");\n  args.emplace_back(\"-c\");\n  args.emplace_back(\n      \"N=0;\"\n      \"while [ true ]; do echo msg $N; echo err $N >&2; ((N=N+1)); \"\n      \"done\");\n  p.Open(args, 100, \"re\");\n\n  int expect = 100;\n  int ret = 0;\n  int stdoutcount = 0;\n  int stderrcount = 0;\n  while (true) {\n    ret = p.WaitForLineRead();\n    if (ret < 0) {\n      break;\n    }\n\n    if (ret == 0) {\n      continue;\n    }\n\n    std::string line;\n    p.ReadOutLine(line);\n    if (line.length() > 0) {\n      stdoutcount++;\n    }\n\n    line.clear();\n    p.ReadErrLine(line);\n    if (line.length() > 0) {\n      stderrcount++;\n    }\n  }\n  ret = p.Close();\n  EXPECT_EQ(WIFSIGNALED(ret), 1);\n  EXPECT_EQ(WTERMSIG(ret), SIGTERM);\n  EXPECT_GT(stdoutcount, expect);\n  EXPECT_GT(stderrcount, expect);\n}\n\nTEST_F(PopenTest, OpenCaptureStdOutputOnly) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/bash\");\n  args.emplace_back(\"-c\");\n  args.emplace_back(\n      \"N=0;\"\n      \"while [ $N -lt 10 ]; do echo msg $N; echo err $N >&2; ((N=N+1)); \"\n      \"done\");\n  p.Open(args, 1000);\n\n  int expect = 10;\n  int ret = 0;\n  int stdoutcount = 0;\n  while (true) {\n    std::string line;\n    auto ret = p.ReadOutLine(line);\n    if (ret < 0) {\n      break;\n    }\n\n    if (line.length() > 0) {\n      stdoutcount++;\n    }\n  }\n  ret = p.Close();\n  EXPECT_EQ(ret, 0);\n  EXPECT_EQ(stdoutcount, expect);\n}\n\nTEST_F(PopenTest, OpenCaptureNone) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/bash\");\n  args.emplace_back(\"-c\");\n  args.emplace_back(\n      \"N=0;\"\n      \"while [ $N -lt 10 ]; do echo msg $N; echo err $N >&2; ((N=N+1)); \"\n      \"done\");\n  p.Open(args, 1000, \"\");\n  auto ret = p.Close();\n  EXPECT_EQ(ret, 0);\n}\n\nTEST_F(PopenTest, 
OpenCaptureNoneTimeOut) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/sleep\");\n  args.emplace_back(\"10\");\n  p.Open(args, 100, \"\");\n  auto ret = p.Close();\n  EXPECT_EQ(WIFSIGNALED(ret), 1);\n  EXPECT_EQ(WTERMSIG(ret), SIGTERM);\n}\n\nTEST_F(PopenTest, OpenWait) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/sleep\");\n  args.emplace_back(\"0.1\");\n  p.Open(args, -1, \"\");\n  auto ret = p.Close();\n  EXPECT_EQ(WIFSIGNALED(ret), 0);\n  EXPECT_EQ(WEXITSTATUS(ret), 0);\n}\n\nTEST_F(PopenTest, OpenInput) {\n  Popen p;\n  std::vector<std::string> args;\n  args.emplace_back(\"/bin/bash\");\n  args.emplace_back(\"-c\");\n  args.emplace_back(\n      \"N=0;\\n\"\n      \"read N\\n\"\n      \"echo $N\\n\"\n      \"exit $N\\n\");\n  p.Open(args, 1000, \"w\");\n  auto ret = p.WriteString(\"10\\n\");\n  EXPECT_EQ(ret, 0);\n  ret = p.Close();\n  EXPECT_EQ(WEXITSTATUS(ret), 10);\n}\n\nTEST_F(PopenTest, OpenCmdLine) {\n  Popen p;\n  p.Open(\"/bin/bash -c \\\"ls -h /bin/sh \\\"\", 100, \"r\");\n  std::string line;\n  p.ReadOutLine(line);\n  EXPECT_EQ(line, \"/bin/sh\\n\");\n  auto ret = p.Close();\n  EXPECT_EQ(WEXITSTATUS(ret), 0);\n}\n\nTEST_F(PopenTest, OpenEnvCheck) {\n  Popen p;\n  PopenEnv env = \"TEST_ENV_1=a TEST_ENV_2=b\";\n  env.Rmv(\"USER\");\n  p.Open(\"/bin/bash -c \\\"echo $USER && echo $TEST_ENV_1; echo $TEST_ENV_2\\\"\",\n         100, \"r\", env);\n  std::string line;\n  p.ReadOutLine(line);\n  EXPECT_EQ(line, \"\\n\");\n  p.ReadOutLine(line);\n  EXPECT_EQ(line, \"a\\n\");\n  p.ReadOutLine(line);\n  EXPECT_EQ(line, \"b\\n\");\n  auto ret = p.Close();\n  EXPECT_EQ(WEXITSTATUS(ret), 0);\n}\n\nTEST_F(PopenTest, OpenReadAll) {\n  Popen p;\n  std::string cmd =\n      \"/bin/bash -c \\\"\"\n      \"N=0;\"\n      \"while [ $N -lt 100 ]; do echo msg $N; echo err $N >&2; ((N=N+1)); \"\n      \"done\\\"\";\n  p.Open(cmd, 1000, \"re\");\n\n  std::string out;\n  std::string err;\n  auto ret = p.ReadAll(&out, 
&err);\n  EXPECT_EQ(ret, 0);\n\n  auto count_line = [](const std::string &str) {\n    size_t i = 0;\n    int count = 0;\n    for (i = 0; i < str.length(); i++) {\n      if (str.c_str()[i] == '\\n' || str.c_str()[i] == '\\0') {\n        count++;\n      }\n    }\n\n    return count;\n  };\n\n  ret = p.Close();\n  EXPECT_EQ(count_line(out), 100);\n  EXPECT_EQ(count_line(err), 100);\n  EXPECT_EQ(ret, 0);\n}\n\nTEST_F(PopenTest, OpenNotExists) {\n  Popen p;\n  std::string cmd = \"/NOT-EXIST\";\n  p.Open(cmd, 1000, \"re\");\n\n  std::string out;\n  std::string err;\n  auto ret = p.ReadAll(&out, &err);\n  EXPECT_EQ(ret, 0);\n\n  EXPECT_GT(err.find_first_of(cmd), 0);\n\n  ret = p.Close();\n  EXPECT_EQ(ret, 256);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/base/refcache_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/refcache.h>\n#include <modelbox/base/utils.h>\n\n#include <future>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\nclass RefCacheTest : public testing::Test {\n public:\n  RefCacheTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nclass Data {\n public:\n  Data() = default;\n  virtual ~Data() = default;\n};\n\nTEST_F(RefCacheTest, Get) {\n  RefCache<Data> cache;\n  std::shared_ptr<Data> data = std::make_shared<Data>();\n  auto trans = cache.Insert(\"a\");\n  auto keep = trans->UpdateData(data);\n  EXPECT_TRUE(cache.Get(\"a\") != nullptr);\n  keep = nullptr;\n  EXPECT_TRUE(cache.Get(\"a\") == nullptr);\n}\n\nTEST_F(RefCacheTest, Insert) {\n  RefCache<Data> cache;\n  std::shared_ptr<Data> data = std::make_shared<Data>();\n  std::shared_ptr<Data> data2 = std::make_shared<Data>();\n  auto trans1 = cache.Insert(\"a\");\n  auto trans2 = cache.Insert(\"a\");\n  auto keep = trans1->UpdateData(data);\n  EXPECT_TRUE(keep != nullptr);\n  EXPECT_TRUE(trans2 == nullptr);\n}\n\nTEST_F(RefCacheTest, InsertDelay) {\n  RefCache<Data> cache;\n  std::shared_ptr<Data> data = std::make_shared<Data>();\n  auto trans1 = cache.Insert(\"a\");\n  auto result_future = 
std::async(std::launch::async, [&]() {\n    auto data2 = cache.Get(\"a\", true);\n    EXPECT_TRUE(data2 != nullptr);\n  });\n  std::this_thread::sleep_for(std::chrono::milliseconds(50));\n  auto data3 = trans1->UpdateData(data);\n  EXPECT_TRUE(data3 != nullptr);\n  data3 = nullptr;\n  result_future.get();\n  EXPECT_TRUE(cache.Get(\"a\") == nullptr);\n}\n\nTEST_F(RefCacheTest, InsertAndGet) {\n  RefCache<Data> cache;\n  std::shared_ptr<Data> data;\n  std::atomic_int count{0};\n  int loop = 10;\n  std::vector<std::future<void>> result;\n  for (int i = 0; i < loop; i++) {\n    auto result_future = std::async(std::launch::async, [&]() {\n      auto trans = cache.InsertAndGet(\"a\");\n      if (trans == nullptr) {\n        return;\n      }\n      ASSERT_TRUE(trans != nullptr);\n      auto data = trans->GetData();\n      if (data) {\n        count++;\n      } else {\n        std::shared_ptr<Data> datain = std::make_shared<Data>();\n        std::this_thread::sleep_for(std::chrono::milliseconds(50));\n        trans->UpdateData(datain);\n      }\n    });\n    result.push_back(std::move(result_future));\n  }\n\n  for (auto &r : result) {\n    r.get();\n  }\n\n  EXPECT_EQ(count, loop - 1);\n}\n\nTEST_F(RefCacheTest, Update) {\n  RefCache<Data> cache;\n  std::shared_ptr<Data> data = std::make_shared<Data>();\n  std::shared_ptr<Data> data2 = std::make_shared<Data>();\n  auto keep = cache.Update(\"a\", data);\n  auto keep2 = cache.Update(\"a\", data2);\n  EXPECT_TRUE(keep != nullptr);\n  EXPECT_TRUE(keep2 != nullptr);\n}\n\nTEST_F(RefCacheTest, GetCacheData) {\n  RefCacheData cache;\n  std::shared_ptr<Data> data = std::make_shared<Data>();\n  auto trans = cache.Insert(\"a\");\n  auto keep = trans->UpdateData(data);\n  EXPECT_TRUE(cache.Get(\"a\") != nullptr);\n  keep = nullptr;\n  EXPECT_TRUE(cache.Get(\"a\") == nullptr);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/base/slab_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/slab.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <string>\n#include <thread>\n\n#include \"modelbox/base/list.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"gtest/gtest.h\"\n#include \"securec.h\"\n\nnamespace modelbox {\n\nclass SlabTest : public testing::Test {\n public:\n  SlabTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nTEST_F(SlabTest, SlabMalloc) {\n  Slab cache(nullptr, 128, 4096);\n  EXPECT_TRUE(cache.Init());\n  void *ptr = cache.Alloc();\n  void *ptr1 = cache.Alloc();\n  ASSERT_NE(ptr, nullptr);\n  ASSERT_NE(ptr1, nullptr);\n  EXPECT_EQ(2, cache.ActiveObjects());\n  cache.Free(ptr);\n  EXPECT_EQ(1, cache.ActiveObjects());\n  cache.Free(ptr1);\n  EXPECT_EQ(0, cache.ActiveObjects());\n}\n\nTEST_F(SlabTest, SlabMallocSharedPtrCheck) {\n  int objsize = 128;\n  int num = 1000 + 1;\n  char mem_cmp[objsize];\n  SlabCache cache(objsize, objsize * 4);\n  std::vector<std::shared_ptr<void>> mem_ptr;\n  for (int i = 0; i < num; i++) {\n    auto ptr = cache.AllocSharedPtr();\n    ASSERT_NE(ptr, nullptr);\n    mem_ptr.push_back(ptr);\n  }\n\n  EXPECT_EQ(num, cache.GetActiveObjNumber());\n  EXPECT_EQ(num / 4 + 1, cache.SlabNumber());\n\n  for (int i = 0; i < 
num; i++) {\n    auto ptr = mem_ptr[i];\n    memset_s(ptr.get(), objsize, i, objsize);\n  }\n\n  for (int i = 0; i < num; i++) {\n    auto ptr = mem_ptr[i];\n    memset_s(mem_cmp, objsize, i, objsize);\n    EXPECT_EQ(0, memcmp(ptr.get(), mem_cmp, objsize));\n  }\n\n  auto ptr = mem_ptr[0];\n  memset_s(mem_cmp, objsize, 0, objsize);\n  EXPECT_EQ(0, memcmp(ptr.get(), mem_cmp, objsize));\n  ptr = nullptr;\n\n  mem_ptr.pop_back();\n  auto ptr_last = cache.AllocSharedPtr();\n  memset_s(mem_cmp, objsize, num - 1, objsize);\n  EXPECT_EQ(0, memcmp(ptr_last.get(), mem_cmp, objsize));\n  ptr_last = nullptr;\n\n  mem_ptr.clear();\n  EXPECT_EQ(num / 4 + 1, cache.GetEmptySlabNumber());\n  EXPECT_EQ(0, cache.GetActiveObjNumber());\n}\n\nTEST_F(SlabTest, SlabMallocSharedPtr) {\n  SlabCache cache(128, 4096);\n  auto ptr = cache.AllocSharedPtr();\n  ASSERT_NE(ptr, nullptr);\n  EXPECT_EQ(1, cache.GetActiveObjNumber());\n  ptr = nullptr;\n  EXPECT_EQ(0, cache.GetActiveObjNumber());\n}\n\nTEST_F(SlabTest, SlabCacheMallocSharedPtr) {\n  SlabCache cache(128, 256);\n  EXPECT_EQ(128, cache.ObjectSize());\n  auto ptr = cache.AllocSharedPtr();\n  ASSERT_NE(ptr, nullptr);\n  EXPECT_EQ(256 / 128, cache.GetObjNumber());\n  auto ptr1 = cache.AllocSharedPtr();\n  ASSERT_NE(ptr1, nullptr);\n  auto ptr2 = cache.AllocSharedPtr();\n  ASSERT_NE(ptr2, nullptr);\n  EXPECT_EQ(3, cache.GetActiveObjNumber());\n  ptr = nullptr;\n  EXPECT_EQ(2, cache.GetActiveObjNumber());\n  ptr1 = nullptr;\n  EXPECT_EQ(1, cache.GetActiveObjNumber());\n  ptr2 = nullptr;\n  EXPECT_EQ(0, cache.GetActiveObjNumber());\n  EXPECT_EQ(2, cache.GetEmptySlabNumber());\n}\n\nTEST_F(SlabTest, SlabCacheActiveNumber) {\n  SlabCache cache(128, 128);\n  auto ptr = cache.AllocSharedPtr();\n  auto ptr1 = cache.AllocSharedPtr();\n  ASSERT_NE(ptr, nullptr);\n  ASSERT_NE(ptr1, nullptr);\n  EXPECT_EQ(2, cache.GetObjNumber());\n  EXPECT_EQ(2, cache.GetActiveObjNumber());\n  ptr = nullptr;\n  EXPECT_EQ(2, cache.GetObjNumber());\n  EXPECT_EQ(1, 
cache.GetActiveObjNumber());\n  EXPECT_EQ(2, cache.GetObjNumber());\n  ptr1 = nullptr;\n  EXPECT_EQ(2, cache.GetEmptySlabNumber());\n}\n\nTEST_F(SlabTest, SlabShrink) {\n  int obj_size = 128;\n  int slab_size = 4096;\n  int total = 0;\n  int slab_count = 10;\n  SlabCache cache(obj_size, slab_size);\n  std::vector<std::shared_ptr<void>> results;\n  for (int i = 0; i < slab_size / obj_size * slab_count; i++) {\n    auto p = cache.AllocSharedPtr();\n    ASSERT_NE(p, nullptr);\n    results.push_back(p);\n    total++;\n  }\n\n  cache.Shrink();\n  EXPECT_EQ(cache.SlabNumber(), slab_count);\n  EXPECT_EQ(total, cache.GetActiveObjNumber());\n  EXPECT_GT(cache.GetObjNumber(), 0);\n  results.clear();\n  cache.Shrink(5);\n  EXPECT_EQ(cache.SlabNumber(), 5);\n\n  cache.Shrink(0, 1);\n  EXPECT_EQ(cache.SlabNumber(), 5);\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(1100));\n  cache.Shrink(0, 1);\n  EXPECT_EQ(cache.SlabNumber(), 0);\n\n  cache.Shrink();\n  EXPECT_EQ(0, cache.GetActiveObjNumber());\n  EXPECT_EQ(cache.GetObjNumber(), 0);\n  EXPECT_EQ(cache.SlabNumber(), 0);\n}\n\nTEST_F(SlabTest, SlabCacheReclaim) {\n  SlabCache cache(128, 128);\n  std::vector<std::shared_ptr<void>> ptrs;\n  int number = 100;\n  for (int i = 0; i < number; i++) {\n    auto ptr = cache.AllocSharedPtr();\n    ptrs.emplace_back(ptr);\n  }\n\n  EXPECT_EQ(number, cache.GetObjNumber());\n  EXPECT_EQ(number, cache.GetActiveObjNumber());\n  ptrs.clear();\n  EXPECT_EQ(number, cache.GetObjNumber());\n  EXPECT_EQ(0, cache.GetActiveObjNumber());\n  EXPECT_EQ(number, cache.GetEmptySlabNumber());\n  cache.Reclaim(0);\n  EXPECT_EQ(number * 10 / 100, cache.GetEmptySlabNumber());\n  cache.Reclaim(0);\n  EXPECT_EQ(1, cache.GetEmptySlabNumber());\n}\n\nTEST_F(SlabTest, Perf) {\n  int obj_size = 4;\n  SlabCache cache(obj_size, 4096);\n  std::vector<std::thread> threads;\n  std::atomic<unsigned long> number;\n  bool stop = false;\n  unsigned long begin;\n  unsigned long end;\n  int cpu_num = 
std::thread::hardware_concurrency() * 2;\n\n  number = 0;\n  begin = GetTickCount();\n  for (int i = 0; i < cpu_num; i++) {\n    auto t = std::thread([&, i]() {\n      while (stop == false) {\n        std::vector<std::shared_ptr<void>> ptrs;\n        std::this_thread::sleep_for(std::chrono::milliseconds(i));\n        for (int j = 0; j < 100000; j++) {\n          if (stop == true) {\n            break;\n          }\n          auto p = cache.AllocSharedPtr();\n          if (p) {\n            ptrs.push_back(p);\n            *(int *)(p.get()) = i * j;\n          }\n        }\n\n        for (int j = 0; j < (int)(ptrs.size()); j++) {\n          EXPECT_EQ(i * j, *(int *)(ptrs[j].get()));\n        }\n        number += ptrs.size();\n        ptrs.clear();\n      }\n    });\n    threads.push_back(std::move(t));\n  }\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(500));\n  stop = true;\n\n  for (auto &t : threads) {\n    t.join();\n  }\n  end = GetTickCount();\n\n  MBLOG_INFO << \"total: \" << number;\n  MBLOG_INFO << \"ops: \" << 1.0 * number / (end - begin) * 1000.0;\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/status_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/base/status.h\"\n\n#include <poll.h>\n#include <sys/time.h>\n\n#include <chrono>\n#include <string>\n#include <thread>\n\n#include \"modelbox/base/utils.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nclass StatusTest : public testing::Test {\n public:\n  StatusTest() = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nTEST_F(StatusTest, OK) {\n  EXPECT_EQ(STATUS_OK, STATUS_SUCCESS);\n  const Status &result = STATUS_OK;\n  const Status &result_OK1 = STATUS_OK;\n  EXPECT_EQ(&result, &result_OK1);\n  EXPECT_TRUE(result);\n}\n\nTEST_F(StatusTest, EqualNotEqual) {\n  Status first(STATUS_SUCCESS);\n  Status second(STATUS_OK);\n  Status thrid(STATUS_ALREADY);\n\n  EXPECT_EQ(first, second);\n  EXPECT_NE(first, thrid);\n\n  Status ret = STATUS_EXIST;\n  EXPECT_TRUE(STATUS_EXIST == ret);\n}\n\nTEST_F(StatusTest, Message) {\n  const char *msg = \"this is message\";\n  Status result(STATUS_SUCCESS, msg);\n  EXPECT_EQ(result.Errormsg(), msg);\n}\n\nTEST_F(StatusTest, WrapError) {\n  const char *wrap_msg = \"origin wrap msg.\";\n  const char *empty_msg = \"\";\n  const char *middle_msg = \"middle msg.\";\n  const char *msg_ret = \"new msg.\";\n  Status origin(STATUS_EXIST, wrap_msg);\n  Status middle(origin, middle_msg);\n  Status empty(middle, 
empty_msg);\n  Status ret(empty, msg_ret);\n  std::string expect_msg = ret.StrCode() + \", \" + ret.Errormsg() + \" -> \" +\n                           middle.Errormsg() + \" -> \" + origin.Errormsg();\n  EXPECT_EQ(ret.WrapErrormsgs(), expect_msg);\n}\n\nTEST_F(StatusTest, WrapErrorCodeOnly) {\n  const char *wrap_msg = \"\";\n  const char *empty_msg = \"\";\n  const char *msg_ret = \"\";\n  Status origin(STATUS_EXIST, wrap_msg);\n  Status empty(origin, empty_msg);\n  Status ret(empty, msg_ret);\n  std::string expect_msg = origin.StrCode();\n  EXPECT_EQ(ret.WrapErrormsgs(), expect_msg);\n}\n\nTEST_F(StatusTest, ToString) {\n  std::string msg = \"this is message\";\n  Status result(STATUS_SUCCESS, msg);\n  EXPECT_EQ(result.Errormsg(), msg);\n  EXPECT_EQ(result.ToString(), \"code: Success, errmsg: \" + msg);\n}\n\nTEST_F(StatusTest, OperationLogicalNot) {\n  Status result_ok(STATUS_SUCCESS);\n  EXPECT_FALSE(!result_ok);\n  Status result_fault(STATUS_FAULT);\n  EXPECT_TRUE(!result_fault);\n}\n\nTEST_F(StatusTest, GetAllMessage) {\n  int num = STATUS_LASTFLAG;\n  for (int i = 0; i < num; i++) {\n    Status check((StatusCode)i);\n    EXPECT_NE(check.StrCode(), \"\");\n  }\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/base/thread_pool_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/base/thread_pool.h\"\n\n#include \"gtest/gtest.h\"\nclass ThreadPoolTest : public testing::Test {\n public:\n  ThreadPoolTest() = default;\n  ~ThreadPoolTest() override = default;\n\n protected:\n  void SetUp() override{\n\n  };\n  void TearDown() override{};\n};\n\nint compute(int a, int b) { return a + b; }\n\nstd::mutex coutMtx;\n\nvoid short_task(int task_id) {\n  std::this_thread::sleep_for(std::chrono::milliseconds(100));\n}\n\nvoid long_task(int consume_time) {\n  std::this_thread::sleep_for(std::chrono::milliseconds(consume_time));\n}\n\nTEST_F(ThreadPoolTest, PoolCreate) {\n  modelbox::ThreadPool pool(10);\n  EXPECT_EQ(pool.GetThreadsNum(), 0);\n}\n\nTEST_F(ThreadPoolTest, SubmitTasks) {\n  modelbox::ThreadPool pool;\n  auto fut1 = pool.Submit(compute, 100, 100);\n  auto fut2 = pool.Submit(compute, 100, 101);\n  auto fut3 = pool.Submit(compute, 100, 102);\n  auto fut4 = pool.Submit(compute, 100, 103);\n  auto fut5 = pool.Submit(compute, 100, 104);\n\n  EXPECT_EQ(fut1.get(), 200);\n  EXPECT_EQ(fut2.get(), 201);\n  EXPECT_EQ(fut3.get(), 202);\n  EXPECT_EQ(fut4.get(), 203);\n  EXPECT_EQ(fut5.get(), 204);\n}\n\nTEST_F(ThreadPoolTest, SubmitTasksMinTaskNumber) {\n  modelbox::ThreadPool pool(0, 0, 0, 0);\n  auto fut1 = pool.Submit(compute, 100, 100);\n  auto fut2 = pool.Submit(compute, 
100, 101);\n  auto fut3 = pool.Submit(compute, 100, 102);\n  auto fut4 = pool.Submit(compute, 100, 103);\n  auto fut5 = pool.Submit(compute, 100, 104);\n\n  EXPECT_EQ(fut1.get(), 200);\n  EXPECT_EQ(fut2.get(), 201);\n  EXPECT_EQ(fut3.get(), 202);\n  EXPECT_EQ(fut4.get(), 203);\n  EXPECT_EQ(fut5.get(), 204);\n}\n\nTEST_F(ThreadPoolTest, ThreadSize) {\n  int thread_size = 4;\n  modelbox::ThreadPool pool(thread_size, thread_size * 2, 2001);\n  for (size_t i = 0; i < 2000; i++) {\n    auto fut = pool.Submit(compute, 10, 100);\n  }\n\n  EXPECT_EQ(pool.GetThreadsNum(), thread_size);\n}\n\nTEST_F(ThreadPoolTest, SetThreadSize) {\n  int thread_size = 4;\n  modelbox::ThreadPool pool(thread_size, thread_size * 2, 2001);\n  std::vector<std::future<int>> results;\n  for (size_t i = 0; i < 2000; i++) {\n    auto fut = pool.Submit(compute, 10, 100);\n    results.push_back(std::move(fut));\n  }\n\n  results.clear();\n  EXPECT_EQ(pool.GetThreadsNum(), thread_size);\n  pool.SetThreadSize(1);\n  pool.SetKeepAlive(10);\n  std::this_thread::sleep_for(std::chrono::milliseconds(200));\n  if (pool.GetThreadsNum() > 1) {\n    pool.SetThreadSize(1);\n    pool.SetKeepAlive(10);\n    std::this_thread::sleep_for(std::chrono::milliseconds(1000));\n  }\n  EXPECT_EQ(pool.GetThreadsNum(), 1);\n}\n\nTEST_F(ThreadPoolTest, MaxThreadSize) {\n  int thread_size = 4;\n  int max_thread_size = 10;\n  modelbox::ThreadPool pool(thread_size, max_thread_size, 1);\n  for (size_t i = 0; i < 2000; i++) {\n    auto fut = pool.Submit(compute, 10, 21000);\n  }\n\n  EXPECT_GT(pool.GetThreadsNum(), thread_size);\n  EXPECT_LE(pool.GetThreadsNum(), max_thread_size);\n}\n\nTEST_F(ThreadPoolTest, Shutdown) {\n  int thread_size = 4;\n  int max_thread_size = 10;\n  std::vector<std::future<int>> future_queue;\n  modelbox::ThreadPool pool(thread_size, max_thread_size, 1);\n  for (size_t i = 0; i < 20000; i++) {\n    auto fut = pool.Submit(compute, 10, 21000);\n    if (i == 1000) {\n      pool.Shutdown();\n    }\n\n    if (i 
> 1000) {\n      EXPECT_FALSE(fut.valid());\n    } else {\n      EXPECT_TRUE(fut.valid());\n    }\n    future_queue.emplace_back(std::move(fut));\n  }\n\n  future_queue.clear();\n  EXPECT_EQ(pool.GetThreadsNum(), 0);\n}\n\nTEST_F(ThreadPoolTest, GetMaxThreadsNum) {\n  int thread_size = 4;\n  int max_thread_size = 10;\n  modelbox::ThreadPool pool(thread_size, max_thread_size, 1);\n  EXPECT_EQ(pool.GetMaxThreadsNum(), max_thread_size);\n}\n\nTEST_F(ThreadPoolTest, GetWaitingWorkCount) {\n  int thread_size = 4;\n  int max_thread_size = 10;\n  modelbox::ThreadPool pool(thread_size, max_thread_size, 1);\n  EXPECT_EQ(pool.GetWaitingWorkCount(), 0);\n  for (size_t i = 0; i < 2000; i++) {\n    auto fut = pool.Submit(compute, 10, 21000);\n  }\n\n  EXPECT_LE(pool.GetWaitingWorkCount(), 2000);\n}\n\nTEST_F(ThreadPoolTest, KeepAlive) {\n  int thread_size = 4;\n  int max_thread_size = 10;\n  modelbox::ThreadPool pool(thread_size, max_thread_size, 1, -1);\n  std::vector<std::future<int>> future_queue;\n  for (size_t i = 0; i < 20000; i++) {\n    auto fut = pool.Submit(compute, i, i);\n    future_queue.push_back(std::move(fut));\n  }\n\n  for (size_t i = 0; i < future_queue.size(); ++i) {\n    EXPECT_EQ(future_queue[i].get(), compute(i, i));\n  }\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(50));\n  EXPECT_EQ(pool.GetThreadsNum(), max_thread_size);\n  std::this_thread::sleep_for(std::chrono::milliseconds(110));\n  EXPECT_EQ(pool.GetThreadsNum(), thread_size);\n  future_queue.clear();\n\n  pool.SetKeepAlive(200);\n  for (size_t i = 0; i < 1; i++) {\n    auto fut = pool.Submit(compute, i, i);\n    future_queue.push_back(std::move(fut));\n  }\n\n  for (size_t i = 0; i < future_queue.size(); ++i) {\n    EXPECT_EQ(future_queue[i].get(), compute(i, i));\n  }\n  future_queue.clear();\n\n  EXPECT_GE(pool.GetThreadsNum(), thread_size);\n  EXPECT_LE(pool.GetThreadsNum(), max_thread_size);\n  std::this_thread::sleep_for(std::chrono::milliseconds(250));\n  
EXPECT_EQ(pool.GetThreadsNum(), thread_size);\n}\n\nTEST_F(ThreadPoolTest, Performance) {\n  modelbox::ThreadPool pool(std::thread::hardware_concurrency());\n  std::atomic<bool> is_stop_{false};\n  std::atomic<bool> is_launch_print(true);\n  std::atomic<bool> is_wait_print(true);\n\n  std::mutex fut_mux;\n  std::queue<std::future<int>> future_queue;\n  std::condition_variable cv;\n\n  size_t LAUNCH_TASK_COUNT = std::thread::hardware_concurrency();\n  std::atomic<int64_t> launch_count{0};\n  std::vector<std::future<void>> launch_list;\n  for (size_t i = 0; i < LAUNCH_TASK_COUNT; ++i) {\n    auto launch_task = std::async(std::launch::async, [&]() {\n      auto begin_tick = modelbox::GetTickCount();\n      while (!is_stop_) {\n        auto fut = pool.Submit(compute, 1, 1);\n        std::lock_guard<std::mutex> lock(fut_mux);\n        future_queue.push(std::move(fut));\n        cv.notify_all();\n        launch_count++;\n      }\n\n      bool local_print = true;\n      if (is_launch_print.compare_exchange_strong(local_print, false)) {\n        MBLOG_INFO << \"Submit rate: \"\n                   << ((float)(launch_count * 1000)) /\n                          (modelbox::GetTickCount() - begin_tick)\n                   << \"/s\";\n      }\n    });\n\n    launch_list.push_back(std::move(launch_task));\n  }\n\n  size_t WAIT_TASK_COUNT = std::thread::hardware_concurrency();\n  std::atomic<int64_t> wait_count{0};\n  std::vector<std::future<void>> wait_list;\n  for (size_t i = 0; i < WAIT_TASK_COUNT; ++i) {\n    auto wait_task = std::async(std::launch::async, [&]() {\n      auto begin_tick = modelbox::GetTickCount();\n      while (!is_stop_) {\n        std::unique_lock<std::mutex> lock(fut_mux);\n        cv.wait(lock, [&]() { return !future_queue.empty() || is_stop_; });\n        if (is_stop_) {\n          break;\n        }\n        auto fut = std::move(future_queue.front());\n        future_queue.pop();\n        wait_count++;\n        lock.unlock();\n\n        
EXPECT_TRUE(fut.valid());\n        fut.wait();\n      }\n\n      bool local_print = true;\n      if (is_wait_print.compare_exchange_strong(local_print, false)) {\n        MBLOG_INFO << \"Process rate: \"\n                   << ((float)(wait_count * 1000)) /\n                          (modelbox::GetTickCount() - begin_tick)\n                   << \"/s\";\n      }\n    });\n\n    wait_list.push_back(std::move(wait_task));\n  }\n\n  for (size_t i = 0; i < LAUNCH_TASK_COUNT; ++i) {\n    EXPECT_TRUE(launch_list[i].valid());\n  }\n\n  for (size_t i = 0; i < WAIT_TASK_COUNT; ++i) {\n    EXPECT_TRUE(wait_list[i].valid());\n  }\n\n  auto status = wait_list[0].wait_for(std::chrono::milliseconds(1 * 500));\n  if (status != std::future_status::ready) {\n    is_stop_ = true;\n    MBLOG_INFO << \"set stop\";\n  }\n\n  for (size_t i = 0; i < LAUNCH_TASK_COUNT; ++i) {\n    launch_list[i].wait();\n  }\n\n  for (size_t i = 0; i < WAIT_TASK_COUNT; ++i) {\n    wait_list[i].wait();\n  }\n\n  MBLOG_INFO << \"launch count: \" << launch_count\n             << \" wait_count: \" << wait_count;\n}"
  },
  {
    "path": "test/unit/libmodelbox/base/timer_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include <modelbox/base/log.h>\n#include <modelbox/base/timer.h>\n#include <modelbox/base/utils.h>\n\n#include <future>\n\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\nclass TimerTest : public testing::Test {\n public:\n  TimerTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nTEST_F(TimerTest, Empty) {\n  {\n    Timer tm;\n    tm.Start();\n  }\n  EXPECT_TRUE(true);\n}\n\nTEST_F(TimerTest, Sched) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  uint64_t start = GetTickCount();\n  uint64_t end = GetTickCount();\n  const char *msg = \"Hello\";\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>(\n      [&](const char *id) {\n        EXPECT_LE(count, loop);\n        count++;\n        if (count == loop) {\n          task->Stop();\n          end = GetTickCount();\n        }\n        EXPECT_STREQ(msg, id);\n      },\n      msg);\n  tm.Start();\n  tm.Schedule(task, 0, 10);\n  tm.Shutdown();\n  EXPECT_GE(end - start, 20);\n}\n\nTEST_F(TimerTest, SchedMany) {\n  Timer tm;\n  int count = 10;\n  uint64_t start = GetTickCount();\n  const char *msg = \"Hello\";\n\n  std::vector<std::shared_ptr<TimerTask>> taskset;\n  std::vector<uint64_t> end_time;\n  end_time.resize(count);\n  taskset.resize(count);\n\n  
tm.Start(false);\n  for (int i = count - 1; i >= 0; i--) {\n    std::shared_ptr<TimerTask> task = std::make_shared<TimerTask>();\n    task->Callback(\n        [&, i](const char *id, TimerTask *task) {\n          EXPECT_STREQ(msg, id);\n          end_time[i] = GetTickCount();\n          task->Stop();\n        },\n        msg, task.get());\n    task->SetName(std::to_string(i));\n    tm.Schedule(task, 0, 10 * (i + 1));\n    taskset[i] = task;\n  }\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(150));\n  });\n\n  tm.Shutdown();\n  result_future.wait();\n  for (int i = 0; i < count; i++) {\n    auto end = end_time[i];\n    EXPECT_GE(end - start, 10 * (i + 1));\n    EXPECT_LE(end - start, 10 * (i + 1) + 10);\n  }\n}\n\nTEST_F(TimerTest, Callback) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  const char *msg = \"Hello\";\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>();\n  task->Callback(\n      [&](const char *id) {\n        EXPECT_LE(count, loop);\n        count++;\n        if (count == loop) {\n          task->Stop();\n        }\n        EXPECT_STREQ(msg, id);\n      },\n      msg);\n  tm.Start();\n  tm.Schedule(task, 0, 10);\n  tm.Shutdown();\n}\n\nTEST_F(TimerTest, ThreadLocalTaskGet) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  const char *msg = \"Hello\";\n\n  tm.Start();\n  {\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    auto *task_ptr = task.get();\n    task->Callback(\n        [&, task_ptr](const char *id) {\n          EXPECT_LE(count, loop);\n          count++;\n          if (count == loop) {\n            task->Stop();\n          }\n          EXPECT_STREQ(msg, id);\n          EXPECT_EQ(task_ptr, Timer::CurrentTimerTask().get());\n        },\n        msg);\n    tm.Schedule(task, 0, 10);\n  }\n  tm.Shutdown();\n}\n\nTEST_F(TimerTest, CallbackNoOwnerShip) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  
const char *msg = \"Hello\";\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>();\n  task->Callback(\n      [&](const char *id) {\n        EXPECT_LE(count, loop);\n        count++;\n        if (count == loop) {\n          task->Stop();\n        }\n        EXPECT_STREQ(msg, id);\n      },\n      msg);\n  tm.Start();\n  tm.Schedule(task, 0, 10, true);\n  tm.Shutdown();\n}\n\nTEST_F(TimerTest, NoCallbackNoOwnerShip) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  {\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    task->Callback(\n        [&]() {\n          EXPECT_LE(count, loop);\n          count++;\n          if (count == loop) {\n            task->Stop();\n          }\n        });\n    tm.Start();\n    tm.Schedule(task, 0, 10);\n    task = nullptr;\n  }\n  std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  EXPECT_EQ(count, 0);\n  tm.Shutdown();\n}\n\nTEST_F(TimerTest, CallbackWithOwnerShip) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  const char *msg = \"Hello\";\n  {\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    task->Callback(\n        [&](const char *id, TimerTask *t) {\n          EXPECT_LE(count, loop);\n          count++;\n          if (count == loop) {\n            t->Stop();\n          }\n          EXPECT_STREQ(msg, id);\n        },\n        msg, task.get());\n    tm.Start();\n    tm.Schedule(task, 0, 10, true);\n  }\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  EXPECT_EQ(count, 2);\n  tm.Shutdown();\n}\n\nTEST_F(TimerTest, SchedDelay) {\n  Timer tm;\n  int count = 0;\n  int loop = 2;\n  uint64_t start = GetTickCount();\n  uint64_t end = GetTickCount();\n  const char *msg = \"Hello\";\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>(\n      [&](const char *id) {\n        EXPECT_LE(count, loop);\n        count++;\n        if (count == loop) {\n          task->Stop();\n          end = 
GetTickCount();\n        }\n        EXPECT_STREQ(msg, id);\n      },\n      msg);\n  tm.Start();\n  tm.Schedule(task, 100, 10);\n  tm.Shutdown();\n  EXPECT_GE(end - start, 120);\n}\n\nTEST_F(TimerTest, SchedOnce) {\n  Timer tm;\n  int count = 0;\n  uint64_t start = GetTickCount();\n  uint64_t end = GetTickCount();\n  const char *msg = \"Hello\";\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>(\n      [&](const char *id) {\n        count++;\n        EXPECT_EQ(count, 1);\n        EXPECT_STREQ(msg, id);\n        end = GetTickCount();\n      },\n      msg);\n  tm.Start();\n  tm.Schedule(task, 10, 0);\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    task->Stop();\n  });\n\n  tm.Shutdown();\n  EXPECT_GE(end - start, 10);\n  result_future.wait();\n  EXPECT_EQ(count, 1);\n}\n\nTEST_F(TimerTest, StopBeforeHit) {\n  Timer tm;\n  int count = 0;\n  int i = 0;\n  uint64_t start = GetTickCount();\n  uint64_t end = GetTickCount();\n  tm.Start(false);\n\n  std::vector<std::shared_ptr<TimerTask>> taskset;\n  for (i = 0; i < 10; i++) {\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>([&]() { EXPECT_TRUE(false); });\n    tm.Schedule(task, 0, 1000 * (i + 1));\n    task->SetName(std::to_string(i));\n    taskset.push_back(task);\n  }\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(10));\n  for (i = 0; i < 10; i++) {\n    taskset[i]->Stop();\n  }\n\n  tm.Shutdown();\n  end = GetTickCount();\n  EXPECT_EQ(count, 0);\n  EXPECT_LT(end - start, 30);\n}\n\nTEST_F(TimerTest, TakeOwnerShipStopBeforeHit) {\n  Timer tm;\n  int count = 0;\n  tm.Start();\n  {\n    // trigger here\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    auto *task_ptr = task.get();\n    task->Callback([&, task_ptr]() {\n      count++;\n      EXPECT_TRUE(true);\n      task_ptr->Stop();\n    });\n\n    tm.Schedule(task, 0, 10, true);\n  
}\n\n  {\n    // no trigger\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    task->Callback([&]() {\n      count++;\n      EXPECT_TRUE(true);\n    });\n    tm.Schedule(task, 0, 10, false);\n  }\n\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  });\n\n  result_future.wait();\n  EXPECT_EQ(count, 1);\n  tm.Stop();\n}\n\nTEST_F(TimerTest, SchedStopBeforeHit) {\n  Timer tm;\n  int count = 0;\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>([&]() {\n    count++;\n    EXPECT_FALSE(true);\n  });\n\n  tm.Start();\n  tm.Schedule(task, 0, 1000);\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n    task->Stop();\n  });\n\n  result_future.wait();\n\n  EXPECT_EQ(count, 0);\n  tm.Stop();\n}\n\nTEST_F(TimerTest, SchedStopAfterHit) {\n  Timer tm;\n  int count = 0;\n\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>([&]() {\n    count++;\n    EXPECT_EQ(count, 1);\n    task->Stop();\n  });\n\n  tm.Start();\n  tm.Schedule(task, 0, 10);\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  });\n\n  tm.Shutdown();\n  EXPECT_EQ(count, 1);\n  result_future.wait();\n}\n\nTEST_F(TimerTest, SchedBatch) {\n  Timer tm;\n  std::atomic<uint32_t> count;\n  int loop = 10;\n\n  count = 0;\n  std::vector<std::shared_ptr<TimerTask>> list;\n  for (int i = 0; i < loop; i++) {\n    std::shared_ptr<TimerTask> task;\n    task = std::make_shared<TimerTask>();\n    task->SetName(std::to_string(i));\n    std::weak_ptr<TimerTask> task_weak = task;\n    task->Callback(\n        [&, task_weak](int index) {\n          auto t = task_weak.lock();\n          if (t == nullptr) {\n            return;\n          }\n\n          count++;\n          t->Stop();\n        },\n        i);\n    
list.push_back(task);\n  }\n\n  tm.Start();\n  for (size_t i = 0; i < list.size(); i++) {\n    tm.Schedule(list[i], 0, 10 * i);\n  }\n\n  tm.Shutdown();\n  EXPECT_EQ(count, loop);\n}\n\nTEST_F(TimerTest, GlobalTimer) {\n  int count = 0;\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>([&]() {\n    count++;\n    EXPECT_EQ(count, 1);\n    task->Stop();\n  });\n\n  TimerGlobal::Start();\n  Defer { TimerGlobal::Stop(); };\n\n  {\n    TimerGlobal::Start();\n    Defer { TimerGlobal::Stop(); };\n  }\n\n  TimerGlobal::Schedule(task, 0, 10);\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n  });\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(30));\n  EXPECT_EQ(count, 1);\n  result_future.wait();\n}\n\nTEST_F(TimerTest, GlobalTimerStopBeforHit) {\n  int count = 0;\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>([&]() {\n    count++;\n  });\n\n  TimerGlobal::Start();\n  TimerGlobal::Schedule(task, 0, 10);\n  auto result_future = std::async(std::launch::async, [&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n  });\n  TimerGlobal::Stop(); \n\n  std::this_thread::sleep_for(std::chrono::milliseconds(30));\n  EXPECT_EQ(count, 0);\n  result_future.wait();\n}\n\nTEST_F(TimerTest, GlobalTimerTakeTooLong) {\n  int count = 0;\n  std::shared_ptr<TimerTask> task;\n  task = std::make_shared<TimerTask>([&]() {\n    std::this_thread::sleep_for(std::chrono::milliseconds(60));\n    count++;\n    EXPECT_EQ(count, 1);\n    task->Stop();\n  });\n\n  TimerGlobal::Start();\n  Defer { TimerGlobal::Stop(); };\n  TimerGlobal::Schedule(task, 0, 10);\n  std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  EXPECT_EQ(count, 1);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/base/utils_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* clang-format off */\n#include <modelbox/base/log.h>\n#include <modelbox/base/utils.h>\n\n#include <list>\n#include <toml.hpp>\n#include <string.h>\n#include <mutex>\n\n#include <nlohmann/json.hpp>\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n/* clang-format on */\n\nnamespace modelbox {\nclass BaseUtilsTest : public testing::Test {\n public:\n  BaseUtilsTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nclass RefVarTest {\n public:\n  RefVarTest() { count_++; }\n  ~RefVarTest() { count_--; }\n  static int GetRefCount() { return count_; }\n\n private:\n  static int count_;\n};\n\nint RefVarTest::count_ = 0;\n\nTEST_F(BaseUtilsTest, RefVar) {\n  RefVar<RefVarTest> Var(2);\n  Var.MakeFunc([](int index) { return std::make_shared<RefVarTest>(); });\n\n  auto a = Var.Get();\n  auto b = Var.Get();\n  EXPECT_EQ(RefVarTest::GetRefCount(), 1);\n  EXPECT_EQ(a.get(), b.get());\n  auto c = Var.Get(1);\n  EXPECT_EQ(RefVarTest::GetRefCount(), 2);\n  auto d = Var.Get(1);\n  EXPECT_EQ(RefVarTest::GetRefCount(), 2);\n  EXPECT_NE(c.get(), b.get());\n  a = nullptr;\n  EXPECT_EQ(RefVarTest::GetRefCount(), 2);\n  b = nullptr;\n  EXPECT_EQ(RefVarTest::GetRefCount(), 1);\n}\n\nTEST_F(BaseUtilsTest, Volume) {\n  {\n    std::vector<size_t> test({1, 2, 3, 4, 5});\n    
EXPECT_EQ(Volume(test), 120);\n  }\n\n  {\n    std::vector<size_t> test({1, 2, 3, 4, 5});\n    std::vector<std::vector<size_t>> test_vec;\n    for (size_t i = 0; i < 10; ++i) {\n      test_vec.push_back(test);\n    }\n\n    EXPECT_EQ(Volume(test_vec), 1200);\n  }\n}\n\nTEST_F(BaseUtilsTest, RegexMatch) {\n  const auto *test = \"aaa=000 bbb=111   ccc=222     ddd=333\";\n  EXPECT_TRUE(RegexMatch(test, \".*111.*\"));\n  EXPECT_TRUE(RegexMatch(test, \".*333$\"));\n  EXPECT_TRUE(RegexMatch(test, \"^aaa.*\"));\n  EXPECT_FALSE(RegexMatch(test, \"^bbb.*\"));\n}\n\nTEST_F(BaseUtilsTest, StringSplit) {\n  const auto *test = \"aaa=000 bbb=111   ccc=222     ddd=333\";\n  auto split_test = StringSplit(test, ' ');\n  for (size_t i = 0; i < split_test.size(); ++i) {\n    switch (i) {\n      case 0:\n        EXPECT_EQ(split_test[i], \"aaa=000\");\n        break;\n      case 1:\n        EXPECT_EQ(split_test[i], \"bbb=111\");\n        break;\n      case 2:\n        EXPECT_EQ(split_test[i], \"ccc=222\");\n        break;\n      case 3:\n        EXPECT_EQ(split_test[i], \"ddd=333\");\n        break;\n      default:\n        break;\n    }\n  }\n}\n\nTEST_F(BaseUtilsTest, BytesReadable) {\n  size_t byte = 1;\n  size_t kilo = byte * 1024;\n  size_t mega = kilo * 1024;\n  size_t giga = mega * 1024;\n  size_t tera = giga * 1024;\n  size_t peta = tera * 1024;\n  size_t kilo_half = kilo + kilo / 2;\n  size_t mega_half = mega + mega / 2;\n  size_t giga_half = giga + giga / 2;\n  size_t tera_half = tera + tera / 2;\n  size_t peta_half = peta + peta / 2;\n\n  EXPECT_EQ(GetBytesReadable(byte), \"1B\");\n  EXPECT_EQ(GetBytesReadable(kilo), \"1KB\");\n  EXPECT_EQ(GetBytesReadable(mega), \"1MB\");\n  EXPECT_EQ(GetBytesReadable(giga), \"1GB\");\n  EXPECT_EQ(GetBytesReadable(tera), \"1TB\");\n  EXPECT_EQ(GetBytesReadable(peta), \"1PB\");\n  EXPECT_EQ(GetBytesReadable(kilo_half), \"1.5KB\");\n  EXPECT_EQ(GetBytesReadable(mega_half), \"1.5MB\");\n  EXPECT_EQ(GetBytesReadable(giga_half), \"1.5GB\");\n  
EXPECT_EQ(GetBytesReadable(tera_half), \"1.5TB\");\n  EXPECT_EQ(GetBytesReadable(peta_half), \"1.5PB\");\n}\n\nTEST_F(BaseUtilsTest, BytesFromReadable) {\n  size_t byte = 1;\n  size_t kilo = byte * 1024;\n  size_t mega = kilo * 1024;\n  size_t giga = mega * 1024;\n  size_t tera = giga * 1024;\n  size_t peta = tera * 1024;\n  size_t kilo_half = kilo + kilo / 2;\n  size_t mega_half = mega + mega / 2;\n  size_t giga_half = giga + giga / 2;\n  size_t tera_half = tera + tera / 2;\n  size_t peta_half = peta + peta / 2;\n\n  EXPECT_EQ(byte, GetBytesFromReadable(\"1B\"));\n  EXPECT_EQ(kilo, GetBytesFromReadable(\"1KB\"));\n  EXPECT_EQ(mega, GetBytesFromReadable(\"1MB\"));\n  EXPECT_EQ(giga, GetBytesFromReadable(\"1GB\"));\n  EXPECT_EQ(tera, GetBytesFromReadable(\"1TB\"));\n  EXPECT_EQ(peta, GetBytesFromReadable(\"1PB\"));\n  EXPECT_EQ(kilo_half, GetBytesFromReadable(\"1.5KB\"));\n  EXPECT_EQ(mega_half, GetBytesFromReadable(\"1.5MB\"));\n  EXPECT_EQ(giga_half, GetBytesFromReadable(\"1.5GB\"));\n  EXPECT_EQ(tera_half, GetBytesFromReadable(\"1.5TB\"));\n  EXPECT_EQ(peta_half, GetBytesFromReadable(\"1.5PB\"));\n\n  EXPECT_EQ(byte, GetBytesFromReadable(\"1\"));\n  EXPECT_EQ(kilo, GetBytesFromReadable(\"1K\"));\n  EXPECT_EQ(mega, GetBytesFromReadable(\"1M\"));\n  EXPECT_EQ(giga, GetBytesFromReadable(\"1G\"));\n  EXPECT_EQ(tera, GetBytesFromReadable(\"1T\"));\n  EXPECT_EQ(peta, GetBytesFromReadable(\"1P\"));\n  EXPECT_EQ(kilo_half, GetBytesFromReadable(\"1.5K\"));\n  EXPECT_EQ(mega_half, GetBytesFromReadable(\"1.5M\"));\n  EXPECT_EQ(giga_half, GetBytesFromReadable(\"1.5G\"));\n  EXPECT_EQ(tera_half, GetBytesFromReadable(\"1.5T\"));\n  EXPECT_EQ(peta_half, GetBytesFromReadable(\"1.5P\"));\n}\n\nTEST_F(BaseUtilsTest, DeferCondition) {\n  int first = 0;\n  int second = 0;\n  int third = 0;\n  int count = 0;\n  {\n    bool ret = true;\n    DeferCond { return ret; };\n    DeferCondAdd { first = ++count; };\n    DeferCondAdd { second = ++count; };\n    DeferCondAdd { third = ++count; 
};\n  }\n\n  EXPECT_EQ(first, 3);\n  EXPECT_EQ(second, 2);\n  EXPECT_EQ(third, 1);\n}\n\nTEST_F(BaseUtilsTest, DeferTest) {\n  int i = 0;\n  Defer { EXPECT_EQ(i, 3); };\n  Defer { i++; };\n  Defer { EXPECT_EQ(i, 2); };\n  Defer {\n    [&]() { i++; }();\n  };\n\n  try {\n    Defer { i++; };\n    throw \"exit\";\n    Defer { i++; };\n  } catch (...) {\n  }\n}\n\nTEST_F(BaseUtilsTest, ListSubDirectoryFiles) {\n  std::string filter = \"*.toml\";\n  std::vector<std::string> listfiles;\n  std::string python_path = std::string(TEST_ASSETS);\n  Status status = ListSubDirectoryFiles(python_path, filter, &listfiles);\n  EXPECT_GE(listfiles.size(), 1);\n}\n\nTEST_F(BaseUtilsTest, IsAbsolutePath) {\n  EXPECT_TRUE(IsAbsolutePath(\"/\"));\n  EXPECT_TRUE(IsAbsolutePath(\" /\"));\n  EXPECT_TRUE(IsAbsolutePath(\" / \"));\n  EXPECT_FALSE(IsAbsolutePath(\"a/\"));\n  EXPECT_FALSE(IsAbsolutePath(\"a/ \"));\n  EXPECT_FALSE(IsAbsolutePath(\" a/\"));\n  EXPECT_FALSE(IsAbsolutePath(\" a /\"));\n  EXPECT_FALSE(IsAbsolutePath(\"./\"));\n  EXPECT_FALSE(IsAbsolutePath(\"../ \"));\n  EXPECT_FALSE(IsAbsolutePath(\" ../\"));\n  EXPECT_FALSE(IsAbsolutePath(\" . 
/\"));\n}\n\nTEST_F(BaseUtilsTest, GetDirName) {\n  EXPECT_EQ(GetDirName(\"/\"), \"/\");\n  EXPECT_EQ(GetDirName(\"/a\"), \"/\");\n  EXPECT_EQ(GetDirName(\"/a/\"), \"/\");\n  EXPECT_EQ(GetDirName(\"../\"), \".\");\n  EXPECT_EQ(GetDirName(\"../../\"), \"..\");\n}\n\nTEST_F(BaseUtilsTest, PathCanonicalize) {\n  EXPECT_EQ(PathCanonicalize(\"/\"), \"/\");\n  EXPECT_EQ(PathCanonicalize(\"/../\"), \"/\");\n  EXPECT_EQ(PathCanonicalize(\"/../../\"), \"/\");\n  EXPECT_EQ(PathCanonicalize(\"/a/b/..\"), \"/a\");\n  EXPECT_EQ(PathCanonicalize(\"/a/b/c/../..\"), \"/a\");\n  EXPECT_EQ(PathCanonicalize(\"//a\"), \"/a\");\n  EXPECT_EQ(PathCanonicalize(\"//a/..\"), \"/\");\n  EXPECT_EQ(PathCanonicalize(\"../../\", \"/a\"), \"/a\");\n}\n\nTEST_F(BaseUtilsTest, ListFiles) {\n  std::string filter = \"*\";\n  std::vector<std::string> all;\n  Status status = ListFiles(TEST_ASSETS, filter, &all, LIST_FILES_ALL);\n  EXPECT_TRUE(status);\n  std::vector<std::string> alldirs;\n  status = ListFiles(TEST_ASSETS, filter, &alldirs, LIST_FILES_DIR);\n  EXPECT_TRUE(status);\n  std::vector<std::string> allfiles;\n  status = ListFiles(TEST_ASSETS, filter, &allfiles, LIST_FILES_FILE);\n  EXPECT_TRUE(status);\n  EXPECT_EQ(all.size(), alldirs.size() + allfiles.size());\n  MBLOG_INFO << all.size();\n  MBLOG_INFO << alldirs.size();\n  MBLOG_INFO << allfiles.size();\n}\n\nTEST_F(BaseUtilsTest, Json2Toml_JsonFailed) {\n  std::string jsondata = R\"({\n    \"server\": {\n        \"ip\": \"0.0.0.0\",\n    },\n    }\n  )\";\n\n  std::string tomldata;\n\n  bool ret = JsonToToml(jsondata, &tomldata);\n  EXPECT_FALSE(ret);\n}\n\nTEST_F(BaseUtilsTest, Toml2Json) {\n  std::string tomldata = R\"(\n  root = \"root\"\n  [basic]\n  str = \"str\"\n  bool = true\n  int = 10\n  double = 1.0\n  multiline = '''\nline1\nline2\n'''\n  array = [\n    \"array1\",\n    \"array2\"\n  ]\n  [basic.nest]\n  aaa = \"aaa\"\n  int = 10\n  )\";\n  std::string json_data;\n  auto ret = TomlToJson(tomldata, &json_data);\n  std::cout << 
json_data << std::endl;\n  ASSERT_TRUE(ret);\n  std::istringstream instring(json_data);\n  auto root = nlohmann::json::parse(instring);\n  std::cout << root.dump() << std::endl;\n  EXPECT_EQ(root[\"root\"], \"root\");\n  EXPECT_EQ(root[\"basic\"][\"str\"], \"str\");\n  EXPECT_EQ(root[\"basic\"][\"bool\"], true);\n  EXPECT_EQ(root[\"basic\"][\"int\"], 10);\n  EXPECT_EQ(root[\"basic\"][\"multiline\"], \"line1\\nline2\\n\");\n  EXPECT_EQ(root[\"basic\"][\"nest\"][\"aaa\"], \"aaa\");\n  EXPECT_EQ(root[\"basic\"][\"nest\"][\"int\"], 10);\n  auto array = root[\"basic\"][\"array\"];\n  EXPECT_EQ(array[0], \"array1\");\n  EXPECT_EQ(array[1], \"array2\");\n}\n\nTEST_F(BaseUtilsTest, Json2Toml) {\n  std::string jsondata = R\"(\n{\n    \"root\": \"root\",\n    \"basic\": {\n        \"null\": null,\n        \"str\": \"str\",\n        \"bool\": true,\n        \"int\": 10,\n        \"double\": 1.0,\n        \"multiline\": \"a\\nb\\n\",\n        \"array\": [\n            \"array1\",\n            \"array2\"\n        ],\n        \"nest\" : {\n            \"aaa\":\"aaa\",\n            \"int\": 10\n        }\n    },\n    \"array\": [\n        \"array1\",\n        \"array2\"\n    ],\n    \"nestarray\": [\n        {\n            \"nest\": \"nest1\",\n            \"value\": 10,\n            \"bool\": false\n        },\n        {\n            \"nest\": \"nest2\"\n        }\n    ]\n}\n  )\";\n\n  std::string tomldata;\n\n  bool ret = JsonToToml(jsondata, &tomldata);\n  ASSERT_TRUE(ret);\n  MBLOG_INFO << tomldata;\n  std::stringstream ins;\n  ins << tomldata;\n  auto tom_data = toml::parse(ins);\n  EXPECT_EQ(tom_data[\"root\"].as_string(), \"root\");\n  EXPECT_EQ(tom_data[\"basic\"][\"str\"].as_string(), \"str\");\n  EXPECT_EQ(tom_data[\"basic\"][\"bool\"].as_boolean(), true);\n  EXPECT_EQ(tom_data[\"basic\"][\"int\"].as_integer(), 10);\n  EXPECT_EQ(tom_data[\"basic\"][\"double\"].as_floating(), 1.0);\n  EXPECT_EQ(tom_data[\"basic\"][\"multiline\"].as_string(), \"a\\nb\\n\");\n  auto array 
= tom_data[\"basic\"][\"array\"].as_array();\n  EXPECT_EQ(array[0].as_string(), \"array1\");\n  EXPECT_EQ(array[1].as_string(), \"array2\");\n\n  auto root_array = tom_data[\"array\"].as_array();\n  EXPECT_EQ(root_array[0].as_string(), \"array1\");\n  EXPECT_EQ(root_array[1].as_string(), \"array2\");\n\n  auto nestarray = tom_data[\"nestarray\"].as_array();\n  EXPECT_EQ(nestarray[0][\"nest\"].as_string(), \"nest1\");\n  EXPECT_EQ(nestarray[1][\"nest\"].as_string(), \"nest2\");\n}\n\nTEST_F(BaseUtilsTest, FindTheEarliestFileIndex) {\n  std::string dir = std::string(TEST_DATA_DIR) + \"/test_files\";\n  auto ret = mkdir(dir.c_str(), 0700);\n  EXPECT_EQ(ret, 0);\n  for (int i = 1; i <= 5; ++i) {\n    auto open_file = dir + \"/\" + std::to_string(i) + \".txt\";\n    std::ofstream out(open_file, std::ios::binary | std::ios::trunc);\n    EXPECT_EQ(out.fail(), false);\n    out << i;\n    out.close();\n  }\n\n  std::vector<std::string> list_files;\n  auto status = ListSubDirectoryFiles(dir, \"*.txt\", &list_files);\n  EXPECT_EQ(status, modelbox::STATUS_OK);\n  auto earliest_file_index = FindTheEarliestFileIndex(list_files);\n  EXPECT_EQ(earliest_file_index, 0);\n  EXPECT_EQ(list_files.size(), 5);\n\n  for (const auto &file : list_files) {\n    auto ret = remove(file.c_str());\n    EXPECT_EQ(ret, 0);\n  }\n  ret = remove(dir.c_str());\n  EXPECT_EQ(ret, 0);\n}\n\nTEST_F(BaseUtilsTest, StrError) {\n  int err = EACCES;\n  MBLOG_INFO << modelbox::StrError(err);\n  EXPECT_EQ(modelbox::StrError(err), strerror(err));\n  err = 245;\n  MBLOG_INFO << modelbox::StrError(err);\n  EXPECT_EQ(modelbox::StrError(err), strerror(err));\n}\n\nTEST_F(BaseUtilsTest, ExpandEnvironmentVariables) {\n  EXPECT_EQ(\"test\", ExpandEnvironmentVariables(\"test\"));\n  setenv(\"BUTEST\", \"butest\", true);\n  EXPECT_EQ(\"butest\", ExpandEnvironmentVariables(\"${BUTEST}\"));\n  EXPECT_EQ(\"testbutest\", ExpandEnvironmentVariables(\"test${BUTEST}\"));\n  EXPECT_EQ(\"butesttest\", 
ExpandEnvironmentVariables(\"${BUTEST}test\"));\n  EXPECT_EQ(\"testbutesttest\", ExpandEnvironmentVariables(\"test${BUTEST}test\"));\n  setenv(\"BUINNER\", \"TES\", true);\n  EXPECT_EQ(\"testbutesttest\",\n            ExpandEnvironmentVariables(\"test${BU${BUINNER}T}test\"));\n  unsetenv(\"BUINNER\");\n  unsetenv(\"BUTEST\");\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/buffer_index_info_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer_index_info.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\nnamespace modelbox {\nclass BufferIndexInfoTest : public testing::Test {};\n\nTEST_F(BufferIndexInfoTest, IndexInfoTest) {\n  auto index_info = std::make_shared<BufferIndexInfo>();\n  index_info->SetIndex(0);\n  EXPECT_TRUE(index_info->IsFirstBufferInStream());\n  EXPECT_FALSE(index_info->IsEndFlag());\n  EXPECT_FALSE(index_info->IsPlaceholder());\n  index_info->SetIndex(1);\n  EXPECT_FALSE(index_info->IsFirstBufferInStream());\n  EXPECT_FALSE(index_info->IsEndFlag());\n  EXPECT_FALSE(index_info->IsPlaceholder());\n  index_info = std::make_shared<BufferIndexInfo>();\n  index_info->SetIndex(1);\n  index_info->MarkAsEndFlag();\n  EXPECT_FALSE(index_info->IsFirstBufferInStream());\n  EXPECT_TRUE(index_info->IsEndFlag());\n  EXPECT_FALSE(index_info->IsPlaceholder());\n  index_info = std::make_shared<BufferIndexInfo>();\n  index_info->SetIndex(1);\n  index_info->MarkAsPlaceholder();\n  EXPECT_FALSE(index_info->IsFirstBufferInStream());\n  EXPECT_FALSE(index_info->IsEndFlag());\n  
EXPECT_TRUE(index_info->IsPlaceholder());\n}\n\nTEST_F(BufferIndexInfoTest, ProcessInfoTest) {\n  auto process_info = std::make_shared<BufferProcessInfo>();\n  auto a_buffer = std::make_shared<BufferIndexInfo>();\n  auto b_buffer = std::make_shared<BufferIndexInfo>();\n  process_info->SetParentBuffers(\"a\", {a_buffer});\n  process_info->SetParentBuffers(\"b\", {b_buffer});\n  const auto &buffers = process_info->GetParentBuffers();\n  ASSERT_EQ(buffers.size(), 2);\n  ASSERT_EQ(buffers.at(\"a\").size(), 1);\n  ASSERT_EQ(buffers.at(\"b\").size(), 1);\n  EXPECT_EQ(buffers.at(\"a\").front(), a_buffer);\n  EXPECT_EQ(buffers.at(\"b\").front(), b_buffer);\n  EXPECT_EQ(buffers.begin()->second.front(), a_buffer);  // test read order\n\n  EXPECT_EQ(process_info->GetType(), BufferProcessType::ORIGIN);\n  process_info->SetType(BufferProcessType::EXPAND);\n  EXPECT_EQ(process_info->GetType(), BufferProcessType::EXPAND);\n}\n\nTEST_F(BufferIndexInfoTest, InheritInfoTest) {\n  auto inherit_info = std::make_shared<BufferInheritInfo>();\n  EXPECT_EQ(inherit_info->GetType(), BufferProcessType::EXPAND);\n  inherit_info->SetType(BufferProcessType::CONDITION_START);\n  EXPECT_EQ(inherit_info->GetType(), BufferProcessType::CONDITION_START);\n\n  auto root_index_info = std::make_shared<BufferIndexInfo>();\n  EXPECT_EQ(inherit_info->GetDeepth(), 0);\n  inherit_info->SetInheritFrom(root_index_info);\n  EXPECT_EQ(inherit_info->GetDeepth(), 0);\n  EXPECT_EQ(inherit_info->GetInheritFrom(), root_index_info);\n\n  auto second_index_info = std::make_shared<BufferIndexInfo>();\n  second_index_info->SetInheritInfo(inherit_info);\n  auto inherit_info2 = std::make_shared<BufferInheritInfo>();\n  EXPECT_EQ(inherit_info2->GetDeepth(), 0);\n  inherit_info2->SetInheritFrom(second_index_info);\n  EXPECT_EQ(inherit_info2->GetDeepth(), 1);\n  EXPECT_EQ(inherit_info2->GetInheritFrom(), second_index_info);\n}\n\nTEST_F(BufferIndexInfoTest, BufferManageViewTest) {\n  auto buffer = 
std::make_shared<Buffer>();\n  EXPECT_EQ(BufferManageView::GetPriority(buffer), 0);\n  BufferManageView::SetPriority(buffer, 1);\n  EXPECT_EQ(BufferManageView::GetPriority(buffer), 1);\n\n  EXPECT_NE(BufferManageView::GetIndexInfo(buffer), nullptr);\n  EXPECT_EQ(BufferManageView::GetFirstParentBuffer(buffer), nullptr);\n\n  auto index_info = std::make_shared<BufferIndexInfo>();\n  BufferManageView::SetIndexInfo(buffer, index_info);\n  EXPECT_EQ(BufferManageView::GetIndexInfo(buffer), index_info);\n  EXPECT_EQ(BufferManageView::GetFirstParentBuffer(buffer), nullptr);\n\n  auto in_buffer = std::make_shared<Buffer>();\n  auto in_buffer_index = std::make_shared<BufferIndexInfo>();\n  BufferManageView::SetIndexInfo(in_buffer, in_buffer_index);\n  auto in_buffer2 = std::make_shared<Buffer>();\n  auto in_buffer2_index = std::make_shared<BufferIndexInfo>();\n  BufferManageView::SetIndexInfo(in_buffer2, in_buffer2_index);\n  std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      input_data;\n  input_data[\"a\"].push_back(in_buffer);\n  input_data[\"a\"].push_back(in_buffer2);\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list;\n  BufferManageView::GenProcessInfo<std::vector<std::shared_ptr<Buffer>>>(\n      input_data, 2,\n      [](const std::vector<std::shared_ptr<Buffer>> &container, size_t idx) {\n        return container[idx];\n      },\n      process_info_list, false);\n  ASSERT_EQ(process_info_list.size(), 2);\n  ASSERT_EQ(process_info_list.front()->GetParentBuffers().size(), 1);\n  ASSERT_EQ(\n      process_info_list.front()->GetParentBuffers().begin()->second.size(), 1);\n  ASSERT_EQ(\n      process_info_list.front()->GetParentBuffers().begin()->second.front(),\n      in_buffer_index);\n\n  std::vector<std::shared_ptr<BufferProcessInfo>> process_info_list2;\n  BufferManageView::GenProcessInfo<std::vector<std::shared_ptr<Buffer>>>(\n      input_data, 2,\n      [](const std::vector<std::shared_ptr<Buffer>> &container, size_t 
idx) {\n        return container[idx];\n      },\n      process_info_list2, true);\n  ASSERT_EQ(process_info_list2.size(), 1);\n  ASSERT_EQ(process_info_list2.front()->GetParentBuffers().size(), 1);\n  ASSERT_EQ(\n      process_info_list2.front()->GetParentBuffers().begin()->second.size(), 2);\n  ASSERT_EQ(\n      process_info_list2.front()->GetParentBuffers().begin()->second.front(),\n      in_buffer_index);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/buffer_list_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gmock/gmock.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\n#include \"modelbox/base/log.h\"\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/buffer_list.h\"\n\nnamespace modelbox {\nclass BufferListTest : public testing::Test {\n public:\n  BufferListTest() = default;\n\n protected:\n  std::shared_ptr<Device> device_;\n  std::shared_ptr<MockDriverCtl> ctl_;\n\n  void SetUp() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    ctl_ = std::make_shared<MockDriverCtl>();\n    modelbox::DriverDesc desc;\n\n    desc.SetClass(\"DRIVER-DEVICE\");\n    desc.SetType(\"cpu\");\n    desc.SetName(\"device-driver-cpu\");\n    desc.SetDescription(\"the cpu device\");\n    desc.SetVersion(\"8.9.2\");\n    std::string file_path_device =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n    desc.SetFilePath(file_path_device);\n    ctl_->AddMockDriverDevice(\"cpu\", desc);\n\n    bool result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n\n    EXPECT_TRUE(result);\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    
device_mgr->Initialize(drivers, config);\n    device_ = device_mgr->CreateDevice(\"cpu\", \"0\");\n    device_->SetMemQuota(10240);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    drivers->Clear();\n    device_ = nullptr;\n  };\n};\n\nTEST_F(BufferListTest, BufferList) {\n  {\n    BufferList buffer_list(device_);\n    EXPECT_EQ(buffer_list.GetBytes(), 0);\n  }\n}\n\nTEST_F(BufferListTest, Build) {\n  BufferList buffer_list(device_);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n\n  std::vector<size_t> lengths(shapes.size());\n  std::transform(shapes.begin(), shapes.end(), lengths.begin(),\n                 [](const std::vector<size_t> &shape) -> size_t {\n                   return Volume(shape) * sizeof(int);\n                 });\n  auto status = buffer_list.Build(lengths);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(buffer_list.Size(), shapes.size());\n\n  size_t size = BATCH_NUM * Volume(shapes[0]);\n  auto *data = (int *)buffer_list.MutableData();\n  for (size_t i = 0; i < size; ++i) {\n    data[i] = i;\n  }\n\n  for (size_t i = 0; i < buffer_list.Size(); ++i) {\n    auto buffer = buffer_list[i];\n    auto *tensor_data = (int *)(buffer->ConstData());\n    auto tensor_size = buffer->GetBytes() / sizeof(int);\n    for (size_t j = 0; j < tensor_size; ++j) {\n      EXPECT_EQ(tensor_data[j], i * tensor_size + j);\n    }\n  }\n\n  BufferList buffer_list_2(device_);\n  std::vector<size_t> lengths_2(BATCH_NUM, 0);\n  status = buffer_list_2.Build(lengths_2);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(buffer_list_2.Size(), lengths_2.size());\n  std::vector<int *> data_list;\n  for (size_t i = 0; i < buffer_list_2.Size(); ++i) {\n    EXPECT_EQ(nullptr, buffer_list_2.ConstBufferData(i));\n\n    auto *data = new int[6];\n    buffer_list_2[i]->Build(data, 6 
* sizeof(int),\n                            [](void *ptr) { delete[](int *) ptr; });\n    data_list.push_back(data);\n  }\n\n  for (size_t i = 0; i < buffer_list_2.Size(); ++i) {\n    EXPECT_EQ(data_list[i], buffer_list_2.ConstBufferData(i));\n  }\n}\n\nTEST_F(BufferListTest, Get) {\n  BufferList buffer_list(device_);\n  buffer_list.Build({10, 100});\n  buffer_list.Set(\"Height\", 720);\n  buffer_list.Set(\"Width\", 1280);\n\n  buffer_list.Set(\"PTS\", 10);\n  buffer_list.Set(\"FPS\", 30.1F);\n\n  int i_value = 0;\n  float f_valud = 0.0;\n  for (size_t i = 0; i < buffer_list.Size(); ++i) {\n    EXPECT_TRUE(buffer_list[i]->Get(\"Height\", i_value));\n    EXPECT_EQ(i_value, 720);\n\n    EXPECT_TRUE(buffer_list[i]->Get(\"Width\", i_value));\n    EXPECT_EQ(i_value, 1280);\n\n    EXPECT_TRUE(buffer_list[i]->Get(\"PTS\", i_value));\n    EXPECT_EQ(i_value, 10);\n\n    EXPECT_TRUE(buffer_list[i]->Get(\"FPS\", f_valud));\n    EXPECT_EQ(f_valud, 30.1F);\n  }\n\n  std::shared_ptr<BufferList> bl_ptr(&buffer_list, [](void *p) {});\n\n  BufferList buffer_list2(device_);\n  buffer_list2.Build({10, 100});\n  buffer_list2.CopyMeta(bl_ptr);\n\n  buffer_list2.Set(\"Height\", 360);\n\n  for (size_t i = 0; i < buffer_list2.Size(); ++i) {\n    EXPECT_TRUE(buffer_list[i]->Get(\"Height\", i_value));\n    EXPECT_EQ(i_value, 720);\n\n    EXPECT_TRUE(buffer_list2[i]->Get(\"Height\", i_value));\n    EXPECT_EQ(i_value, 360);\n  }\n}\n\nTEST_F(BufferListTest, EmplaceBack) {\n  auto ptr = std::make_shared<uint8_t>();\n  *ptr = 123;\n  BufferList buffer_list(device_);\n\n  buffer_list.EmplaceBack(ptr, 1);\n  auto buffer1 = buffer_list.Front();\n  EXPECT_EQ(buffer1->MutableData(), ptr.get());\n\n  buffer_list.EmplaceBack(ptr.get(), 1, [](void * /*unused*/) {});\n  auto buffer2 = buffer_list.Back();\n  EXPECT_EQ(buffer2->MutableData(), ptr.get());\n\n  buffer_list.EmplaceBack(ptr.get(), 1);\n  auto buffer3 = buffer_list.Back();\n  EXPECT_NE(buffer3->MutableData(), ptr.get());\n  auto *ptr3 = 
(uint8_t *)(buffer3->MutableData());\n  EXPECT_EQ(*ptr3, 123);\n\n  *ptr = 234;\n  buffer_list.EmplaceBackFromHost(ptr.get(), 1);\n  auto buffer4 = buffer_list.Back();\n  EXPECT_NE(buffer4->MutableData(), ptr.get());\n  auto *ptr4 = (uint8_t *)(buffer4->MutableData());\n  EXPECT_EQ(*ptr4, 234);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/buffer_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/buffer.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n#include <utility>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\nnamespace modelbox {\nclass BufferTest : public testing::Test {\n public:\n  BufferTest() = default;\n\n protected:\n  std::shared_ptr<Device> device_;\n  std::shared_ptr<MockDriverCtl> ctl_;\n\n  void SetUp() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    ctl_ = std::make_shared<MockDriverCtl>();\n    modelbox::DriverDesc desc;\n\n    desc.SetClass(\"DRIVER-DEVICE\");\n    desc.SetType(\"cpu\");\n    desc.SetName(\"device-driver-cpu\");\n    desc.SetDescription(\"the cpu device\");\n    desc.SetVersion(\"8.9.2\");\n    std::string file_path_device =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n    desc.SetFilePath(file_path_device);\n    ctl_->AddMockDriverDevice(\"cpu\", desc);\n\n    bool result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cpu.so\");\n\n    EXPECT_TRUE(result);\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    
device_mgr->Initialize(drivers, config);\n    device_ = device_mgr->CreateDevice(\"cpu\", \"0\");\n    device_->SetMemQuota(10240);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    drivers->Clear();\n    device_ = nullptr;\n  };\n};\n\nTEST_F(BufferTest, MutableData) {\n  std::vector<int> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};\n\n  Buffer buffer(device_);\n  buffer.Build(data.size() * sizeof(int));\n  auto *dev_data = static_cast<int *>(buffer.MutableData());\n  for (size_t i = 0; i < data.size(); ++i) {\n    dev_data[i] = data[i];\n  }\n\n  const auto *const buffer_data = (const int *)buffer.ConstData();\n  EXPECT_NE(nullptr, buffer_data);\n  for (size_t i = 0; i < data.size(); i++) {\n    EXPECT_EQ(buffer_data[i], data[i]);\n  }\n\n  buffer.SetError(\"BufferTest.ProcessError\", \"exception test\");\n\n  auto *buffer_data2 = (int *)buffer.MutableData();\n  EXPECT_EQ(nullptr, buffer_data2);\n  EXPECT_TRUE(buffer.HasError());\n}\n\nTEST_F(BufferTest, SetException) {\n  Buffer buffer;\n  EXPECT_FALSE(buffer.HasError());\n\n  buffer.SetError(\"BufferTest.ProcessError\", \"exception test\");\n  EXPECT_TRUE(buffer.HasError());\n}\n\nTEST_F(BufferTest, Size) {\n  std::vector<int> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};\n\n  auto dev_mem = device_->MemAlloc(data.size() * sizeof(int));\n  Buffer buffer(device_);\n  buffer.Build(data.size() * sizeof(int));\n  EXPECT_EQ(buffer.GetBytes(), data.size() * sizeof(int));\n}\n\nTEST_F(BufferTest, Get) {\n  Buffer buffer(device_);\n  buffer.Set(\"Height\", 720);\n  buffer.Set(\"Width\", 1280);\n\n  buffer.Set(\"PTS\", 10);\n  buffer.Set(\"FPS\", 30.1F);\n\n  int i_value = 0;\n  float f_valud = 0.0;\n  EXPECT_TRUE(buffer.Get(\"Height\", i_value));\n  EXPECT_EQ(i_value, 720);\n\n  EXPECT_TRUE(buffer.Get(\"Width\", i_value));\n  EXPECT_EQ(i_value, 1280);\n\n  
EXPECT_TRUE(buffer.Get(\"PTS\", i_value));\n  EXPECT_EQ(i_value, 10);\n\n  EXPECT_TRUE(buffer.Get(\"FPS\", f_valud));\n  EXPECT_EQ(f_valud, 30.1F);\n\n  std::shared_ptr<Buffer> buf_ptr(&buffer, [](void *p) {});\n\n  Buffer buffer2(device_);\n  buffer2.CopyMeta(buf_ptr);\n\n  buffer2.Set(\"Height\", 360);\n\n  EXPECT_TRUE(buffer.Get(\"Height\", i_value));\n  EXPECT_EQ(i_value, 720);\n\n  EXPECT_TRUE(buffer2.Get(\"Height\", i_value));\n  EXPECT_EQ(i_value, 360);\n\n  buffer2.Get(\"Not_Found\", i_value, 1000);\n  EXPECT_EQ(i_value, 1000);\n\n  buffer2.Get(\"Not_Found\", f_valud, 100.F);\n  EXPECT_EQ(f_valud, 100.F);\n}\n\nTEST_F(BufferTest, GetCast) {\n  Buffer buffer(device_);\n  int32_t weight = 720;\n  buffer.Set(\"weight\", weight);\n  int64_t weight64 = 0;\n  bool res = buffer.Get(\"weight\", weight64);\n  EXPECT_TRUE(res);\n  EXPECT_EQ(weight64, 720);\n}\n\nTEST_F(BufferTest, Buffer1) {\n  Buffer buffer(device_);\n  Buffer buffer2 = buffer;\n\n  EXPECT_EQ(buffer.MutableData(), buffer2.MutableData());\n}\n\nTEST_F(BufferTest, Copy) {\n  Buffer buffer(device_);\n  auto buffer2 = buffer.Copy();\n\n  EXPECT_EQ(buffer.MutableData(), buffer2->MutableData());\n}\n\nTEST_F(BufferTest, BuildFromHost) {\n  auto buffer = std::make_shared<Buffer>(device_);\n  std::string data = \"this is a message\";\n  auto ret = buffer->BuildFromHost((char*)data.c_str(), data.length() + 1);\n  EXPECT_STREQ((char*)buffer->ConstData(), data.c_str());\n}\n\nTEST_F(BufferTest, DeepCopy) {\n  Buffer buffer(device_);\n\n  constexpr int DATA_SIZE = 10;\n  std::vector<int> data(DATA_SIZE, 0);\n  for (size_t i = 0; i < data.size(); ++i) {\n    data[i] = i;\n  }\n\n  buffer.Build(data.data(), data.size() * sizeof(int), [](void *ptr) {});\n  buffer.Set(\"Height\", 720);\n  buffer.Set(\"Width\", 1280);\n\n  auto buffer2 = buffer.DeepCopy();\n\n  int buffer_value = 0;\n  int buffer2_value = -1;\n  buffer.Get(\"Height\", buffer_value);\n  buffer2->Get(\"Height\", buffer2_value);\n  
EXPECT_EQ(buffer_value, buffer2_value);\n\n  buffer_value = 0;\n  buffer2_value = -1;\n  buffer.Get(\"Width\", buffer_value);\n  buffer2->Get(\"Width\", buffer2_value);\n  EXPECT_EQ(buffer_value, buffer2_value);\n\n  auto *buf_data = (int *)buffer.MutableData();\n  auto *buf_data2 = (int *)buffer2->MutableData();\n  EXPECT_NE(buf_data, buf_data2);\n\n  EXPECT_EQ(buffer.GetBytes(), buffer2->GetBytes());\n  for (size_t i = 0; i < data.size(); ++i) {\n    EXPECT_EQ(buf_data[i], data[i]);\n    EXPECT_EQ(buf_data2[i], data[i]);\n  }\n}\n\nclass MockBuffer : public Buffer {\n public:\n  MockBuffer(const std::shared_ptr<Device> &device) : Buffer(device){};\n  ~MockBuffer() override = default;\n  void SetDelayedCopyDestinationDevice(std::shared_ptr<Device> dest_device) {\n    Buffer::SetDelayedCopyDestinationDevice(std::move(dest_device));\n  }\n};\n\nTEST_F(BufferTest, MoveToTargetDevice) {\n  auto device_cuda_src_path = std::string(DEVICE_CUDA_SO_PATH);\n  auto device_cuda_dest_path =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cuda.so\";\n  CopyFile(device_cuda_src_path, device_cuda_dest_path, 0, true);\n  auto drivers = Drivers::GetInstance();\n  drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-cuda.so\");\n  auto dev_mgr = DeviceManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  dev_mgr->Clear();\n  dev_mgr->Initialize(drivers, config);\n  auto device_cuda = dev_mgr->CreateDevice(\"cuda\", \"0\");\n  if (device_cuda == nullptr) {\n    GTEST_SKIP();\n  }\n\n  MockBuffer buffer(device_cuda);\n  buffer.Build(3 * sizeof(int));\n\n  auto device_cpu = dev_mgr->CreateDevice(\"cpu\", \"0\");\n  buffer.SetDelayedCopyDestinationDevice(device_cpu);\n  EXPECT_EQ(\"cuda\", buffer.GetDevice()->GetType());\n  const auto *data = buffer.ConstData();\n  EXPECT_NE(data, nullptr);\n  EXPECT_EQ(\"cpu\", buffer.GetDevice()->GetType());\n  EXPECT_EQ(3 * sizeof(int), buffer.GetBytes());\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/buffer_type_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/buffer_type.h\"\n#include \"gtest/gtest.h\"\nnamespace modelbox {\nclass BufferTypeTest : public testing::Test {\n public:\n  BufferTypeTest() = default;\n\n protected:\n  void SetUp() override {\n    auto *tree = BufferTypeTree::GetInstance();\n    tree->AddRootType(\"raw\");\n  };\n  void TearDown() override {\n    auto *tree = BufferTypeTree::GetInstance();\n    tree->RemoveType(\"raw\");\n  };\n};\n\nTEST_F(BufferTypeTest, AddRootType) {\n  auto *tree = BufferTypeTree::GetInstance();\n  EXPECT_EQ(true,tree->AddRootType(\"raw\"));\n  EXPECT_EQ(false,tree->AddRootType(\"unknow_node\"));\n  EXPECT_EQ(true,tree->RemoveType(\"raw\"));\n  EXPECT_EQ(true,tree->AddRootType(\"raw\"));\n  EXPECT_EQ(\"raw\",tree->GetType(\"raw\")->GetType());\n}\n\nTEST_F(BufferTypeTest, AddType) {\n  auto *tree = BufferTypeTree::GetInstance();\n  EXPECT_EQ(true, tree->AddType(\"tensor\", \"raw\"));\n  EXPECT_EQ(\"tensor\", tree->GetType(\"tensor\")->GetType());\n  EXPECT_EQ(false, tree->AddType(\"tensor\", \"wrong_node\"));\n\n  EXPECT_EQ(false, tree->AddType(\"other_tensor\", \"wrong_node\"));\n  EXPECT_EQ(nullptr, tree->GetType(\"other_tensor\"));\n  tree->AddType(\"other_tensor\", \"raw\");\n  EXPECT_EQ(false, tree->AddType(\"other_tensor\", \"tensor\"));\n}\n\nTEST_F(BufferTypeTest, IsCompatible) {\n  auto 
*tree = BufferTypeTree::GetInstance();\n  tree->AddType(\"other_tensor\", \"raw\");\n  tree->AddType(\"tensor\", \"raw\");\n  tree->AddType(\"nhwc_tensor\", \"tensor\");\n  EXPECT_EQ(true, tree->IsCompatible(\"nhwc_tensor\",\"tensor\"));\n  EXPECT_EQ(true, tree->IsCompatible(\"nhwc_tensor\",\"raw\"));\n  EXPECT_EQ(true, tree->IsCompatible(\"nhwc_tensor\",\"nhwc_tensor\"));\n  EXPECT_EQ(false, tree->IsCompatible(\"nhwc_tensor\",\"other_tensor\"));\n  EXPECT_EQ(false, tree->IsCompatible(\"nhwc_tensor\",\"unknow_node\"));\n  EXPECT_EQ(false, tree->IsCompatible(\"unknow_node\",\"raw\"));\n}\n\nTEST_F(BufferTypeTest, RemoveType) {\n  auto *tree = BufferTypeTree::GetInstance();\n  EXPECT_EQ(false, tree->RemoveType(\"unknow_format\"));\n  tree->AddType(\"other_tensor\", \"raw\");\n  tree->AddType(\"tensor\", \"raw\");\n  tree->AddType(\"nhwc_tensor\", \"tensor\");\n  EXPECT_EQ(false, tree->RemoveType(\"unknow_format\"));\n  EXPECT_EQ(true, tree->RemoveType(\"tensor\"));\n  EXPECT_EQ(nullptr, tree->GetType(\"nhwc_tensor\"));\n  EXPECT_EQ(nullptr, tree->GetType(\"tensor\"));\n  EXPECT_EQ(1, tree->GetType(\"raw\")->GetChildrenType().size());\n  EXPECT_EQ(\"other_tensor\",\n            tree->GetType(\"raw\")->GetChildrenType()[0]->GetType());\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/data_context_test.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/data_context.h\"\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/node.h\"\n#include \"modelbox/port.h\"\n\nnamespace modelbox {\n\nclass TestNode : public Node {\n public:\n  void InitIO(const std::set<std::string>& input_port_names,\n              const std::set<std::string>& output_port_names) {\n    ConfigurationBuilder builder;\n    auto config = builder.Build();\n    modelbox::Node::Init(input_port_names, output_port_names, config);\n  }\n};\n\nclass DataContextTest : public testing::Test {\n public:\n  DataContextTest() = default;\n  ~DataContextTest() override = default;\n\n protected:\n  void SetUp() override {\n    session_ = std::make_shared<Session>(nullptr);\n    stream_ = std::make_shared<Stream>(session_);\n    end_stream_ = std::make_shared<Stream>(session_);\n    root_index_info_ = std::make_shared<BufferIndexInfo>();\n    root_index_info_->SetIndex(0);\n    root_end_index_info_ = std::make_shared<BufferIndexInfo>();\n    root_end_index_info_->SetIndex(1);\n    root_end_index_info_->MarkAsEndFlag();\n    node_ = std::make_shared<TestNode>();\n    node_->InitIO(in_port_names_, out_port_names_);\n  };\n  void TearDown() override{};\n\n  std::shared_ptr<PortDataMap> BuildData(size_t data_count, bool has_end,\n                                         bool expand_from_end = 
false,\n                                         std::set<size_t> error_index = {}) {\n    if (expand_from_end) {\n      data_count = 1;\n    }\n    auto data = std::make_shared<PortDataMap>();\n    for (const auto& port_name : in_port_names_) {\n      auto& port_data_list = (*data)[port_name];\n      for (size_t i = 0; i < data_count; ++i) {\n        auto buffer = std::make_shared<Buffer>();\n        if (error_index.find(i) != error_index.end()) {\n          buffer->SetError(\"DataContext.InputError\", \"InputErrorMsg\");\n        }\n        auto index = BufferManageView::GetIndexInfo(buffer);\n        auto inherit_info = std::make_shared<BufferInheritInfo>();\n        inherit_info->SetType(BufferProcessType::EXPAND);\n        if (!expand_from_end) {\n          inherit_info->SetInheritFrom(root_index_info_);\n          index->SetStream(stream_);\n          index->SetIndex(stream_->GetBufferCount());\n          stream_->IncreaseBufferCount();\n        } else {\n          inherit_info->SetInheritFrom(root_end_index_info_);\n          index->SetStream(end_stream_);\n          index->SetIndex(0);\n          end_stream_->IncreaseBufferCount();\n        }\n        index->SetInheritInfo(inherit_info);\n        port_data_list.push_back(buffer);\n        if (i == (data_count - 1) && has_end) {\n          index->MarkAsEndFlag();\n        }\n      }\n    }\n\n    return data;\n  }\n\n  void ProcessData(FlowUnitDataContext* data_ctx, BufferProcessType type,\n                   size_t expect_input_count, size_t output_count,\n                   std::set<size_t> error_index = {}) {\n    auto process_info = std::make_shared<BufferProcessInfo>();\n    process_info->SetType(type);\n\n    for (const auto& input_name : in_port_names_) {\n      auto inputs = data_ctx->Input(input_name);\n      ASSERT_EQ(inputs->Size(), expect_input_count);\n      std::list<std::shared_ptr<BufferIndexInfo>> input_index_list;\n      for (auto& input : *inputs) {\n        
input_index_list.push_back(BufferManageView::GetIndexInfo(input));\n      }\n\n      process_info->SetParentBuffers(input_name, std::move(input_index_list));\n    }\n\n    auto output_map = data_ctx->Output();\n    ASSERT_NE(output_map, nullptr);\n    for (const auto& output_name : out_port_names_) {\n      auto output_list = std::make_shared<BufferList>();\n      (*output_map)[output_name] = output_list;\n      for (size_t i = 0; i < output_count; ++i) {\n        auto buffer = std::make_shared<Buffer>();\n        if (error_index.find(i) != error_index.end()) {\n          buffer->SetError(\"DataContext.ProcessDataError\", \"ProcessErrorMsg\");\n        }\n        output_list->PushBack(buffer);\n        auto index = BufferManageView::GetIndexInfo(output_list->Back());\n        index->SetProcessInfo(process_info);\n      }\n    }\n  }\n\n  void CheckPortDataError(BufferPtrList port_data_list, size_t data_count,\n                          std::set<size_t> error_index = {}) {\n    ASSERT_EQ(port_data_list.size(), data_count);\n    for (size_t i = 0; i < port_data_list.size(); ++i) {\n      if (error_index.find(i) != error_index.end()) {\n        EXPECT_TRUE(port_data_list[i]->HasError());\n      } else {\n        EXPECT_FALSE(port_data_list[i]->HasError());\n      }\n    }\n  }\n\n  std::shared_ptr<Session> session_;\n  std::shared_ptr<Stream> stream_;\n  std::shared_ptr<Stream> end_stream_;\n  std::shared_ptr<BufferIndexInfo> root_index_info_;\n  std::shared_ptr<BufferIndexInfo> root_end_index_info_;\n\n  std::set<std::string> in_port_names_{\"in_1\"};\n  std::set<std::string> out_port_names_{\"out_1\"};\n  std::shared_ptr<TestNode> node_;\n};\n\nTEST_F(DataContextTest, NormalTest) {\n  node_->SetFlowType(FlowType::NORMAL);\n  auto data = BuildData(10, true);\n\n  NormalFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* recv data */\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  
ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 9, 9);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 10);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamTest) {\n  node_->SetFlowType(FlowType::STREAM);\n  auto data = BuildData(10, true);\n\n  StreamFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* recv data */\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 9, 9);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 10);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamTest2) {\n  node_->SetFlowType(FlowType::STREAM);\n\n  auto data_ctx = std::make_shared<StreamFlowUnitDataContext>(\n      node_.get(), nullptr, session_);\n  session_->AddStateListener(data_ctx);\n  /* 1. 
recv data and continue generate */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx->WriteInputData(data);\n  EXPECT_TRUE(data_ctx->IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx->IsSkippable());\n  ProcessData(data_ctx.get(), BufferProcessType::ORIGIN, 1, 1);\n\n  data_ctx->SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx->SetStatus(STATUS_CONTINUE);\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);  // event to continue process\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx->PostProcess();\n  EXPECT_FALSE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx->ClearData();\n  ASSERT_FALSE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_CONTINUE);\n  /* 2. recv end flag */\n  // write data\n  data = BuildData(1, true);\n  data_ctx->WriteInputData(data);\n  EXPECT_FALSE(data_ctx->IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx->IsSkippable());\n  // post process\n  data_ctx->PostProcess();\n  EXPECT_FALSE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_TRUE(out_data.begin()->second.empty());\n  data_ctx->ClearData();\n  ASSERT_FALSE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_CONTINUE);\n  /* 3. 
recv event and delay to send event */\n  // set event\n  data_ctx->SetEvent(user_event);\n  EXPECT_FALSE(data_ctx->IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx->IsSkippable());\n  ASSERT_EQ(data_ctx->Event(), user_event);\n\n  auto output_map = data_ctx->Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx->SetStatus(STATUS_CONTINUE);  // delay event, only return continue\n  // post process\n  data_ctx->PostProcess();\n  EXPECT_FALSE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx->ClearData();\n  ASSERT_FALSE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_CONTINUE);\n  /* 4. 
close session */\n  session_->Close();\n  // check event\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);  // event to continue process\n  process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto finish_event = process_event->GetUserEvent();\n  // triger finish event\n  data_ctx->SetEvent(finish_event);\n  EXPECT_FALSE(data_ctx->IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx->IsSkippable());\n  ASSERT_EQ(data_ctx->Event(), finish_event);\n  // post process\n  data_ctx->PostProcess();\n  EXPECT_TRUE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx->ClearData();\n  ASSERT_TRUE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_CONTINUE);\n}\n\nTEST_F(DataContextTest, StreamTest_SendEventOutOfNodeRun) {\n  node_->SetFlowType(FlowType::STREAM);\n\n  auto data_ctx = std::make_shared<StreamFlowUnitDataContext>(\n      node_.get(), nullptr, session_);\n  session_->AddStateListener(data_ctx);\n  /* 1. 
recv data and continue generate */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx->WriteInputData(data);\n  EXPECT_TRUE(data_ctx->IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx->IsSkippable());\n  ProcessData(data_ctx.get(), BufferProcessType::ORIGIN, 1, 1);\n\n  data_ctx->SetStatus(STATUS_CONTINUE);  // event will send out of node run\n\n  // post process\n  data_ctx->PostProcess();\n  EXPECT_FALSE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx->ClearData();\n  ASSERT_FALSE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_CONTINUE);\n  /* 2. send delay event */\n  data_ctx->SendEvent(std::make_shared<FlowUnitEvent>());\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);  // event to continue process\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  /* 3. 
recv event */\n  // set event\n  data_ctx->SetEvent(user_event);\n  EXPECT_FALSE(data_ctx->IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx->IsSkippable());\n  ASSERT_EQ(data_ctx->Event(), user_event);\n\n  auto output_map = data_ctx->Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx->SetStatus(STATUS_SUCCESS);\n  // post process\n  ASSERT_EQ(data_ctx->PostProcess(), STATUS_SUCCESS);\n  EXPECT_FALSE(data_ctx->IsDataPost());\n  data_ctx->UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx->PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx->ClearData();\n  ASSERT_FALSE(data_ctx->IsFinished());\n  ASSERT_EQ(data_ctx->GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, NormalExpandTest) {\n  node_->SetOutputType(FlowOutputType::EXPAND);\n  node_->SetFlowType(FlowType::NORMAL);\n\n  NormalExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv data and continue expand */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  ProcessData(&data_ctx, BufferProcessType::EXPAND, 1, 1);\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);  // event to continue expand current buffer\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 2. 
recv event and close session before user send event*/\n  // set event\n  data_ctx.SetEvent(user_event);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ASSERT_EQ(data_ctx.Event(), user_event);\n\n  auto output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  session_->Close();\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 0);  // session close, send user event failed\n  process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(),\n            2);  // expand end, include data and end flag\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n}\n\nTEST_F(DataContextTest, StreamExpandTest) {\n  node_->SetOutputType(FlowOutputType::EXPAND);\n  node_->SetFlowType(FlowType::STREAM);\n\n  StreamExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv data and continue expand */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  ProcessData(&data_ctx, BufferProcessType::EXPAND, 1, 1);\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);  // event to continue expand current buffer\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  auto expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // should not expand next\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 2. 
recv event and close session before user send event*/\n  // set event\n  data_ctx.SetEvent(user_event);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ASSERT_EQ(data_ctx.Event(), user_event);\n\n  auto output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  session_->Close();\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 0);  // session close, send user event failed\n  process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  user_event = process_event->GetUserEvent();\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event,\n            nullptr);  // end flag not received, should not send expand next\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(),\n            2);  // expand end, include data and end flag\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 3. 
recv end flag and expand */\n  // write data\n  data = BuildData(1, true);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // no data to expand\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  ASSERT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());  // data ctx process end\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n}\n\nTEST_F(DataContextTest, StreamExpandTest2) {\n  node_->SetOutputType(FlowOutputType::EXPAND);\n  node_->SetFlowType(FlowType::STREAM);\n\n  StreamExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv data and continue expand */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  auto output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event =\n      process_event->GetUserEvent();  // event to continue expand cur buffer\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  auto expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // should not expand next\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 2. 
recv event and close session after user send event*/\n  // set event\n  data_ctx.SetEvent(user_event);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ASSERT_EQ(data_ctx.Event(), user_event);\n\n  output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  session_->Close();\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);\n  process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  user_event =\n      process_event->GetUserEvent();  // event to continue expand current buffer\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // should not expand next\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 3. 
recv end flag */\n  // write data\n  data = BuildData(1, true);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event,\n            nullptr);  // should not send expand next, buffer[0] is still expand\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_TRUE(out_data.begin()->second.empty());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 4. recv event */\n  // set event\n  data_ctx.SetEvent(user_event);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ASSERT_EQ(data_ctx.Event(), user_event);\n\n  output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  session_->Close();\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 0);  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();  // expand end flag\n  ASSERT_NE(expand_event, nullptr);\n  EXPECT_EQ(expand_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_NEXT_STREAM);\n  // check output and clear\n  out_data.clear();\n  
data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 2);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 5. expand end flag */\n  // expand end flag buffer\n  data_ctx.ExpandNextBuffer();\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // no data to expand\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  ASSERT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());  // data ctx process end\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n}\n\nTEST_F(DataContextTest, StreamExpandTest3) {\n  node_->SetOutputType(FlowOutputType::EXPAND);\n  node_->SetFlowType(FlowType::STREAM);\n\n  StreamExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv data and continue expand */\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  auto output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  auto output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SendEvent(std::make_shared<FlowUnitEvent>());\n  data_ctx.SetStatus(STATUS_CONTINUE);\n\n  FlowunitEventList event_list;\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 1);\n  auto process_event = event_list->front();\n  ASSERT_NE(process_event, nullptr);\n  auto user_event =\n      process_event->GetUserEvent();  // event to continue expand cur buffer\n  ASSERT_EQ(process_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_UNFINISH_DATA);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  auto expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // should not expand next\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 2. 
recv data2 */\n  // write data2\n  data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event,\n            nullptr);  // should not send expand next, data1 is still expand\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_TRUE(out_data.begin()->second.empty());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_CONTINUE);\n  /* 3. recv event and stop continue*/\n  // set event\n  data_ctx.SetEvent(user_event);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ASSERT_EQ(data_ctx.Event(), user_event);\n\n  output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_NE(expand_event, nullptr);  // expand data2\n  EXPECT_EQ(expand_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_NEXT_STREAM);\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  
ASSERT_EQ(out_data.begin()->second.size(), 2);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n  /* 4. recv data3 */\n  // write data3\n  data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_TRUE(event_list->empty());  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event,\n            nullptr);  // should not send expand next, data2 event has been sent\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_TRUE(out_data.begin()->second.empty());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n  /* 5. 
expand data2 */\n  // expand data2\n  data_ctx.ExpandNextBuffer();\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 0);  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();  // expand data3\n  ASSERT_NE(expand_event, nullptr);\n  EXPECT_EQ(expand_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::EXPAND_NEXT_STREAM);\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 2);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n  /* 6. 
expand data3 */\n  // expand data3\n  data_ctx.ExpandNextBuffer();\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  output_map = data_ctx.Output();\n  ASSERT_NE(output_map, nullptr);\n  output_list = std::make_shared<BufferList>();\n  (*output_map)[\"out_1\"] = output_list;\n  output_list->PushBack(std::make_shared<Buffer>());\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  event_list->clear();\n  node_->GetEventPort()->Recv(event_list);\n  ASSERT_EQ(event_list->size(), 0);  // no user event\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // expand event\n  expand_event = data_ctx.GenerateSendEvent();\n  ASSERT_EQ(expand_event, nullptr);  // no data to expand\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 2);\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, NormalCollapseTest) {\n  node_->SetOutputType(FlowOutputType::COLLAPSE);\n  node_->SetFlowType(FlowType::STREAM);\n\n  NormalCollapseFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv data */\n  auto data = BuildData(10, true);\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::COLLAPSE, 9, 1);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);  // no end flag\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamCollapseTest) {\n  node_->SetOutputType(FlowOutputType::COLLAPSE);\n  node_->SetFlowType(FlowType::STREAM);\n\n  StreamCollapseFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* 1. 
recv sub stream1 and stream2 */\n  auto stream1 = BuildData(10, true);\n  auto stream2 = BuildData(1, true, true);\n  // write data\n  data_ctx.WriteInputData(stream1);\n  data_ctx.WriteInputData(stream2);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::COLLAPSE, 9, 1);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // collapse event\n  auto collapse_event = data_ctx.GenerateSendEvent();  // collapse next stream\n  ASSERT_NE(collapse_event, nullptr);\n  EXPECT_EQ(collapse_event->GetEventCode(),\n            FlowUnitInnerEvent::EventCode::COLLAPSE_NEXT_STREAM);\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);  // no end flag\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  EXPECT_EQ(out_index->GetIndex(), 0);\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n  /* 2. 
recv sub stream2 */\n  // collapse event\n  data_ctx.CollapseNextStream();\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  ASSERT_EQ(out_data.begin()->second.size(), 1);  // end flag\n  out_index = BufferManageView::GetIndexInfo(out_data.begin()->second.front());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  EXPECT_EQ(out_index->GetIndex(), 1);\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamRecvError_Visible) {\n  node_->SetFlowType(FlowType::STREAM);\n  node_->SetExceptionVisible(true);\n  std::set<size_t> error_index{2, 3};\n  auto data = BuildData(10, true, false, error_index);\n\n  StreamFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // recv data\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.HasError());\n  auto port_data_list = data_ctx.GetInputs().begin()->second;\n  CheckPortDataError(port_data_list, 9, error_index);\n\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 9, 9);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 10);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  
ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamRecvError_InVisible) {\n  node_->SetFlowType(FlowType::STREAM);\n  node_->SetExceptionVisible(false);\n  auto data = BuildData(10, true, false, {2, 3});\n\n  StreamFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.HasError());\n  auto port_data_list = data_ctx.GetInputs().begin()->second;\n  CheckPortDataError(port_data_list, 7);\n\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 7, 7);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 10, {7, 8});\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, Normal_ProcessError) {\n  node_->SetFlowType(FlowType::NORMAL);\n  auto data = BuildData(10, true);\n\n  NormalFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* recv data */\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  std::set<size_t> error_index{4, 5, 6};\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 9, 9, error_index);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  
data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 10, error_index);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, Stream_DataPreError) {\n  node_->SetFlowType(FlowType::STREAM);\n  auto data = BuildData(10, false);\n\n  StreamFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* recv data */\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  data_ctx.DealWithDataPreError(\"DataContext.DataPreError\", \"DataPreErrorMsg\");\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 1, {0});\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n\n  /* 2. 
recv end flag */\n  // write data\n  data = BuildData(1, true);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  out_data.clear();\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_TRUE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, Stream_ProcessError) {\n  node_->SetFlowType(FlowType::STREAM);\n  auto data = BuildData(10, true);\n\n  StreamFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  /* recv data */\n  // write data\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  std::set<size_t> error_index{2, 3};\n  ProcessData(&data_ctx, BufferProcessType::ORIGIN, 9, 6, error_index);\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 7, error_index);\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, NormalExpand_ProcessError) {\n  node_->SetFlowType(FlowType::NORMAL);\n  node_->SetOutputType(FlowOutputType::EXPAND);\n\n  NormalExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // write data\n  
auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_FALSE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  std::set<size_t> error_index{2, 3};\n  ProcessData(&data_ctx, BufferProcessType::EXPAND, 1, 5, error_index);\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 6, error_index);\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamExpand_ProcessError) {\n  node_->SetFlowType(FlowType::STREAM);\n  node_->SetOutputType(FlowOutputType::EXPAND);\n\n  StreamExpandFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // write data\n  auto data = BuildData(1, false);\n  data_ctx.WriteInputData(data);\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n\n  std::set<size_t> error_index{2, 3};\n  ProcessData(&data_ctx, BufferProcessType::EXPAND, 1, 5, error_index);\n  data_ctx.SetStatus(STATUS_SUCCESS);\n\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_FALSE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 6, error_index);\n  data_ctx.ClearData();\n  ASSERT_FALSE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, CollapseRecvError_Visible) {\n  node_->SetFlowType(FlowType::NORMAL);\n  node_->SetOutputType(FlowOutputType::COLLAPSE);\n  
node_->SetExceptionVisible(true);\n\n  NormalCollapseFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // recv data\n  std::set<size_t> error_index{2, 3};\n  auto data = BuildData(10, true, false, error_index);\n  // write data\n  data_ctx.WriteInputData(data);  \n  EXPECT_TRUE(data_ctx.HasError());\n  auto port_data_list = data_ctx.GetInputs().begin()->second;\n  CheckPortDataError(port_data_list, 9, error_index);\n  \n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  ProcessData(&data_ctx, BufferProcessType::COLLAPSE, 9, 1);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 1);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, CollapseRecvError_InVisible) {\n  node_->SetFlowType(FlowType::NORMAL);\n  node_->SetOutputType(FlowOutputType::COLLAPSE);\n  node_->SetExceptionVisible(false);\n\n  NormalCollapseFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // recv data\n  std::set<size_t> error_index{2, 3};\n  auto data = BuildData(10, true, false, error_index);\n  // write data\n  data_ctx.WriteInputData(data);    \n  EXPECT_FALSE(data_ctx.HasError());\n  EXPECT_TRUE(data_ctx.GetInputs().empty());\n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_TRUE(data_ctx.IsSkippable());\n\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap 
out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 1, {0});\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\nTEST_F(DataContextTest, StreamCollapse_ProcessError) {\n  node_->SetFlowType(FlowType::NORMAL);\n  node_->SetOutputType(FlowOutputType::COLLAPSE);\n\n  NormalCollapseFlowUnitDataContext data_ctx(node_.get(), nullptr, session_);\n  // recv data\n  auto data = BuildData(10, true);\n  // write data\n  data_ctx.WriteInputData(data);\n  \n  EXPECT_TRUE(data_ctx.IsDataPre());\n  // process\n  ASSERT_FALSE(data_ctx.IsSkippable());\n  std::set<size_t> error_index{0};\n  ProcessData(&data_ctx, BufferProcessType::COLLAPSE, 9, 1, error_index);\n\n  data_ctx.SetStatus(STATUS_SUCCESS);\n  // post process\n  data_ctx.PostProcess();\n  EXPECT_TRUE(data_ctx.IsDataPost());\n  data_ctx.UpdateProcessState();\n  // check output and clear\n  PortDataMap out_data;\n  data_ctx.PopOutputData(out_data);\n  ASSERT_EQ(out_data.size(), 1);\n  auto port_data_list = out_data.begin()->second;\n  CheckPortDataError(port_data_list, 1, error_index);\n  auto out_index =\n      BufferManageView::GetIndexInfo(out_data.begin()->second.back());\n  EXPECT_FALSE(out_index->IsEndFlag());\n  data_ctx.ClearData();\n  ASSERT_TRUE(data_ctx.IsFinished());\n  ASSERT_EQ(data_ctx.GetStatus(), STATUS_SUCCESS);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/data_hub_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"engine/common/data_hub.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nclass DefaultDataHubTest : public testing::Test {\n public:\n  DefaultDataHubTest() = default;\n\n protected:\n  std::shared_ptr<Node> node_;\n  void SetUp() override { node_ = std::make_shared<Node>(); };\n  void TearDown() override{};\n};\n\nTEST_F(DefaultDataHubTest, AddPort) {\n  DefaultDataHub data_hub;\n  int priority = 0;\n  auto port = std::make_shared<InPort>(\"input_1\", node_);\n  port->SetPriority(priority);\n  auto priority_port = std::make_shared<PriorityPort>(port);\n\n  EXPECT_EQ(data_hub.GetPortNum(), 0);\n\n  auto status = data_hub.AddPort(priority_port);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_EQ(data_hub.GetPortNum(), 1);\n\n  status = data_hub.AddPort(priority_port);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_EQ(data_hub.GetPortNum(), 1);\n}\n\nTEST_F(DefaultDataHubTest, SelectActivePort) {\n  DefaultDataHub data_hub;\n  int priority_0 = 2;\n  auto port_0 = std::make_shared<InPort>(\"input_0\", node_);\n  port_0->SetPriority(priority_0);\n  auto priority_port_0 = std::make_shared<PriorityPort>(port_0);\n\n  int priority_1 = 1;\n  auto port_1 = std::make_shared<InPort>(\"input_1\", 
node_);\n  port_1->SetPriority(priority_1);\n  auto priority_port_1 = std::make_shared<PriorityPort>(port_1);\n\n  int priority_3 = 1;\n  auto port_3 = std::make_shared<InPort>(\"input_2\", node_);\n  port_3->SetPriority(priority_3);\n  auto priority_port_3 = std::make_shared<PriorityPort>(port_3);\n  EXPECT_EQ(data_hub.GetPortNum(), 0);\n\n  auto status = data_hub.AddPort(priority_port_0);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_EQ(data_hub.GetPortNum(), 1);\n\n  status = data_hub.AddPort(priority_port_1);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_EQ(data_hub.GetPortNum(), 2);\n\n  status = data_hub.AddPort(priority_port_3);\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  EXPECT_EQ(data_hub.GetPortNum(), 3);\n\n  auto buffer_1_0 = std::make_shared<Buffer>();\n  BufferManageView::SetPriority(buffer_1_0, priority_1);\n  auto in_port_1 =\n      std::dynamic_pointer_cast<InPort>(priority_port_1->GetPort());\n  in_port_1->GetQueue()->Push(buffer_1_0);\n  in_port_1->NotifyPushEvent();\n  auto in_port_0 =\n      std::dynamic_pointer_cast<InPort>(priority_port_0->GetPort());\n  auto buffer_0_0 = std::make_shared<Buffer>();\n  BufferManageView::SetPriority(buffer_0_0, priority_0);\n  in_port_0->GetQueue()->Push(buffer_0_0);\n  in_port_0->NotifyPushEvent();\n  auto in_port_3 =\n      std::dynamic_pointer_cast<InPort>(priority_port_3->GetPort());\n  auto buffer_3_0 = std::make_shared<Buffer>();\n  BufferManageView::SetPriority(buffer_3_0, priority_3);\n  in_port_3->GetQueue()->Push(buffer_3_0);\n  in_port_3->NotifyPushEvent();\n  EXPECT_EQ(data_hub.GetActivePortNum(), 3);\n\n  std::shared_ptr<PriorityPort> active_port = nullptr;\n  status = data_hub.SelectActivePort(&active_port);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(active_port, priority_port_0);\n  EXPECT_EQ(data_hub.GetActivePortNum(), 2);\n\n  status = data_hub.SelectActivePort(&active_port);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(active_port, priority_port_1);\n  
EXPECT_EQ(data_hub.GetActivePortNum(), 1);\n\n  status = data_hub.SelectActivePort(&active_port);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(active_port, priority_port_3);\n  EXPECT_EQ(data_hub.GetActivePortNum(), 0);\n\n  status = data_hub.SelectActivePort(&active_port, 1000);\n  EXPECT_EQ(status, STATUS_TIMEDOUT);\n\n  status = data_hub.SelectActivePort(&active_port, -1);\n  EXPECT_EQ(status, STATUS_NODATA);\n\n  data_hub.AddToActivePort(priority_port_0);\n  priority_port_0->GetPort()->NotifyPushEvent();\n  priority_port_0->GetPort()->NotifyPopEvent();\n  EXPECT_EQ(data_hub.GetActivePortNum(), 1);\n}\n\nTEST_F(DefaultDataHubTest, InactivePort) {\n  DefaultDataHub data_hub;\n  int priority_0 = 2;\n  auto port_0 = std::make_shared<InPort>(\"input_0\", node_);\n  port_0->SetPriority(priority_0);\n  auto priority_port_0 = std::make_shared<PriorityPort>(port_0);\n\n  int priority_1 = 1;\n  auto port_1 = std::make_shared<InPort>(\"input_1\", node_);\n  port_1->SetPriority(priority_1);\n  auto priority_port_1 = std::make_shared<PriorityPort>(port_1);\n\n  int priority_2 = 1;\n  auto port_2 = std::make_shared<InPort>(\"input_2\", node_);\n  port_2->SetPriority(priority_2);\n  auto priority_port_2 = std::make_shared<PriorityPort>(port_2);\n  EXPECT_EQ(data_hub.GetPortNum(), 0);\n\n  data_hub.AddPort(priority_port_0);\n  data_hub.AddPort(priority_port_1);\n  data_hub.AddPort(priority_port_2);\n\n  EXPECT_EQ(data_hub.GetPortNum(), 3);\n\n  auto buffer = std::make_shared<Buffer>();\n  BufferManageView::SetPriority(buffer, priority_1);\n  auto in_port_0 =\n      std::dynamic_pointer_cast<InPort>(priority_port_0->GetPort());\n  in_port_0->GetQueue()->Push(buffer);\n  in_port_0->SetActiveState(false);\n  in_port_0->NotifyPushEvent();\n\n  std::shared_ptr<PriorityPort> active_port = nullptr;\n  auto status = data_hub.SelectActivePort(&active_port, 1000);\n  EXPECT_EQ(data_hub.GetActivePortNum(), 0);\n\n  in_port_0->SetActiveState(true);\n  in_port_0->NotifyPushEvent();\n\n  
status = data_hub.SelectActivePort(&active_port);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(active_port, priority_port_0);\n  EXPECT_EQ(data_hub.GetActivePortNum(), 0);\n\n  auto in_port_1 =\n      std::dynamic_pointer_cast<InPort>(priority_port_1->GetPort());\n  in_port_1->GetQueue()->Push(buffer);\n  in_port_1->SetActiveState(false);\n  in_port_1->NotifyPushEvent();\n\n  auto in_port_2 =\n      std::dynamic_pointer_cast<InPort>(priority_port_2->GetPort());\n  in_port_2->GetQueue()->Push(buffer);\n  in_port_2->NotifyPushEvent();\n\n  status = data_hub.SelectActivePort(&active_port);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(active_port, priority_port_2);\n  EXPECT_EQ(data_hub.GetActivePortNum(), 0);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/flow_graph_desc_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flow_graph_desc.h\"\n\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n\nnamespace modelbox {\n\nclass FlowGraphDescTest : public testing::Test {};\n\nTEST_F(FlowGraphDescTest, AddInputOutput) {\n  auto flow_graph_desc = std::make_shared<FlowGraphDesc>();\n  auto input1 = flow_graph_desc->AddInput(\"input1\");\n  ASSERT_NE(input1, nullptr);\n  EXPECT_EQ(input1->GetNodeName(), \"input1\");\n  flow_graph_desc->AddOutput(\"output1\", input1);\n  auto input_exist = flow_graph_desc->AddInput(\"input1\");\n  EXPECT_EQ(input_exist, nullptr);\n}\n\nTEST_F(FlowGraphDescTest, AddNode) {\n  auto flow_graph_desc = std::make_shared<FlowGraphDesc>();\n  flow_graph_desc->SetBatchSize(8);\n  flow_graph_desc->SetQueueSize(32);\n  flow_graph_desc->SetSkipDefaultDrivers(true);\n  flow_graph_desc->SetDriversDir({\"/test/\"});\n\n  auto input1 = flow_graph_desc->AddInput(\"input1\");\n  ASSERT_NE(input1, nullptr);\n\n  auto node1 = flow_graph_desc->AddNode(\n      \"fu\", \"cpu\", {\"image_width=100\", \"image_height=100\"}, input1);\n  ASSERT_NE(node1, nullptr);\n  EXPECT_EQ(node1->GetNodeName(), \"fu\");\n\n  auto node1_port1 = (*node1)[1];\n  ASSERT_NE(node1_port1, nullptr);\n  EXPECT_EQ(node1_port1->GetNodeName(), \"fu\");\n  EXPECT_FALSE(node1_port1->IsDescribeInName());\n  
EXPECT_EQ(node1_port1->GetPortIdx(), 1);\n\n  auto node1_portx = (*node1)[\"x\"];\n  ASSERT_NE(node1_portx, nullptr);\n  EXPECT_EQ(node1_portx->GetNodeName(), \"fu\");\n  EXPECT_TRUE(node1_portx->IsDescribeInName());\n  EXPECT_EQ(node1_portx->GetPortName(), \"x\");\n\n  auto node2 = flow_graph_desc->AddNode(\n      \"fu\", \"cpu\", {\"image_width=100\", \"image_height=100\"}, node1);\n  ASSERT_NE(node2, nullptr);\n  EXPECT_EQ(node2->GetNodeName(), \"fu2\");\n  node2->SetNodeName(\"custom_fu\");\n  EXPECT_EQ(node2->GetNodeName(), \"custom_fu\");\n\n  auto node3 = flow_graph_desc->AddNode(\n      \"fu\", \"cpu\", {\"image_width=100\", \"image_height=100\"},\n      {{\"in1\", (*node1)[0]}, {\"in2\", (*node2)[\"x\"]}});\n  ASSERT_NE(node3, nullptr);\n\n  flow_graph_desc->AddOutput(\"output1\", node3);\n}\n\nTEST_F(FlowGraphDescTest, AddFunction) {\n  auto flow_graph_desc = std::make_shared<FlowGraphDesc>();\n  auto func_node = flow_graph_desc->AddFunction(\n      [](const std::shared_ptr<DataContext>& data_ctx) -> Status {\n        return STATUS_OK;\n      },\n      {\"in1\", \"in2\"}, {\"out\"}, nullptr);\n  EXPECT_EQ(func_node, nullptr);\n\n  func_node = flow_graph_desc->AddFunction(\n      [](const std::shared_ptr<DataContext>& data_ctx) -> Status {\n        return STATUS_OK;\n      },\n      {\"in1\", \"in2\"}, {\"out\"}, {{\"in2\", nullptr}});\n  ASSERT_NE(func_node, nullptr);\n  func_node->SetNodeName(\"my_function\");\n  auto port0 = (*func_node)[0];\n  ASSERT_NE(port0, nullptr);\n  EXPECT_FALSE(port0->IsDescribeInName());\n  EXPECT_EQ(port0->GetNodeName(), \"my_function\");\n  EXPECT_EQ(port0->GetPortIdx(), 0);\n\n  port0 = (*func_node)[\"out\"];\n  ASSERT_NE(port0, nullptr);\n  EXPECT_TRUE(port0->IsDescribeInName());\n  EXPECT_EQ(port0->GetNodeName(), \"my_function\");\n  EXPECT_EQ(port0->GetPortName(), \"out\");\n\n  func_node = flow_graph_desc->AddFunction(\n      [](const std::shared_ptr<DataContext>& data_ctx) -> Status {\n        return STATUS_OK;\n      
},\n      {\"in1\"}, {\"out\"}, {{\"in2\", nullptr}});\n  EXPECT_NE(func_node, nullptr);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/flow_scheduler_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"engine/scheduler/flow_scheduler.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nclass FlowSchedulerTest : public testing::Test {\n public:\n  FlowSchedulerTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n\n  void TearDown() override { flow_->Destroy(); };\n};\n\nclass MockNode : public Node {\n public:\n  MockNode() = default;\n  MOCK_METHOD1(Run, Status(RunType type));\n};\n\nstatic SessionManager g_test_session_manager;\n\nTEST_F(FlowSchedulerTest, ShowScheduleStatus) {\n  auto device_ = flow_->GetDevice();\n\n  auto graph = std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<Node> node_a = nullptr;\n  std::shared_ptr<Node> node_b = nullptr;\n  std::shared_ptr<Node> node_c = nullptr;\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"queue_size\", \"1\");\n 
   config->SetProperty(\"interval_time\", 1000);\n    config->SetProperty(\"queue_size_event\", 1);\n\n    node_a = std::make_shared<Node>();\n    node_a->SetFlowUnitInfo(\"listen\", \"cpu\", \"0\", flowunit_mgr);\n    node_a->SetName(\"gendata\");\n    node_a->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    node_a->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_a));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"queue_size\", \"1\");\n\n    node_b = std::make_shared<Node>();\n    node_b->SetFlowUnitInfo(\"tensorlist_test_1\", \"cpu\", \"0\", flowunit_mgr);\n    node_b->SetName(\"tensorlist_test_1\");\n    node_b->Init({\"IN1\"}, {\"OUT1\"}, config);\n    node_b->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_b));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"queue_size\", \"1\");\n\n    config->SetProperty(\"max_count\", 1);\n    config->SetProperty(\"batch_size\", 1);\n\n    node_c = std::make_shared<Node>();\n    node_c->SetFlowUnitInfo(\"slow\", \"cpu\", \"0\", flowunit_mgr);\n    node_c->SetName(\"slow\");\n    node_c->Init({\"IN1\", \"IN2\"}, {}, config);\n    node_c->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_c));\n  }\n\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_b->GetName(), \"IN1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_c->GetName(), \"IN1\");\n  graph->AddLink(node_b->GetName(), \"OUT1\", node_c->GetName(), \"IN2\");\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  auto scheduler = std::make_shared<FlowScheduler>();\n  auto status = scheduler->Init(config);\n  EXPECT_EQ(status, STATUS_OK);\n  status = scheduler->Build(*graph);\n  
EXPECT_EQ(status, STATUS_OK);\n\n  EXPECT_EQ(scheduler->GetCheckCount(), 0);\n\n  scheduler->SetMaxCheckTimeoutCount(1);\n\n  scheduler->RunAsync();\n\n  auto scheduler_status = scheduler->Wait(3000, &status);\n  EXPECT_GT(scheduler->GetCheckCount(), 0);\n  MBLOG_INFO << \"count: \" << scheduler->GetCheckCount();\n  EXPECT_EQ(scheduler_status, STATUS_TIMEDOUT);\n  scheduler->Shutdown();\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/flowunit_balancer_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/flowunit_balancer.h>\n#include <modelbox/node.h>\n#include <modelbox/session.h>\n\n#include <chrono>\n#include <vector>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nclass BalancerMockFlowUnit : public FlowUnit {\n public:\n  Status Open(const std::shared_ptr<Configuration> &config) override {\n    return STATUS_OK;\n  }\n\n  Status Close() override { return STATUS_OK; }\n\n  Status Process(std::shared_ptr<DataContext> data_ctx) override {\n    return STATUS_OK;\n  }\n};\n\nclass BalancerMockDevice : public Device {\n public:\n  MOCK_CONST_METHOD0(GetDeviceID, std::string());\n};\n\nclass BalancerMockMemory : public DeviceMemory {\n public:\n  BalancerMockMemory(const std::shared_ptr<Device> &device,\n                     const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                     const std::shared_ptr<void> &device_mem_ptr, size_t size)\n      : DeviceMemory(device, mem_mgr, device_mem_ptr, size) {}\n};\n\nclass FlowUnitBalancerTest : public testing::Test {\n public:\n  std::shared_ptr<FlowUnitDataContext> BuildFlowUnitDataContext(\n      Node *node, const std::shared_ptr<DeviceMemory> &mem) {\n    auto data_ctx =\n        std::make_shared<NormalFlowUnitDataContext>(node, nullptr, nullptr);\n    auto stream_data_map = 
std::make_shared<PortDataMap>();\n    auto &buffer_list = (*stream_data_map)[\"test_port\"];\n    buffer_list.push_back(std::make_shared<Buffer>(mem));\n    data_ctx->WriteInputData(stream_data_map);\n    return data_ctx;\n  }\n\n  std::vector<std::shared_ptr<Device>> CreateDevices(size_t count) {\n    std::vector<std::shared_ptr<Device>> devices;\n    devices.reserve(count);\n    for (size_t i = 0; i < count; ++i) {\n      auto device = std::make_shared<BalancerMockDevice>();\n      EXPECT_CALL(*device, GetDeviceID())\n          .WillOnce(testing::Return(std::to_string(i)));\n      devices.push_back(device);\n    }\n\n    return devices;\n  }\n\n  std::vector<std::shared_ptr<FlowUnit>> CreateFlowUnits(\n      size_t count, const std::vector<std::shared_ptr<Device>> &devices) {\n    std::vector<std::shared_ptr<FlowUnit>> flowunits;\n    flowunits.reserve(count);\n    for (size_t i = 0; i < count; ++i) {\n      auto fu = std::make_shared<BalancerMockFlowUnit>();\n      if (i >= devices.size()) {\n        fu->SetBindDevice(devices.back());\n      } else {\n        fu->SetBindDevice(devices[i]);\n      }\n\n      flowunits.push_back(fu);\n    }\n\n    return flowunits;\n  }\n\n  std::vector<std::shared_ptr<DeviceMemory>> CreateMems(\n      size_t count, const std::vector<std::shared_ptr<Device>> &devices) {\n    std::vector<std::shared_ptr<DeviceMemory>> mems;\n    mems.reserve(count);\n    for (size_t i = 0; i < count; ++i) {\n      std::shared_ptr<Device> device;\n      if (i >= devices.size()) {\n        device = devices.back();\n      } else {\n        device = devices[i];\n      }\n\n      mems.push_back(\n          std::make_shared<BalancerMockMemory>(device, nullptr, nullptr, 0));\n    }\n\n    return mems;\n  }\n\n protected:\n  void SetUp() override {}\n\n  void TearDown() override {}\n};\n\nclass MockBalancer : public FlowUnitBalancer {\n public:\n  MOCK_METHOD0(GetType, FlowUnitBalanceStrategy());\n  MOCK_METHOD0(OnInit, Status());\n  
MOCK_METHOD1(BindFlowUnit, std::shared_ptr<FlowUnit>(\n                                 const std::shared_ptr<FlowUnitDataContext> &));\n};\n\nTEST_F(FlowUnitBalancerTest, BalancerFactoryTest) {\n  auto mock_balancer = std::make_shared<MockBalancer>();\n  EXPECT_CALL(*mock_balancer, GetType())\n      .WillOnce(testing::Return(FlowUnitBalanceStrategy::FU_NULL));\n  EXPECT_CALL(*mock_balancer, OnInit()).WillOnce(testing::Return(STATUS_OK));\n  EXPECT_CALL(*mock_balancer, BindFlowUnit(testing::_))\n      .WillOnce(testing::Return(nullptr));\n\n  auto create_func = [mock_balancer]() -> std::shared_ptr<FlowUnitBalancer> {\n    return mock_balancer;\n  };\n\n  auto factory = FlowUnitBalancerFactory::GetInstance();\n  factory.RegistBalancer(create_func);\n  auto balancer = factory.CreateBalancer(FlowUnitBalanceStrategy::FU_NULL);\n  EXPECT_EQ(mock_balancer, balancer);\n  std::vector<std::shared_ptr<FlowUnit>> flowunits;\n  EXPECT_EQ(balancer->Init(flowunits), STATUS_FAULT);\n  flowunits.push_back(nullptr);\n  EXPECT_EQ(balancer->Init(flowunits), STATUS_OK);\n  EXPECT_EQ(balancer->GetFlowUnit(nullptr), nullptr);\n}\n\nTEST_F(FlowUnitBalancerTest, RoundRobinTest) {\n  auto balancer = FlowUnitBalancerFactory::GetInstance().CreateBalancer(\n      FlowUnitBalanceStrategy::FU_ROUND_ROBIN);\n  ASSERT_NE(balancer, nullptr);\n  EXPECT_EQ(balancer->GetType(), FlowUnitBalanceStrategy::FU_ROUND_ROBIN);\n  auto devices = CreateDevices(3);\n  EXPECT_EQ(devices[2]->GetDeviceID(), \"2\");\n  auto flowunits = CreateFlowUnits(2, devices);\n  balancer->Init(flowunits);\n  auto mems = CreateMems(3, devices);\n  auto node = std::make_shared<Node>();\n  {\n    auto ctx1 = BuildFlowUnitDataContext(node.get(), mems[1]);\n    auto ctx2 = BuildFlowUnitDataContext(node.get(), mems[0]);\n    auto ctx3 = BuildFlowUnitDataContext(node.get(), mems[2]);\n    auto ctx4 = BuildFlowUnitDataContext(node.get(), mems[2]);\n    // first round\n    auto get_fu = balancer->GetFlowUnit(ctx1);\n    
EXPECT_EQ(get_fu, flowunits[1]);\n    get_fu = balancer->GetFlowUnit(ctx2);\n    EXPECT_EQ(get_fu, flowunits[0]);\n    get_fu = balancer->GetFlowUnit(ctx3);\n    EXPECT_EQ(get_fu, flowunits[0]);\n    get_fu = balancer->GetFlowUnit(ctx4);\n    EXPECT_EQ(get_fu, flowunits[1]);\n    // second round\n    get_fu = balancer->GetFlowUnit(ctx4);\n    EXPECT_EQ(get_fu, flowunits[1]);\n    get_fu = balancer->GetFlowUnit(ctx3);\n    EXPECT_EQ(get_fu, flowunits[0]);\n    get_fu = balancer->GetFlowUnit(ctx2);\n    EXPECT_EQ(get_fu, flowunits[0]);\n    get_fu = balancer->GetFlowUnit(ctx1);\n    EXPECT_EQ(get_fu, flowunits[1]);\n  }\n  {\n    auto ctx1 = BuildFlowUnitDataContext(node.get(), mems[0]);\n    auto ctx2 = BuildFlowUnitDataContext(node.get(), mems[1]);\n    auto ctx3 = BuildFlowUnitDataContext(node.get(), mems[2]);\n    auto ctx4 = BuildFlowUnitDataContext(node.get(), mems[2]);\n    // first round after ctx clear\n    auto get_fu = balancer->GetFlowUnit(ctx4);\n    EXPECT_EQ(get_fu, flowunits[0]);\n    get_fu = balancer->GetFlowUnit(ctx3);\n    EXPECT_EQ(get_fu, flowunits[1]);\n    get_fu = balancer->GetFlowUnit(ctx2);\n    EXPECT_EQ(get_fu, flowunits[1]);\n    get_fu = balancer->GetFlowUnit(ctx1);\n    EXPECT_EQ(get_fu, flowunits[0]);\n  }\n}\n\nTEST_F(FlowUnitBalancerTest, RoundRobinPerfTest) {\n  auto balancer = FlowUnitBalancerFactory::GetInstance().CreateBalancer(\n      FlowUnitBalanceStrategy::FU_ROUND_ROBIN);\n  ASSERT_NE(balancer, nullptr);\n  EXPECT_EQ(balancer->GetType(), FlowUnitBalanceStrategy::FU_ROUND_ROBIN);\n  auto devices = CreateDevices(101);\n  EXPECT_EQ(devices.back()->GetDeviceID(), \"100\");\n  auto flowunits = CreateFlowUnits(100, devices);\n  auto mems = CreateMems(200, devices);\n  std::vector<std::shared_ptr<FlowUnitDataContext>> ctx_list;\n  auto node = std::make_shared<Node>();\n  for (size_t i = 0; i < 200; ++i) {\n    ctx_list.push_back(BuildFlowUnitDataContext(node.get(), mems[i]));\n  }\n\n  balancer->Init(flowunits);\n  const size_t 
test_loop_count = 1000;\n  auto start = std::chrono::steady_clock::now();\n  for (size_t i = 0; i < test_loop_count; ++i) {\n    for (auto &ctx : ctx_list) {\n      balancer->GetFlowUnit(ctx);\n    }\n  }\n  auto end = std::chrono::steady_clock::now();\n  auto cost = std::chrono::duration_cast<std::chrono::microseconds>(end - start)\n                  .count();\n  MBLOG_INFO << \"[RoundRobin] flowunits: \" << flowunits.size()\n             << \", ctx:\" << ctx_list.size()\n             << \", avg cost:\" << cost / test_loop_count << \" microsec\";\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/flowunit_data_executor_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/flowunit_data_executor.h>\n#include <modelbox/node.h>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n\nnamespace modelbox {\n\nclass ExecutorMockMemory : public DeviceMemory {\n public:\n  ExecutorMockMemory(const std::shared_ptr<Device> &device,\n                     const std::shared_ptr<DeviceMemoryManager> &mem_mgr,\n                     const std::shared_ptr<void> &device_mem_ptr, size_t size)\n      : DeviceMemory(device, mem_mgr, device_mem_ptr, size) {}\n};\n\nclass ExecutorMockMemMgr : public DeviceMemoryManager {\n public:\n  ExecutorMockMemMgr() : DeviceMemoryManager(\"0\") { SetMemQuota(1024 * 1024); }\n\n  std::shared_ptr<DeviceMemory> MakeDeviceMemory(\n      const std::shared_ptr<Device> &device, std::shared_ptr<void> mem_ptr,\n      size_t size) override {\n    return std::make_shared<ExecutorMockMemory>(device, shared_from_this(),\n                                                mem_ptr, size);\n  }\n\n  void *Malloc(size_t size, uint32_t mem_flags) override {\n    return new (std::nothrow) uint8_t[size];\n  }\n\n  void Free(void *mem_ptr, uint32_t mem_flags) override {\n    delete[](uint8_t *) mem_ptr;\n  }\n\n  Status Copy(void *dest, size_t dest_size, const void *src_buffer,\n              size_t src_size, DeviceMemoryCopyKind kind) override {\n    return 
STATUS_OK;\n  }\n\n  Status DeviceMemoryCopy(const std::shared_ptr<DeviceMemory> &dest_memory,\n                          size_t dest_offset,\n                          const std::shared_ptr<const DeviceMemory> &src_memory,\n                          size_t src_offset, size_t src_size,\n                          DeviceMemoryCopyKind copy_kind =\n                              DeviceMemoryCopyKind::FromHost) override {\n    return STATUS_OK;\n  }\n\n  Status GetDeviceMemUsage(size_t *free, size_t *total) const override {\n    return STATUS_OK;\n  }\n};\n\nclass ExecutorMockDevice : public Device {\n public:\n  ExecutorMockDevice() : Device(std::make_shared<ExecutorMockMemMgr>()) {}\n\n  std::string GetDeviceID() const override { return \"0\"; }\n};\n\nclass ExecutorMockFlowUnit : public FlowUnit {\n public:\n  Status Open(const std::shared_ptr<Configuration> &config) override {\n    return STATUS_OK;\n  }\n\n  Status Close() override { return STATUS_OK; }\n\n  MOCK_METHOD1(Process, Status(std::shared_ptr<DataContext> data_ctx));\n};\n\nclass ExecutorMockDataContext : public FlowUnitDataContext {\n public:\n  ExecutorMockDataContext(Node *node)\n      : FlowUnitDataContext(node, nullptr, nullptr) {}\n\n  void MockInput(const std::shared_ptr<Device> &device, size_t port_num,\n                 size_t port_data_size) {\n    cur_input_valid_data_.clear();\n    for (size_t port_idx = 0; port_idx < port_num; ++port_idx) {\n      auto &port_data = cur_input_valid_data_[std::to_string(port_idx)];\n      for (size_t data_idx = 0; data_idx < port_data_size; ++data_idx) {\n        auto mem = device->MemAlloc(10);\n        port_data.push_back(std::make_shared<Buffer>(mem));\n      }\n    }\n  }\n\n protected:\n  void UpdateProcessState() override{};\n};\n\nclass ExecutorTestConfig {\n public:\n  size_t device_count{5};\n  size_t input_port_count{1};\n  size_t input_data_count{1};\n  size_t output_port_count{1};\n  size_t process_call_times{2};\n  
std::function<Status(std::shared_ptr<DataContext>)> fu_process;\n  size_t ctx_count{10};\n  size_t batch_size{4};\n  bool need_contiguous{false};\n  FlowType node_flow_type{NORMAL};\n  FlowOutputType node_output_type{ORIGIN};\n  ConditionType node_condition_type{ConditionType::NONE};\n  std::function<void(FUExecContextList &ctx_list)> before_process;\n  Status expect_process_ret{STATUS_SUCCESS};\n  std::function<void(FUExecContextList &ctx_list)> after_process;\n};\n\nclass FlowUnitExecutorTest : public testing::Test {\n public:\n  std::vector<std::shared_ptr<Device>> CreateDevices(size_t count) {\n    std::vector<std::shared_ptr<Device>> devices;\n    devices.reserve(count);\n    for (size_t i = 0; i < count; ++i) {\n      devices.push_back(std::make_shared<ExecutorMockDevice>());\n    }\n\n    return devices;\n  }\n\n  std::vector<std::shared_ptr<FlowUnit>> CreateFlowUnits(\n      const std::vector<std::shared_ptr<Device>> &devices) {\n    std::vector<std::shared_ptr<FlowUnit>> flowunits;\n    flowunits.reserve(devices.size());\n    for (const auto &device : devices) {\n      auto fu = std::make_shared<ExecutorMockFlowUnit>();\n      fu->SetBindDevice(device);\n      flowunits.push_back(fu);\n    }\n\n    return flowunits;\n  }\n\n  std::list<std::shared_ptr<FlowUnitExecContext>> CreateExecCtxs(\n      size_t ctx_count, Node *node,\n      const std::vector<std::shared_ptr<FlowUnit>> &flowunits) {\n    std::list<std::shared_ptr<FlowUnitExecContext>> exec_ctx_list;\n    for (size_t i = 0; i < ctx_count; ++i) {\n      auto data_ctx = std::make_shared<ExecutorMockDataContext>(node);\n      auto exec_ctx = std::make_shared<FlowUnitExecContext>(data_ctx);\n      exec_ctx->SetFlowUnit(flowunits[i % flowunits.size()]);\n      exec_ctx_list.push_back(exec_ctx);\n    }\n\n    return exec_ctx_list;\n  }\n\n  void MockInput(const std::vector<std::shared_ptr<Device>> &devices,\n                 std::list<std::shared_ptr<FlowUnitExecContext>> &exec_ctx_list,\n                 
size_t port_num, size_t port_data_size) {\n    size_t i = 0;\n    for (auto &exec_ctx : exec_ctx_list) {\n      const auto &device = devices[i % devices.size()];\n      ++i;\n      std::dynamic_pointer_cast<ExecutorMockDataContext>(exec_ctx->GetDataCtx())\n          ->MockInput(device, port_num, port_data_size);\n    }\n  }\n\n  void ExecutorTest(const ExecutorTestConfig &cfg) {\n    MBLOG_INFO << \"Flow type \" << cfg.node_flow_type;\n    auto devices = CreateDevices(cfg.device_count);\n    auto flowunits = CreateFlowUnits(devices);\n    for (auto &flowunit : flowunits) {\n      auto mock_fu = std::dynamic_pointer_cast<ExecutorMockFlowUnit>(flowunit);\n      auto desc = mock_fu->GetFlowUnitDesc();\n      for (size_t i = 0; i < cfg.input_port_count; ++i) {\n        FlowUnitInput input_port(std::to_string(i), \"cpu\");\n        input_port.SetDevice(mock_fu->GetBindDevice());\n        desc->AddFlowUnitInput(input_port);\n      }\n\n      for (size_t i = 0; i < cfg.output_port_count; ++i) {\n        desc->AddFlowUnitOutput({std::to_string(i), \"cpu\"});\n      }\n\n      EXPECT_CALL(*mock_fu, Process(testing::_))\n          .Times(cfg.process_call_times)\n          .WillRepeatedly(testing::Invoke(cfg.fu_process));\n    }\n\n    auto node = std::make_shared<Node>();\n    node->SetName(\"test_node\");\n    node->SetFlowType(cfg.node_flow_type);\n    node->SetOutputType(cfg.node_output_type);\n    node->SetConditionType(cfg.node_condition_type);\n    node->SetInputContiguous(cfg.need_contiguous);\n    std::set<std::string> input_names;\n    std::set<std::string> output_names;\n    for (size_t i = 0; i < cfg.input_port_count; ++i) {\n      input_names.insert(std::to_string(i));\n    }\n    for (size_t i = 0; i < cfg.output_port_count; ++i) {\n      output_names.insert(std::to_string(i));\n    }\n    ConfigurationBuilder builder;\n    node->Init(input_names, output_names, builder.Build());\n    auto ctx_list = CreateExecCtxs(cfg.ctx_count, node.get(), flowunits);\n    if 
(cfg.input_port_count > 0) {\n      MockInput(devices, ctx_list, cfg.input_port_count, cfg.input_data_count);\n    }\n\n    if (cfg.before_process) {\n      cfg.before_process(ctx_list);\n    }\n\n    FlowUnitDataExecutor executor(node, cfg.batch_size);\n    executor.SetNeedCheckOutput(true);\n    auto ret = executor.Process(ctx_list);\n    ASSERT_EQ(ret, cfg.expect_process_ret);\n    if (cfg.after_process) {\n      cfg.after_process(ctx_list);\n    }\n  }\n\n  void TestDataPreparePerf(\n      const std::vector<std::shared_ptr<Device>> &devices,\n      const std::vector<std::shared_ptr<FlowUnit>> &flowunits,\n      const std::shared_ptr<Node> &node, bool is_stream) {\n    auto ctx_list = CreateExecCtxs(320, node.get(), flowunits);\n    MockInput(devices, ctx_list, 4, 32);\n    FlowUnitExecDataView data_view(ctx_list);\n    auto start = std::chrono::steady_clock::now();\n    data_view.LoadInputFromExecCtx(true, is_stream, 8, false);\n    auto end = std::chrono::steady_clock::now();\n    auto cost =\n        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)\n            .count();\n    MBLOG_ERROR << \"Prepare view, type:\" << (is_stream ? 
\"stream\" : \"normal\")\n                << \" device:8, stream_per_device:40, port_num:4, \"\n                   \"data_per_stream:32, batch_size:8, cost:\"\n                << cost << \" ms\";\n  }\n\n  void TestWriteBackPerf(\n      const std::vector<std::shared_ptr<Device>> &devices,\n      const std::vector<std::shared_ptr<FlowUnit>> &flowunits,\n      const std::shared_ptr<Node> &node, bool is_stream) {\n    auto ctx_list = CreateExecCtxs(320, node.get(), flowunits);\n    MockInput(devices, ctx_list, 4, 32);\n    FlowUnitExecDataView data_view(ctx_list);\n    data_view.LoadInputFromExecCtx(true, is_stream, 8, false);\n    auto data_flowunits = data_view.GetFlowUnits();\n    for (auto *flowunit : data_flowunits) {\n      auto batched_exec_data_ctx_list =\n          data_view.GetFlowUnitProcessData(flowunit);\n      for (auto &batch_data_ctx : batched_exec_data_ctx_list) {\n        for (auto &data_ctx : batch_data_ctx) {\n          auto outputs = data_ctx->Output();\n          for (auto &port_item : *outputs) {\n            std::vector<size_t> shape(32, 10);\n            port_item.second->Build(shape);\n          }\n        }\n      }\n    }\n    auto start = std::chrono::steady_clock::now();\n    data_view.SaveOutputToExecCtx();\n    auto end = std::chrono::steady_clock::now();\n    auto cost =\n        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)\n            .count();\n    MBLOG_ERROR << \"WriteBack, type:\" << (is_stream ? 
\"stream\" : \"normal\")\n                << \" device:8, stream_per_device:40, port_num:2, \"\n                   \"data_per_stream:32, batch_size:8, cost:\"\n                << cost << \" ms\";\n  }\n\n  std::vector<FlowType> flow_types_ = {NORMAL, STREAM};\n\n protected:\n  void SetUp() override {}\n\n  void TearDown() override {}\n};\n\nTEST_F(FlowUnitExecutorTest, EventInputTest) {\n  ExecutorTestConfig cfg;\n  cfg.input_port_count = 0;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto inputs = data_ctx->Input();\n    EXPECT_EQ(inputs->size(), 0);\n    auto outputs = data_ctx->Output();\n    EXPECT_EQ(outputs->size(), 1);\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1});\n    auto *ptr = (uint8_t *)output->At(0)->MutableData();\n    auto val =\n        std::static_pointer_cast<uint8_t>(data_ctx->GetPrivate(\"test_val\"));\n    ptr[0] = *val;\n    return STATUS_OK;\n  };\n  cfg.before_process = [](FUExecContextList &ctx_list) {\n    uint8_t index = 0;\n    for (auto &ctx : ctx_list) {\n      auto test_val = std::make_shared<uint8_t>(index);\n      ++index;\n      ctx->GetDataCtx()->SetPrivate(\"test_val\", test_val);\n    }\n  };\n  cfg.after_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto outputs = data_ctx->Output();\n      auto val =\n          std::static_pointer_cast<uint8_t>(data_ctx->GetPrivate(\"test_val\"));\n      EXPECT_EQ(outputs->size(), 1);\n      for (auto &out_item : *outputs) {\n        const auto &port_name = out_item.first;\n        auto &port_data = out_item.second;\n        EXPECT_EQ(port_name, \"0\");\n        ASSERT_NE(port_data, nullptr);\n        EXPECT_EQ(port_data->Size(), 1);\n        auto buffer = port_data->At(0);\n        ASSERT_NE(buffer, nullptr);\n        auto *ptr = (uint8_t *)(buffer->MutableData());\n        EXPECT_EQ(*ptr, *val);\n      }\n    }\n  
};\n  cfg.node_flow_type = NORMAL;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, ExpandTest) {\n  ExecutorTestConfig cfg;\n  cfg.output_port_count = 2;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto inputs = data_ctx->Input();\n    EXPECT_EQ(inputs->size(), 1);\n    auto outputs = data_ctx->Output();\n    EXPECT_EQ(outputs->size(), 2);\n    auto input = data_ctx->Input(\"0\");\n    EXPECT_NE(input, nullptr);\n    EXPECT_EQ(input->Size(), 1);\n    auto output1 = data_ctx->Output(\"0\");\n    EXPECT_NE(output1, nullptr);\n    auto output2 = data_ctx->Output(\"1\");\n    EXPECT_NE(output2, nullptr);\n    output1->Build({1, 1});\n    auto *ptr = (uint8_t *)output1->At(0)->MutableData();\n    ptr[0] = 1;\n    ptr = (uint8_t *)output1->At(1)->MutableData();\n    ptr[0] = 2;\n    output2->Build({1, 1});\n    ptr = (uint8_t *)output2->At(0)->MutableData();\n    ptr[0] = 3;\n    ptr = (uint8_t *)output2->At(1)->MutableData();\n    ptr[0] = 4;\n    return STATUS_OK;\n  };\n  cfg.node_output_type = EXPAND;\n  cfg.input_data_count = 1;\n  cfg.batch_size = 1;\n  cfg.after_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto outputs = data_ctx->Output();\n      uint8_t val = 1;\n      EXPECT_EQ(outputs->size(), 2);\n      for (size_t port_idx = 0; port_idx < outputs->size(); ++port_idx) {\n        auto &buffer_list = outputs->at(std::to_string(port_idx));\n        ASSERT_EQ(buffer_list->Size(), 2);\n        auto *ptr = (uint8_t *)(buffer_list->At(0)->ConstData());\n        EXPECT_EQ(*ptr, val);\n        ++val;\n        ptr = (uint8_t *)(buffer_list->At(1)->ConstData());\n        EXPECT_EQ(*ptr, val);\n        ++val;\n      
}\n    }\n  };\n  cfg.node_flow_type = NORMAL;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, CollapseTest) {\n  ExecutorTestConfig cfg;\n  cfg.input_port_count = 2;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto inputs = data_ctx->Input();\n    EXPECT_EQ(inputs->size(), 2);\n    auto outputs = data_ctx->Output();\n    EXPECT_EQ(outputs->size(), 1);\n    auto input = data_ctx->Input(\"0\");\n    EXPECT_NE(input, nullptr);\n    EXPECT_EQ(input->Size(), 4);\n    auto input2 = data_ctx->Input(\"1\");\n    EXPECT_NE(input2, nullptr);\n    EXPECT_EQ(input2->Size(), 4);\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1});\n    auto *ptr = (uint8_t *)output->At(0)->MutableData();\n    ptr[0] = 1;\n    return STATUS_OK;\n  };\n  cfg.node_output_type = COLLAPSE;\n  cfg.input_data_count = 4;\n  cfg.batch_size = 2;\n  cfg.after_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto outputs = data_ctx->Output();\n      auto &buffer_list = outputs->at(\"0\");\n      ASSERT_EQ(buffer_list->Size(), 1);\n      auto *ptr = (uint8_t *)(buffer_list->At(0)->ConstData());\n      EXPECT_EQ(*ptr, 1);\n    }\n  };\n  cfg.node_flow_type = NORMAL;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, OriginErrorTest) {\n  ExecutorTestConfig cfg;\n  cfg.input_port_count = 2;\n  cfg.output_port_count = 2;\n  cfg.process_call_times = 1;\n  cfg.fu_process = [](const 
std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1});\n    output = data_ctx->Output(\"1\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1, 1, 1, 1});\n    return STATUS_OK;\n  };\n  cfg.ctx_count = 5;\n  cfg.input_data_count = 4;\n  cfg.batch_size = 6;\n  cfg.expect_process_ret = STATUS_FAULT;\n  cfg.node_flow_type = NORMAL;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, OriginError2Test) {\n  ExecutorTestConfig cfg;\n  cfg.input_port_count = 2;\n  cfg.output_port_count = 2;\n  cfg.process_call_times = 1;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1, 1, 1});\n    output = data_ctx->Output(\"1\");\n    EXPECT_NE(output, nullptr);\n    output->Build({1, 1, 1});\n    return STATUS_OK;\n  };\n  cfg.ctx_count = 5;\n  cfg.input_data_count = 4;\n  cfg.batch_size = 6;\n  cfg.node_flow_type = NORMAL;\n  cfg.expect_process_ret = STATUS_FAULT;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.expect_process_ret = STATUS_SUCCESS;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  cfg.expect_process_ret = STATUS_FAULT;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  cfg.expect_process_ret = STATUS_SUCCESS;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, OriginTest) {\n  ExecutorTestConfig cfg;\n  cfg.input_port_count = 2;\n  cfg.output_port_count = 2;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    for (size_t port_idx = 0; port_idx < 2; ++port_idx) {\n      auto 
port_name = std::to_string(port_idx);\n      auto input = data_ctx->Input(port_name);\n      EXPECT_NE(input, nullptr);\n      auto output = data_ctx->Output(port_name);\n      EXPECT_NE(output, nullptr);\n      for (auto &buffer : *input) {\n        output->PushBack(buffer);\n      }\n    }\n\n    return STATUS_OK;\n  };\n  cfg.input_data_count = 5;\n  cfg.batch_size = 2;\n  cfg.before_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto inputs = data_ctx->Input();\n      auto ctx_id = std::to_string((uintptr_t)ctx.get());\n      for (size_t port_idx = 0; port_idx < 2; ++port_idx) {\n        auto port_name = std::to_string(port_idx);\n        auto input = data_ctx->Input(port_name);\n        for (size_t buffer_idx = 0; buffer_idx < input->Size(); ++buffer_idx) {\n          auto buffer_id = std::to_string(buffer_idx);\n          auto buffer = input->At(buffer_idx);\n          buffer->Set(\"input_id\", ctx_id += port_name + buffer_id);\n        }\n      }\n    }\n  };\n  cfg.after_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto ctx_id = std::to_string((uintptr_t)ctx.get());\n      for (size_t port_idx = 0; port_idx < 2; ++port_idx) {\n        auto port_name = std::to_string(port_idx);\n        auto output = data_ctx->Output(port_name);\n        ASSERT_NE(output, nullptr);\n        EXPECT_EQ(output->Size(), 5);\n        for (size_t buffer_idx = 0; buffer_idx < output->Size(); ++buffer_idx) {\n          auto buffer_id = std::to_string(buffer_idx);\n          auto buffer = output->At(buffer_idx);\n          std::string input_id;\n          buffer->Get(\"input_id\", input_id);\n          EXPECT_EQ(input_id, ctx_id += port_name + buffer_id);\n        }\n      }\n    }\n  };\n  cfg.node_flow_type = NORMAL;\n  cfg.process_call_times = 5;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  
cfg.process_call_times = 6;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = NORMAL;\n  cfg.need_contiguous = false;\n  cfg.process_call_times = 5;\n  ExecutorTest(cfg);\n  cfg.node_flow_type = STREAM;\n  cfg.need_contiguous = false;\n  cfg.process_call_times = 6;\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, IfElseTest) {\n  ExecutorTestConfig cfg;\n  cfg.process_call_times = 4;\n  cfg.input_port_count = 2;\n  cfg.output_port_count = 2;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({10});\n    return STATUS_OK;\n  };\n  cfg.input_data_count = 2;\n  cfg.batch_size = 2;\n  cfg.node_condition_type = ConditionType::IF_ELSE;\n  cfg.after_process = [](FUExecContextList &ctx_list) {\n    for (auto &ctx : ctx_list) {\n      auto data_ctx = ctx->GetDataCtx();\n      auto outputs = data_ctx->Output();\n      auto buffer_list = outputs->at(\"0\");\n      EXPECT_EQ(buffer_list->Size(), 2);\n      for (auto &buffer : *buffer_list) {\n        EXPECT_NE(buffer, nullptr);\n      }\n      buffer_list = outputs->at(\"1\");\n      EXPECT_EQ(buffer_list->Size(), 2);\n      for (auto &buffer : *buffer_list) {\n        EXPECT_EQ(buffer, nullptr);\n      }\n    }\n  };\n  ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, IfElseErrorTest) {\n  ExecutorTestConfig cfg;\n  cfg.process_call_times = 4;\n  cfg.input_port_count = 2;\n  cfg.output_port_count = 2;\n  cfg.fu_process = [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n    auto output = data_ctx->Output(\"0\");\n    EXPECT_NE(output, nullptr);\n    output->Build({10, 10, 10});\n    output = data_ctx->Output(\"1\");\n    EXPECT_NE(output, nullptr);\n    output->Build({10});\n    return STATUS_OK;\n  };\n  cfg.input_data_count = 2;\n  cfg.batch_size = 2;\n  cfg.node_condition_type = ConditionType::IF_ELSE;\n  cfg.expect_process_ret = STATUS_FAULT;\n  
ExecutorTest(cfg);\n}\n\nTEST_F(FlowUnitExecutorTest, DataViewPerfTest) {\n  /**\n   * case 8 device, 40 stream per device, 32 data per stream, batch is 8\n   */\n  auto devices = CreateDevices(8);\n  auto flowunits = CreateFlowUnits(devices);\n  for (auto &flowunit : flowunits) {\n    auto mock_fu = std::dynamic_pointer_cast<ExecutorMockFlowUnit>(flowunit);\n    auto desc = mock_fu->GetFlowUnitDesc();\n    for (size_t i = 0; i < 4; ++i) {\n      FlowUnitInput in{std::to_string(i), \"cpu\"};\n      in.SetDevice(flowunit->GetBindDevice());\n      desc->AddFlowUnitInput(in);\n    }\n    desc->AddFlowUnitOutput({\"0\", \"cpu\"});\n    desc->AddFlowUnitOutput({\"1\", \"cpu\"});\n    EXPECT_CALL(*mock_fu, Process(testing::_))\n        .WillRepeatedly(testing::Invoke(\n            [](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n              return STATUS_OK;\n            }));\n  }\n  auto node = std::make_shared<Node>();\n  TestDataPreparePerf(devices, flowunits, node, false);\n  TestDataPreparePerf(devices, flowunits, node, true);\n  TestWriteBackPerf(devices, flowunits, node, false);\n  TestWriteBackPerf(devices, flowunits, node, true);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/flowunit_group_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit_group.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"modelbox/flowunit.h\"\n\nnamespace modelbox {\n\nstd::shared_ptr<FlowUnitDataContext> BuildFlowUnitDataContext(\n    size_t size, int& begin_data, Node* node,\n    const std::shared_ptr<Device>& device) {\n  auto stream_data_map = std::make_shared<PortDataMap>();\n  const auto& input_ports = node->GetInputPorts();\n  for (const auto& in_port : input_ports) {\n    auto& data_list = (*stream_data_map)[in_port->GetName()];\n    for (size_t i = 0; i < size; ++i) {\n      auto buffer = std::make_shared<Buffer>(device);\n      buffer->Build(sizeof(int32_t));\n      auto* ptr = (int32_t*)buffer->MutableData();\n      *ptr = begin_data + i;\n      data_list.push_back(buffer);\n    }\n  }\n  begin_data += size;\n  auto data_ctx =\n      std::make_shared<NormalFlowUnitDataContext>(node, nullptr, nullptr);\n  data_ctx->WriteInputData(stream_data_map);\n  return data_ctx;\n}\n\nvoid PrintDataContext(const std::shared_ptr<FlowUnitDataContext>& data_ctx) {\n  
const auto& input = data_ctx->GetInputs();\n  for (const auto& in : input) {\n    MBLOG_DEBUG << in.first;\n    for (const auto& data : in.second) {\n      MBLOG_DEBUG << *((int*)data->ConstData());\n    }\n  }\n}\n\ntemplate <typename FuncImpl>\nvoid CheckDataContext(const std::shared_ptr<FlowUnitDataContext>& data_ctx,\n                      Node* node, FuncImpl func) {\n  const auto& outputs = data_ctx->GetOutputs();\n  for (const auto& out : outputs) {\n    auto buffer_list = out.second;\n    for (auto& data : *buffer_list) {\n      EXPECT_TRUE(func(*(int*)data->ConstData()));\n    }\n  }\n}\n\nclass FlowUnitGroupTest : public testing::Test {\n public:\n  FlowUnitGroupTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n\n  void TearDown() override { flow_->Destroy(); };\n};\n\nTEST_F(FlowUnitGroupTest, Run2_In_1) {\n  auto device_ = flow_->GetDevice();\n\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"batch_size\", \"3\");\n  auto config = configbuilder.Build();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n  auto node_ = std::make_shared<Node>();\n  node_->SetFlowUnitInfo(\"iflow_add_1\", \"cpu\", \"0\", flowunit_mgr_);\n  EXPECT_EQ(node_->Init({\"In_1\"}, {\"Out_1\"}, config), STATUS_OK);\n\n  size_t fug_size = 10;\n  int data = 0;\n  std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n  for (size_t i = 0; i < fug_size; i++) {\n    data_ctx_list.push_back(\n        BuildFlowUnitDataContext(i + 1, data, node_.get(), device_));\n  }\n\n  FlowUnitGroup fug(\"iflow_add_1\", \"cpu\", \"0\", config, nullptr);\n  fug.Init({\"In_1\"}, {\"Out_1\"}, flowunit_mgr_);\n  fug.SetNode(node_);\n  fug.Open([](const std::shared_ptr<Device>& /*unused*/)\n               -> std::shared_ptr<ExternalData> { return nullptr; });\n  fug.Run(data_ctx_list);\n\n  int check_data = 0;\n  for (const auto& data_ctx : data_ctx_list) 
{\n    CheckDataContext(data_ctx, node_.get(), [&](int data) -> bool {\n      return data == (1 + check_data++);\n    });\n  }\n}\n\nTEST_F(FlowUnitGroupTest, Run2_In_2) {\n  auto device_ = flow_->GetDevice();\n\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"batch_size\", \"3\");\n  auto config = configbuilder.Build();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n  auto node_ = std::make_shared<Node>();\n  node_->SetFlowUnitInfo(\"add\", \"cpu\", \"0\", flowunit_mgr_);\n  EXPECT_EQ(node_->Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, config), STATUS_OK);\n\n  size_t fug_size = 10;\n  int data = 0;\n  std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n\n  for (size_t i = 0; i < fug_size; i++) {\n    data_ctx_list.push_back(\n        BuildFlowUnitDataContext(i + 1, data, node_.get(), device_));\n  }\n\n  FlowUnitGroup fug(\"add\", \"cpu\", \"0\", config, nullptr);\n  fug.Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, flowunit_mgr_);\n  fug.SetNode(node_);\n  fug.Open([](const std::shared_ptr<Device>& /*unused*/)\n               -> std::shared_ptr<ExternalData> { return nullptr; });\n  fug.Run(data_ctx_list);\n\n  int check_data = 0;\n  for (const auto& data_ctx : data_ctx_list) {\n    CheckDataContext(data_ctx, node_.get(), [&](int data) -> bool {\n      MBLOG_DEBUG << data << \" = \" << check_data << \" + \" << check_data;\n      return data == (2 * check_data++);\n    });\n  }\n}\n\nTEST_F(FlowUnitGroupTest, Run2_Status_Error) {\n  auto device_ = flow_->GetDevice();\n\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"batch_size\", \"3\");\n  auto config = configbuilder.Build();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n  auto node_ = std::make_shared<Node>();\n  node_->SetFlowUnitInfo(\"add_1_and_error\", \"cpu\", \"0\", flowunit_mgr_);\n  EXPECT_EQ(node_->Init({\"In_1\"}, {\"Out_1\"}, config), STATUS_OK);\n\n  size_t fug_size = 10;\n  int data = 0;\n  
std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n\n  for (size_t i = 0; i < fug_size; i++) {\n    data_ctx_list.push_back(\n        BuildFlowUnitDataContext(i + 1, data, node_.get(), device_));\n  }\n\n  FlowUnitGroup fug(\"add_1_and_error\", \"cpu\", \"0\", config, nullptr);\n  fug.Init({\"In_1\"}, {\"Out_1\"}, flowunit_mgr_);\n  fug.SetNode(node_);\n  fug.Open([](const std::shared_ptr<Device>& /*unused*/)\n               -> std::shared_ptr<ExternalData> { return nullptr; });\n  fug.Run(data_ctx_list);\n\n  int check_data = 0;\n  int idx = 0;\n\n  auto func = [&](int data) -> bool {\n    MBLOG_DEBUG << data << \" = \" << 1 << \" + \" << check_data;\n    return data == (1 + check_data++);\n  };\n\n  for (const auto& data_ctx : data_ctx_list) {\n    const auto& outputs = data_ctx->GetOutputs();\n\n    for (const auto& out : outputs) {\n      auto buffer_list = out.second;\n\n      for (auto& data : *buffer_list) {\n        if (data->HasError()) {\n          EXPECT_EQ(data->ConstData(), nullptr);\n          ++check_data;\n          continue;\n        }\n\n        EXPECT_TRUE(func(*(int*)data->ConstData()));\n      }\n    }\n\n    idx++;\n  }\n}\n\nTEST_F(FlowUnitGroupTest, Run2_Condition) {\n  auto device_ = flow_->GetDevice();\n\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"batch_size\", \"1\");\n  auto config = configbuilder.Build();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n  auto node_ = std::make_shared<Node>();\n  node_->SetFlowUnitInfo(\"test_condition\", \"cpu\", \"0\", flowunit_mgr_);\n\n  EXPECT_EQ(node_->Init({\"In_1\"}, {\"Out_1\", \"Out_2\"}, config), STATUS_OK);\n\n  size_t fug_size = 10;\n  int data = 0;\n  std::list<std::shared_ptr<FlowUnitDataContext>> data_ctx_list;\n\n  for (size_t i = 0; i < fug_size; i++) {\n    data_ctx_list.push_back(\n        BuildFlowUnitDataContext(i + 1, data, node_.get(), device_));\n  }\n\n  FlowUnitGroup fug(\"test_condition\", \"cpu\", \"0\", config, nullptr);\n  
fug.Init({\"In_1\"}, {\"Out_1\", \"Out_2\"}, flowunit_mgr_);\n  fug.SetNode(node_);\n  fug.Open([](const std::shared_ptr<Device>& /*unused*/)\n               -> std::shared_ptr<ExternalData> { return nullptr; });\n  fug.Run(data_ctx_list);\n\n  int check_data = 0;\n  int idx = 0;\n\n  for (const auto& data_ctx : data_ctx_list) {\n    EXPECT_FALSE(data_ctx->HasError());\n    const auto& outputs = data_ctx->GetOutputs();\n    const auto& output_1 = outputs.at(\"Out_1\");\n    const auto& output_2 = outputs.at(\"Out_2\");\n\n    EXPECT_EQ(output_1->Size(), output_2->Size());\n    for (size_t i = 0; i < output_1->Size(); ++i) {\n      if ((idx * (idx + 1) / 2 + i) % 2 == 0 &&\n          (idx * (idx + 1) / 2 + i) != 10) {\n        if (output_1->At(i)->HasError()) {\n          EXPECT_EQ(output_1->ConstBufferData(i), nullptr);\n          ++check_data;\n          continue;\n        }\n        EXPECT_NE(output_1->At(i), nullptr);\n        auto* data = (int*)output_1->ConstBufferData(i);\n        EXPECT_EQ(*data, check_data++);\n      } else {\n        if (output_2->At(i) == nullptr) {\n          ++check_data;\n          continue;\n        }\n        EXPECT_NE(output_2->At(i), nullptr);\n        if (!output_2->At(i)->HasError()) {\n          auto* data = (int*)output_2->ConstBufferData(i);\n          EXPECT_EQ(*data, check_data);\n        }\n        ++check_data;\n      }\n    }\n\n    idx++;\n  }\n}\n\nTEST_F(FlowUnitGroupTest, Init) {\n  ConfigurationBuilder configbuilder;\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n\n  auto config = configbuilder.Build();\n  auto valid_flg =\n      std::make_shared<FlowUnitGroup>(\"test_0_2\", \"cpu\", \"0\", config, nullptr);\n  EXPECT_EQ(valid_flg->Init({}, {\"Out_1\"}, flowunit_mgr), STATUS_BADCONF);\n  EXPECT_EQ(valid_flg->Init({}, {\"Out_1\", \"Out_2\"}, flowunit_mgr),\n            STATUS_SUCCESS);\n  auto invalid_flg = std::make_shared<FlowUnitGroup>(\"invalid_test\", \"cpu\", \"0\",\n                                     
                config, nullptr);\n  EXPECT_EQ(invalid_flg->Init({}, {\"Out_1\", \"Out_2\"}, flowunit_mgr),\n            STATUS_NOTFOUND);\n}\n\nTEST_F(FlowUnitGroupTest, Open_Close) {\n  ConfigurationBuilder configbuilder;\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n\n  bool flag = false;\n\n  auto func_2 = [&](const std::shared_ptr<Device>& event)\n      -> std::shared_ptr<ExternalData> {\n    flag = true;\n    return nullptr;\n  };\n\n  {\n    auto config = configbuilder.Build();\n    FlowUnitGroup fug(\"listen\", \"cpu\", \"0\", config, nullptr);\n    EXPECT_TRUE(fug.Init({}, {\"Out_1\", \"Out_2\"}, flowunit_mgr));\n    auto flowunit = fug.GetExecutorUnit();\n    EXPECT_EQ(fug.Open(func_2), STATUS_OK);\n    EXPECT_EQ(fug.Close(), STATUS_OK);\n  }\n\n  {\n    auto config = configbuilder.Build();\n    FlowUnitGroup fug(\"listen\", \"cpu\", \"0\", config, nullptr);\n    EXPECT_TRUE(fug.Init({}, {\"Out_1\", \"Out_2\"}, flowunit_mgr));\n    auto flowunit = fug.GetExecutorUnit();\n    EXPECT_EQ(fug.Open(nullptr), STATUS_OK);\n    EXPECT_EQ(fug.Close(), STATUS_OK);\n  }\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/flowunit_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/flowunit.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/config.h\"\n#include \"modelbox/base/driver.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/base/utils.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"virtualdriver_python.h\"\n\nusing ::testing::_;\n\nnamespace modelbox {\n\nclass FlowUnitTest : public testing::Test {\n public:\n  FlowUnitTest() = default;\n\n protected:\n  MockDriverCtl ctl;\n\n  void SetUp() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    modelbox::DriverDesc desc;\n    MockFlowUnitDriverDesc desc_flowunit;\n\n    desc.SetClass(\"DRIVER-DEVICE\");\n    desc.SetType(\"cpu\");\n    desc.SetName(\"device-driver-cpu\");\n    desc.SetDescription(\"the cpu device\");\n    desc.SetVersion(\"8.9.2\");\n    std::string file_path_device =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n    desc.SetFilePath(file_path_device);\n    ctl.AddMockDriverDevice(\"cpu\", desc);\n    auto status_drivers_add = drivers->Add(file_path_device);\n    EXPECT_EQ(status_drivers_add, STATUS_OK);\n\n    std::shared_ptr<DeviceManager> device_mgr = 
DeviceManager::GetInstance();\n    Status status_device_init = device_mgr->InitDeviceFactory(drivers);\n    EXPECT_EQ(status_device_init, STATUS_OK);\n\n    auto cpu_factory = device_mgr->GetDeviceFactoryList().begin();\n    auto mockdevice_factory =\n        std::dynamic_pointer_cast<MockDeviceFactory>(cpu_factory->second);\n    EXPECT_CALL(*mockdevice_factory, DeviceProbe())\n        .WillRepeatedly(testing::Invoke([&]() {\n          std::map<std::string, std::shared_ptr<DeviceDesc>> tmp_map;\n          std::shared_ptr<DeviceDesc> device_desc =\n              std::make_shared<DeviceDesc>();\n          device_desc->SetDeviceId(\"0\");\n          device_desc->SetDeviceDesc(\"test desc\");\n          device_desc->SetDeviceMemory(\"8Gi\");\n          device_desc->SetDeviceVersion(\"xxxx\");\n          device_desc->SetDeviceType(\"CPU\");\n          tmp_map.insert(std::make_pair(\"0\", device_desc));\n          return tmp_map;\n        }));\n\n    Status status_device_probe = device_mgr->DeviceProbe();\n    EXPECT_EQ(status_device_probe, STATUS_OK);\n\n    EXPECT_CALL(*mockdevice_factory, CreateDevice(_))\n        .WillRepeatedly(testing::Invoke([&](const std::string& device_id) {\n          return std::make_shared<MockDevice>();\n        }));\n\n    desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n    desc_flowunit.SetType(\"cpu\");\n    desc_flowunit.SetName(\"httpserver\");\n    desc_flowunit.SetDescription(\"the cpu httpserver\");\n    desc_flowunit.SetVersion(\"1.0.0\");\n    std::string file_path_flowunit =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-httpserver.so\";\n    desc_flowunit.SetFilePath(file_path_flowunit);\n    auto mock_flowunit = std::make_shared<MockFlowUnit>();\n    auto device = device_mgr->CreateDevice(\"cpu\", \"0\");\n    EXPECT_NE(device, nullptr);\n    device->SetMemQuota(10240);\n\n    auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n    mock_flowunit_desc->SetFlowUnitName(\"httpserver\");\n    
mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"input\"));\n    mock_flowunit_desc->AddFlowUnitOutput(modelbox::FlowUnitOutput(\"output\"));\n    mock_flowunit_desc->AddFlowUnitOption(modelbox::FlowUnitOption(\n        \"ip\", \"string\", true, \"127.0.0.1\", \"input ip\"));\n    mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n    mock_flowunit->SetBindDevice(device);\n    std::weak_ptr<MockFlowUnit> mock_flowunit_wp;\n    mock_flowunit_wp = mock_flowunit;\n    EXPECT_CALL(*mock_flowunit, Open(_))\n        .WillRepeatedly(\n            testing::Invoke([=](const std::shared_ptr<Configuration>& opts) {\n              if (auto spt = mock_flowunit_wp.lock()) {\n                auto device = spt->GetBindDevice();\n              }\n              return modelbox::STATUS_OK;\n            }));\n    desc_flowunit.SetMockFlowUnit(mock_flowunit);\n    ctl.AddMockDriverFlowUnit(\"httpserver\", \"cpu\", desc_flowunit);\n\n    status_drivers_add = drivers->Add(file_path_flowunit);\n    EXPECT_EQ(status_drivers_add, STATUS_OK);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    std::shared_ptr<FlowUnitManager> flowunit_mgr =\n        FlowUnitManager::GetInstance();\n    flowunit_mgr->Clear();\n    device_mgr->Clear();\n    drivers->Clear();\n  };\n};\n\nTEST_F(FlowUnitTest, InitFlowUnitFactory) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  std::shared_ptr<FlowUnitManager> flowunit_mgr =\n      FlowUnitManager::GetInstance();\n\n  flowunit_mgr->InitFlowUnitFactory(drivers);\n  auto factory_list = flowunit_mgr->GetFlowUnitFactoryList();\n  for (auto iter = factory_list.begin(); iter != factory_list.end(); iter++) {\n    EXPECT_EQ(iter->first.first, \"cpu\");\n    EXPECT_EQ(iter->first.second, \"httpserver\");\n    EXPECT_NE(iter->second, nullptr);\n  }\n}\n\nTEST_F(FlowUnitTest, Probe) {\n  
std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  std::shared_ptr<FlowUnitManager> flowunit_mgr =\n      FlowUnitManager::GetInstance();\n\n  Status status1 = flowunit_mgr->InitFlowUnitFactory(drivers);\n  Status status2 = flowunit_mgr->FlowUnitProbe();\n\n  EXPECT_EQ(status1, STATUS_OK);\n  EXPECT_EQ(status2, STATUS_OK);\n\n  auto desc_list = flowunit_mgr->GetFlowUnitDescList();\n  auto iter1 = desc_list.find(\"cpu\");\n  EXPECT_EQ(iter1->first, \"cpu\");\n\n  auto iter2 = iter1->second.find(\"httpserver\");\n  EXPECT_EQ(iter2->first, \"httpserver\");\n  auto flowunit_desc = iter2->second;\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  std::vector<FlowUnitInput> input_list = flowunit_desc->GetFlowUnitInput();\n  std::vector<FlowUnitOutput> output_list = flowunit_desc->GetFlowUnitOutput();\n  std::vector<FlowUnitOption> option_list = flowunit_desc->GetFlowUnitOption();\n  EXPECT_EQ(input_list[0].GetPortName(), \"input\");\n  EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n  EXPECT_EQ(option_list[0].GetOptionName(), \"ip\");\n  EXPECT_EQ(option_list[0].GetOptionType(), \"string\");\n  EXPECT_EQ(option_list[0].IsRequire(), true);\n  EXPECT_EQ(option_list[0].GetOptionDefault(), \"127.0.0.1\");\n  EXPECT_EQ(option_list[0].GetOptionDesc(), \"input ip\");\n}\n\nTEST_F(FlowUnitTest, CreateFlowUnit) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  auto device_mgr = DeviceManager::GetInstance();\n  device_mgr->Initialize(drivers, configbuilder.Build());\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  flowunit_mgr->Initialize(drivers, device_mgr, configbuilder.Build());\n\n  auto flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\");\n  EXPECT_EQ(flowunit[0]->GetBindDevice()->GetDeviceManager()->GetDrivers(),\n            drivers);\n\n  EXPECT_EQ(flowunit.size(), 1);\n  auto flowunit_desc = flowunit[0]->GetFlowUnitDesc();\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), 
\"httpserver\");\n  std::vector<FlowUnitInput> input_list = flowunit_desc->GetFlowUnitInput();\n  std::vector<FlowUnitOutput> output_list = flowunit_desc->GetFlowUnitOutput();\n  EXPECT_EQ(input_list[0].GetPortName(), \"input\");\n  EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n\n  auto flowunit_device = flowunit[0]->GetBindDevice();\n  auto device_desc = flowunit_device->GetDeviceDesc();\n  EXPECT_EQ(device_desc->GetDeviceDesc(), \"test desc\");\n  EXPECT_EQ(device_desc->GetDeviceId(), \"0\");\n  EXPECT_EQ(device_desc->GetDeviceMemory(), \"8Gi\");\n  EXPECT_EQ(device_desc->GetDeviceVersion(), \"xxxx\");\n  EXPECT_EQ(device_desc->GetDeviceType(), \"CPU\");\n  auto config = configbuilder.Build();\n  EXPECT_EQ(flowunit[0]->Open(config), STATUS_OK);\n\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit_desc = flowunit[0]->GetFlowUnitDesc();\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  input_list = flowunit_desc->GetFlowUnitInput();\n  output_list = flowunit_desc->GetFlowUnitOutput();\n  EXPECT_EQ(input_list[0].GetPortName(), \"input\");\n  EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n  flowunit_device = flowunit[0]->GetBindDevice();\n  device_desc = flowunit_device->GetDeviceDesc();\n  EXPECT_EQ(device_desc->GetDeviceDesc(), \"test desc\");\n  EXPECT_EQ(device_desc->GetDeviceId(), \"0\");\n  EXPECT_EQ(device_desc->GetDeviceMemory(), \"8Gi\");\n  EXPECT_EQ(device_desc->GetDeviceVersion(), \"xxxx\");\n  EXPECT_EQ(device_desc->GetDeviceType(), \"CPU\");\n  EXPECT_EQ(flowunit[0]->Open(config), STATUS_OK);\n\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu\", \"0\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit_desc = flowunit[0]->GetFlowUnitDesc();\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  input_list = flowunit_desc->GetFlowUnitInput();\n  output_list = flowunit_desc->GetFlowUnitOutput();\n  EXPECT_EQ(input_list[0].GetPortName(), 
\"input\");\n  EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n  flowunit_device = flowunit[0]->GetBindDevice();\n  device_desc = flowunit_device->GetDeviceDesc();\n  EXPECT_EQ(device_desc->GetDeviceDesc(), \"test desc\");\n  EXPECT_EQ(device_desc->GetDeviceId(), \"0\");\n  EXPECT_EQ(device_desc->GetDeviceMemory(), \"8Gi\");\n  EXPECT_EQ(device_desc->GetDeviceVersion(), \"xxxx\");\n  EXPECT_EQ(device_desc->GetDeviceType(), \"CPU\");\n  EXPECT_EQ(flowunit[0]->Open(config), STATUS_OK);\n\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit_desc = flowunit[0]->GetFlowUnitDesc();\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  EXPECT_EQ(flowunit[0]->GetBindDevice()->GetDeviceID(), \"0\");\n}\n\nTEST_F(FlowUnitTest, CreateFlowUnitFail) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  ConfigurationBuilder configbuilder;\n\n  flowunit_mgr->Initialize(drivers, device_mgr, configbuilder.Build());\n\n  auto flowunit = flowunit_mgr->CreateFlowUnit(\"test\");\n  EXPECT_EQ(flowunit.size(), 0);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cuda\");\n  EXPECT_EQ(flowunit.size(), 0);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu\", \"1\");\n  EXPECT_EQ(flowunit.size(), 0);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0\", \"1\");\n  EXPECT_EQ(flowunit.size(), 0);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0,1\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0\" + std::string(LIST_DELIMITER) + \"1\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0,1;cuda\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0\" + std::string(LIST_DELIMITER) + 
\"1;cuda\");\n  EXPECT_EQ(flowunit.size(), 1);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0:1;cuda\");\n  EXPECT_EQ(flowunit.size(), 0);\n  flowunit = flowunit_mgr->CreateFlowUnit(\"httpserver\", \"cpu:0;1;cuda\");\n  EXPECT_EQ(flowunit.size(), 1);\n}\n\nTEST_F(FlowUnitTest, GetFlowUnitDesc) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  auto device_mgr = DeviceManager::GetInstance();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  flowunit_mgr->Initialize(drivers, device_mgr, configbuilder.Build());\n\n  auto flowunit_desc = flowunit_mgr->GetFlowUnitDesc(\"cpu\", \"httpserver\");\n  EXPECT_TRUE(flowunit_desc != nullptr);\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  auto input_list = flowunit_desc->GetFlowUnitInput();\n  auto output_list = flowunit_desc->GetFlowUnitOutput();\n  EXPECT_EQ(input_list[0].GetPortName(), \"input\");\n  EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n\n  flowunit_desc = flowunit_mgr->GetFlowUnitDesc(\"cuda\", \"httpserver\");\n  EXPECT_TRUE(flowunit_desc == nullptr);\n}\n\nTEST_F(FlowUnitTest, GetAllFlowUnitDesc) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  auto device_mgr = DeviceManager::GetInstance();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  flowunit_mgr->Initialize(drivers, device_mgr, configbuilder.Build());\n\n  auto flowunit_vec = flowunit_mgr->GetAllFlowUnitDesc();\n  EXPECT_EQ(flowunit_vec.size(), 1);\n  auto flowunit_desc = flowunit_vec[0];\n  EXPECT_EQ(flowunit_desc->GetFlowUnitName(), \"httpserver\");\n  auto input_list = flowunit_desc->GetFlowUnitInput();\n  auto output_list = flowunit_desc->GetFlowUnitOutput();\n  std::vector<FlowUnitOption> option_list = flowunit_desc->GetFlowUnitOption();\n  auto driver_desc = flowunit_desc->GetDriverDesc();\n  EXPECT_EQ(input_list[0].GetPortName(), \"input\");\n  
EXPECT_EQ(output_list[0].GetPortName(), \"output\");\n  EXPECT_EQ(option_list[0].GetOptionName(), \"ip\");\n  EXPECT_EQ(option_list[0].GetOptionType(), \"string\");\n  EXPECT_EQ(option_list[0].IsRequire(), true);\n  EXPECT_EQ(option_list[0].GetOptionDefault(), \"127.0.0.1\");\n  EXPECT_EQ(option_list[0].GetOptionDesc(), \"input ip\");\n  EXPECT_EQ(driver_desc->GetName(), \"httpserver\");\n  EXPECT_EQ(driver_desc->GetType(), \"cpu\");\n  EXPECT_EQ(driver_desc->GetClass(), \"DRIVER-FLOWUNIT\");\n  EXPECT_EQ(driver_desc->GetDescription(), \"the cpu httpserver\");\n  EXPECT_EQ(driver_desc->GetVersion(), \"1.0.0\");\n}\n\nTEST_F(FlowUnitTest, FlowUnitDescCheckGroupType) {\n  FlowUnitDesc flow_desc;\n  flow_desc.SetFlowUnitGroupType(\"input\");\n  EXPECT_TRUE(flow_desc.GetGroupType().empty());\n\n  flow_desc.SetFlowUnitGroupType(\"Input $@#@\");\n  EXPECT_TRUE(flow_desc.GetGroupType().empty());\n\n  flow_desc.SetFlowUnitGroupType(\"Input/http/reply\");\n  EXPECT_TRUE(flow_desc.GetGroupType().empty());\n\n  flow_desc.SetFlowUnitGroupType(\"Input\");\n  EXPECT_EQ(flow_desc.GetGroupType(), \"Input\");\n\n  flow_desc.SetFlowUnitGroupType(\"Input321\");\n  EXPECT_EQ(flow_desc.GetGroupType(), \"Input321\");\n\n  flow_desc.SetFlowUnitGroupType(\"Input_321\");\n  EXPECT_EQ(flow_desc.GetGroupType(), \"Input_321\");\n\n  flow_desc.SetFlowUnitGroupType(\"Input/http\");\n  EXPECT_EQ(flow_desc.GetGroupType(), \"Input/http\");\n}\n\nclass VirtualFlowUnitTest : public testing::Test {\n public:\n  VirtualFlowUnitTest() = default;\n\n  void SetUp() override {\n    std::string misc_python_src_path = std::string(PYTHON_PATH);\n    misc_python_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-python.so\";\n    CopyFile(misc_python_src_path, misc_python_dest_path, 0, true);\n\n    std::string virtual_python_src_path = std::string(VIRTUAL_PYTHON_PATH);\n    virtual_python_dest_path =\n        std::string(TEST_LIB_DIR) + \"/libmodelbox-virtualdriver-python.so\";\n    
CopyFile(virtual_python_src_path, virtual_python_dest_path, 0, true);\n  };\n\n  void TearDown() override {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n    std::shared_ptr<FlowUnitManager> flowunit_mgr =\n        FlowUnitManager::GetInstance();\n    flowunit_mgr->Clear();\n    device_mgr->Clear();\n    drivers->Clear();\n\n    remove(misc_python_dest_path.c_str());\n    remove(virtual_python_dest_path.c_str());\n  };\n\n  MockDriverCtl ctl;\n\n private:\n  std::string misc_python_dest_path;\n  std::string virtual_python_dest_path;\n};\n\nTEST_F(VirtualFlowUnitTest, VirtualTest) {\n  std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(DRIVER_DIR, std::string(TEST_ASSETS));\n  configbuilder.AddProperty(DRIVER_SKIP_DEFAULT, \"true\");\n  std::shared_ptr<Configuration> config = configbuilder.Build();\n  modelbox::DriverDesc desc;\n  MockFlowUnitDriverDesc desc_flowunit;\n\n  desc.SetClass(\"DRIVER-DEVICE\");\n  desc.SetType(\"cpu\");\n  desc.SetName(\"device-driver-cpu\");\n  desc.SetDescription(\"the cpu device\");\n  desc.SetVersion(\"8.9.2\");\n  std::string file_path_device =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-device-cpu.so\";\n  desc.SetFilePath(file_path_device);\n  ctl.AddMockDriverDevice(\"cpu\", desc);\n\n  bool result = drivers->Initialize(config);\n  EXPECT_TRUE(result);\n  result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-device-*\");\n  EXPECT_TRUE(result);\n\n  std::string file_misc_python =\n      std::string(TEST_LIB_DIR) + \"/libmodelbox-unit-cpu-python.so\";\n  result = drivers->Add(file_misc_python);\n  EXPECT_TRUE(result);\n  result = drivers->Scan(TEST_LIB_DIR, \"libmodelbox-virtualdriver-python.so\");\n  drivers->VirtualDriverScan();\n  EXPECT_TRUE(result);\n  std::shared_ptr<DeviceManager> device_mgr = DeviceManager::GetInstance();\n  
device_mgr->Initialize(drivers, config);\n\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  flowunit_mgr->Initialize(drivers, device_mgr, config);\n  auto flowunit_python =\n      flowunit_mgr->CreateFlowUnit(\"httpserver_python\", \"cpu\");\n  auto desc_python = flowunit_python[0]->GetFlowUnitDesc();\n  EXPECT_EQ(desc_python->GetFlowUnitName(), \"httpserver_python\");\n  auto input = desc_python->GetFlowUnitInput();\n  auto output = desc_python->GetFlowUnitOutput();\n  EXPECT_EQ(input[0].GetPortName(), \"image\");\n  EXPECT_EQ(input[0].GetDeviceType(), \"cpu\");\n  EXPECT_EQ(input[1].GetPortName(), \"anchor\");\n  EXPECT_EQ(output[0].GetPortName(), \"output\");\n  EXPECT_EQ(output[0].GetDeviceType(), \"cpu\");\n  EXPECT_EQ(desc_python->GetConditionType(), NONE);\n  EXPECT_EQ(desc_python->GetOutputType(), ORIGIN);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/graph_checker_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/graph_checker.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"graph_conf_mockgraphconf/graph_conf_mockgraphconf.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nclass GraphCheckerTest : public testing::Test {\n public:\n  GraphCheckerTest() = default;\n\n protected:\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_0_1\", {}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_0_2\", {}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_3_0\", {\"In_1\", \"In_2\", \"In_3\"}, {});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      
flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_2_0\", {\"In_1\", \"In_2\"}, {});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_1_0\", {\"In_1\"}, {});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_1_normal\", {\"In_1\"}, {\"Out_1\"});\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      mock_desc->SetStreamSameCount(true);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_1_same_name\", {\"In_1\"}, {\"In_1\"});\n      mock_desc->SetFlowType(STREAM);\n      mock_desc->SetStreamSameCount(true);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"stream_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      mock_desc->SetStreamSameCount(false);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = 
GenerateFlowunitDesc(\"condition_1_3\", {\"In_1\"},\n                                            {\"Out_1\", \"Out_2\", \"Out_3\"});\n      mock_desc->SetConditionType(IF_ELSE);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"condition_1_2\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetConditionType(IF_ELSE);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    // expand and collapse\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"collapse_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetOutputType(COLLAPSE);\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"collapse_2_1\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n      mock_desc->SetOutputType(COLLAPSE);\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"expand_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetOutputType(EXPAND);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"expand_1_2\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetOutputType(EXPAND);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, 
mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"expand_2_2\", {\"In_1\", \"In_2\"},\n                                            {\"Out_1\", \"Out_2\"});\n      mock_desc->SetOutputType(EXPAND);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_2\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_1_3\", {\"In_1\"},\n                                            {\"Out_1\", \"Out_2\", \"Out_3\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_2_normal\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetFlowType(NORMAL);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_3_1\", {\"In_1\", \"In_2\", \"In_3\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_2_1\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, 
mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\n          \"test_4_1\", {\"In_1\", \"In_2\", \"In_3\", \"In_4\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_loop\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetLoopType(LOOP);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\n          \"test_loop_invalid\", {\"In_1\", \"In_2\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetLoopType(LOOP);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_1_stream\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    flow_->Init(false);\n  }\n\n  void TearDown() override {\n    auto flowunit_mgr = FlowUnitManager::GetInstance();\n    flowunit_mgr->Clear();\n    auto device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    auto drivers = Drivers::GetInstance();\n    drivers->Clear();\n  }\n\n  void BuildGcGraph(const std::shared_ptr<Configuration> &config,\n                    std::shared_ptr<GCGraph> &gcgraph) {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n\n    auto device_mgr = DeviceManager::GetInstance();\n    device_mgr->Initialize(drivers, config);\n\n    auto flowunit_mgr = FlowUnitManager::GetInstance();\n    
flowunit_mgr->Initialize(drivers, device_mgr, config);\n\n    GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n    graphconf_mgr.Initialize(drivers, config);\n    auto graphvizconf = graphconf_mgr.LoadGraphConfig(config);\n    gcgraph = graphvizconf->Resolve();\n  }\n\n  std::shared_ptr<Graph> InitGraph(\n      const std::shared_ptr<Configuration> &config) {\n    auto device_mgr = DeviceManager::GetInstance();\n    auto flowunit_mgr = FlowUnitManager::GetInstance();\n    auto graph = std::make_shared<Graph>();\n    graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n    return graph;\n  }\n\n  Status BuildGraph(const std::shared_ptr<Configuration> &config,\n                    std::shared_ptr<Graph> &graph) {\n    std::shared_ptr<GCGraph> gcgraph;\n    BuildGcGraph(config, gcgraph);\n    if (!gcgraph) {\n      return STATUS_BADCONF;\n    }\n\n    graph = InitGraph(config);\n    return graph->Build(gcgraph);\n  }\n\n  std::shared_ptr<Node> CastNode(const std::shared_ptr<NodeBase> &node) {\n    return std::dynamic_pointer_cast<Node>(node);\n  }\n\n  void TestGraph(const std::string &graph, const Status &status) {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"graph.format\", \"graphviz\");\n    config->SetProperty(\"graph.graphconf\", graph);\n    std::shared_ptr<Graph> mb_graph;\n    EXPECT_EQ(BuildGraph(config, mb_graph), status);\n  }\n\n private:\n  std::shared_ptr<MockFlow> flow_;\n};\n\nTEST_F(GraphCheckerTest, VirtualNode_NormalFlow) {\n  std::string conf_file_value =\n      R\"(\n        digraph demo {\n          input1[type=input]\n          output1[type=output]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          input1 -> b:In_1\n          b:Out_1 -> output1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, VirtualNode_MatchAtVirtualInput) {\n  std::string conf_file_value =\n      
R\"(\n        digraph demo {\n          input1[type=input]\n          input2[type=input]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          input1 -> b:In_1\n          input2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, VirtualNode_MatchMultiInputOutput) {\n  std::string conf_file_value =\n      R\"(\n        digraph demo {\n          input1[type=input]\n          input2[type=input]\n          output1[type=output]\n          output2[type=output]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          input1 -> b:In_1\n          input2 -> c:In_1\n          b:Out_1 -> output1\n          c:Out_1 -> output2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n/*\n  a --> b --> d\n    |         |\n    |         |\n    c --------\n*/\n\nTEST_F(GraphCheckerTest, SinglePortMatch_SingleOutPortLinkMultiInPort) {\n  std::string conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_1 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\n/*\n  a --> b --> d\n    |       |\n    |       |\n    c ------\n*/\n\nTEST_F(GraphCheckerTest, SinglePortNotMatch_SingleOutPortLinkSingleInPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          
a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_1 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\n/*\n  a --> b --> d\n  |           |\n  |           |\n  c ----------\n*/\n\nTEST_F(GraphCheckerTest, MuliPortMatch_MultiOutPortLinkMultiInPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\n/*\n  a --> b --> d\n  |         |\n  |         |\n  c --------\n*/\n\nTEST_F(GraphCheckerTest, MuliPortNotMatch_MultiOutPortLinkSingleInPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_1\n        }\n      )\";\n\n  
TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_OneInPortThreeOutPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_3, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          b:Out_3 -> e:In_1\n          c:Out_1 -> f:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_OutConditionInMultiPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> e:In_2\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          d:Out_1 -> e:In_1\n          c:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_MutiConditionInSinglePort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, 
device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          d:Out_1 -> e:In_1\n          c:Out_1 -> e:In_1\n          c:Out_2 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotMatch_AllOutPortLinkDifferenceInPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_3, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          b:Out_3 -> e:In_1\n          c:Out_1 -> f:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotMatch_MultiOutPortLinkInPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_3_0, 
device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_1 -> d:In_1\n          b:Out_2 -> e:In_1\n          c:Out_1 -> f:In_1\n          d:Out_1 -> f:In_2\n          e:Out_1 -> f:In_3\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotMatch_SinglePortConditionNotMatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_3, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> c:In_2\n          b:Out_3 -> d:In_1\n          c:Out_1 -> d:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_SinglePortMatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_1 -> c:In_2\n          b:Out_2 -> d:In_1\n          c:Out_1 -> d:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, LoopMatch_LoopSelf) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1 \n          b:Out_1 -> b:In_1\n          
b:Out_2 -> c:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, LoopMatch_LoopHasNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> d:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, LoopNotMatch_OverHierarchyLink) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_2_normal, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> d:In_1\n          c:Out_2 -> d:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_NormalFlow) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, 
STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_OnlyExpand) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_OnlyCollapse) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_OverMatchArch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          f[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n          g:Out_1 -> h:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, 
STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_ExpandInMatchArch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          f[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n          g:Out_1 -> h:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_ExpandIsMatchNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> e:In_1\n          d:Out_1 -> e:In_2\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest,\n       ExpandCollapseMatch_MultiOutputExpandDirectConnectCollapse) {\n  const auto *conf_file_value 
=\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> d:In_1\n          b:Out_2 -> d:In_2\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_CollapseIsMatchNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1 \n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_CollapseIsMatchNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, 
device=cpu, deviceid=0] \n          a:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_CollapseInMatchArch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest,\n       ExpandCollapseNotMatch_CollapseInMatchArch_SinglePathMatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, 
ExpandCollapseNotMatch_OneExpandMultiCollapse) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseMatch_MultiArch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0] \n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> e:In_1\n          d:Out_1 -> f:In_1\n          e:Out_1 -> g:In_1\n          f:Out_1 -> g:In_2\n          g:Out_1 -> h:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_OverHierarchyLink_FromOutToIn) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          
a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0, label=\"<Out_1>\"] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"] \n          c[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1> \"]\n          d[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0, label=\"<In_1>\"] \n          e[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_2\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ExpandCollapseNotMatch_OverHierarchyLink_FromInToOut) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1> \"]\n          d[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0, label=\"<In_1>\"] \n          e[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_2\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotMatch_OverHierarchyLink_FromOutToIn) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, 
flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_2\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> e:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotMatch_OverHierarchyLink_FromInToOut) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> e:In_1\n          c:Out_2 -> f:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotAddition_MultiConditionLinkSameOut) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> e:In_1\n          c:Out_2 -> d:In_1\n          
d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, ConditionNotAddition_EndifAndInOtherMultiPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> d:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_2\n          c:Out_1 -> d:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_EndifAndCollapseInOnePort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> f:In_1\n          d:Out_2 -> e:In_1\n          e:Out_1 -> f:In_2\n          e:Out_2 -> f:In_2\n          f:Out_1 -> h:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionAddition_ConditionInExpandCollapse) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          
b[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0] \n          e[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_2\n          c:Out_1 -> d:In_1\n          c:Out_2 -> d:In_1\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, BranchCollapseMatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0] \n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_2_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          c:Out_1 -> e:In_2\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_SinglePortLinkMultiPortThroughNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          httpserver_sync_receive[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          param_analysis[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0] \n          my_nv_image_decoder[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0] \n          image_resolution_judge[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          color_tranpose_1[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          padding[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          normalize[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n       
   face_detetc_infer[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0]\n          face_detect_post[type=flowunit, flowunit=test_3_1, device=cpu, deviceid=0]\n          face_condition[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          httpserver_sync_receive:Out_1 -> param_analysis:In_1\n          param_analysis:Out_1 -> my_nv_image_decoder:In_1\n          param_analysis:Out_2 -> image_resolution_judge:In_1\n          my_nv_image_decoder:Out_1 -> image_resolution_judge:In_1\n          image_resolution_judge:Out_1 -> face_detect_post:In_1\n          image_resolution_judge:Out_1 -> color_tranpose_1:In_1\n          color_tranpose_1:Out_1 -> padding:In_1\n          padding:Out_1 -> normalize:In_1\n          normalize:Out_1 -> face_detetc_infer:In_1\n          face_detetc_infer:Out_1 -> face_detect_post:In_2\n          face_detetc_infer:Out_2 -> face_detect_post:In_3\n          face_detect_post:Out_1 -> face_condition:In_1\n          image_resolution_judge:Out_2 -> face_condition:In_1\n          face_condition:Out_1 -> g:In_1\n          face_condition:Out_2 -> g:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_EndIfNodeIsAlsoCondition) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          begin[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          a[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          
begin:Out_1 -> a:In_1\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          d:Out_2 -> f:In_1\n          e:Out_1 -> f:In_1\n          f:Out_1 -> end:In_1\n          a:Out_2 -> end:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_EndIfNodeIsAlsoExpand) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          begin[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          a[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=collapse_2_1, device=cpu, deviceid=0]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          begin:Out_1 -> a:In_1\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          d:Out_2 -> f:In_1\n          e:Out_1 -> f:In_2\n          f:Out_1 -> end:In_1\n          a:Out_2 -> end:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, ConditionMatch_EndIfNodeIsAlsoCollapse) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          begin[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          a[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          aa[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, 
flowunit=collapse_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          begin:Out_1 -> a:In_1\n          a:Out_1 -> aa:In_1\n          aa:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> end:In_1\n          a:Out_2 -> end:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, MultiNotMatch_MultiExpandSingleCollapseInBranch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          begin[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          a[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          begin:Out_1 -> a:In_1\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_BADCONF);\n}\n\nTEST_F(GraphCheckerTest, Bicycle) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          
h[type=flowunit, flowunit=test_1_3, device=cpu, deviceid=0]\n          i[type=flowunit, flowunit=test_3_1, device=cpu, deviceid=0]\n          j[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          k[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          l[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          m[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0]\n          n[type=flowunit, flowunit=expand_2_2, device=cpu, deviceid=0]\n          o[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          p[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          q[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          r[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          s[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          t[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          u[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          v[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          w[type=flowunit, flowunit=test_1_3, device=cpu, deviceid=0]\n          x[type=flowunit, flowunit=test_3_1, device=cpu, deviceid=0]\n          y[type=flowunit, flowunit=test_4_1, device=cpu, deviceid=0]\n          z[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          out[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c: In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n          e:Out_1 -> j:In_2\n          e:Out_1 -> t:In_1\n          e:Out_1 -> k:In_1\n          e:Out_1 -> y:In_4\n          f:Out_1 -> g:In_1\n          g:Out_1 -> h:In_1\n          h:Out_1 -> i:In_1\n          h:Out_2 -> i:In_2\n          h:Out_3 -> i:In_3\n          i:Out_1 -> j:In_1\n          j:Out_1 -> k:In_2\n          j:Out_1 -> y:In_3\n          t:Out_1 -> y:In_2\n          t:Out_2 -> u:In_1\n          u:Out_1 
-> v:In_1\n          v:Out_1 -> w:In_1\n          w:Out_1 -> x:In_1\n          w:Out_2 -> x:In_2\n          w:Out_3 -> x:In_3\n          x:Out_1 -> y:In_2\n          k:Out_1 -> l:In_1\n          l:Out_1 -> m:In_1\n          l:Out_2 -> y:In_1\n          m:Out_1 -> n:In_1\n          m:Out_2 -> n:In_2\n          n:Out_1 -> o:In_1\n          n:Out_2 -> o:In_2\n          o:Out_1 -> p:In_1\n          p:Out_1 -> q:In_1\n          q:Out_1 -> r:In_1\n          r:Out_1 -> s:In_1\n          s:Out_1 -> y:In_1\n          y:Out_1 -> z:In_1\n          z:Out_1 -> out:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, Park) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_1_3, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=test_3_1, device=cpu, deviceid=0]\n          i[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          j[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          k[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          d:Out_2 -> i:In_1\n          e:Out_1 -> f:In_1\n          f:Out_1 -> g:In_1\n          g:Out_1 -> h:In_1\n          g:Out_2 -> h:In_2\n          g:Out_3 -> h:In_3\n          h:Out_1 -> i:In_1\n          b:Out_1 -> i:In_2\n          i:Out_1 -> j:In_1\n          b:Out_1 -> j:In_2\n          j:Out_1 -> k:In_1\n        }\n      )\";\n\n  
TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, Road) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_3, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_3_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          i[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          j[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          k[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          l[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          m[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          n[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          o[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          p[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          q[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          r[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          s[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          t[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          u[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          v[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          w[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          x[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          y[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          z[type=flowunit, 
flowunit=test_1_1, device=cpu, deviceid=0]\n          aa[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          bb[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          cc[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          dd[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          ee[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          ff[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          gg[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n          e:Out_2 -> f:In_2\n          e:Out_3 -> f:In_3\n          f:Out_1 -> g:In_1\n          c:Out_1 -> g:In_2\n          g:Out_1 -> h:In_1\n          h:Out_1 -> i:In_1\n          h:Out_1 -> n:In_1\n          h:Out_2 -> o:In_1\n          i:Out_1 -> j:In_1\n          i:Out_2 -> j:In_2\n          j:Out_1 -> k:In_1\n          k:Out_1 -> l:In_1\n          l:Out_1 -> m:In_1\n          m:Out_1 -> n:In_2\n          n:Out_1 -> o:In_1\n          o:Out_1 -> p:In_1\n          o:Out_1 -> v:In_1\n          o:Out_2 -> w:In_1\n          p:Out_1 -> q:In_1\n          p:Out_2 -> q:In_2\n          q:Out_1 -> r:In_1\n          r:Out_1 -> s:In_1\n          p:Out_2 -> t:In_1\n          s:Out_1 -> t:In_2\n          t:Out_1 -> u:In_1\n          u:Out_1 -> v:In_2\n          v:Out_1 -> w:In_1\n          w:Out_1 -> x:In_1\n          w:Out_1 -> dd:In_1\n          w:Out_2 -> ee:In_1\n          x:Out_1 -> y:In_1\n          x:Out_2 -> y:In_2\n          y:Out_1 -> z:In_1\n          z:Out_1 -> aa:In_1\n          aa:Out_1 -> bb:In_1\n          bb:Out_1 -> cc:In_1\n          cc:Out_1 -> dd:In_2\n          dd:Out_1 -> ee:In_1\n          b:Out_2 -> ff:In_1\n          ee:Out_1 -> ff:In_1\n          ff:Out_1 -> gg:In_1\n        }\n      )\";\n\n  TestGraph(conf_file_value, 
STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, NodeHasSameNameInInputOutputPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0]\n          b[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          c[type=flowunit, flowunit=expand_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          f[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          g[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          h[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0]\n          i[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0]\n          j[type=flowunit, flowunit=test_1_1_same_name, device=cpu, deviceid=0]\n          output1[type=output]\n\n          a:Out_1->b:In_1\n          b:Out_1->c:In_1\n          b:Out_1->i:In_1\n          b:Out_2->j:In_1\n          c:Out_1->d:In_1\n          c:Out_2->d:In_2\n          c:Out_1->g:In_1\n          d:Out_1->e:In_1\n          e:Out_1->f:In_1\n          f:Out_1->g:In_2\n          g:Out_1->h:In_1\n          h:Out_1->i:In_2\n          i:Out_1->j:In_1\n          j:In_1->output1\n        }\n      )\";\n\n  TestGraph(conf_file_value, STATUS_OK);\n}\n\nTEST_F(GraphCheckerTest, GetSetMatchNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0] \n          b[type=flowunit, flowunit=expand_1_1, device=cpu, deviceid=0] \n          c[type=flowunit, flowunit=condition_1_2, device=cpu, deviceid=0]\n          d[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0]\n          e[type=flowunit, flowunit=collapse_1_1, device=cpu, deviceid=0] \n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> 
d:In_1\n          c:Out_2 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  std::shared_ptr<Graph> graph;\n  EXPECT_TRUE(BuildGraph(config, graph) == STATUS_OK);\n  EXPECT_EQ(CastNode(graph->GetNode(\"a\"))->GetMatchNode(), nullptr);\n  EXPECT_EQ(CastNode(graph->GetNode(\"b\"))->GetMatchNode(), nullptr);\n  EXPECT_EQ(CastNode(graph->GetNode(\"c\"))->GetMatchNode(), nullptr);\n  EXPECT_EQ(CastNode(graph->GetNode(\"d\"))->GetMatchNode(), graph->GetNode(\"c\"));\n  EXPECT_EQ(CastNode(graph->GetNode(\"e\"))->GetMatchNode(), graph->GetNode(\"b\"));\n  EXPECT_EQ(CastNode(graph->GetNode(\"f\"))->GetMatchNode(), nullptr);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/graph_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/graph.h\"\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"graph_conf_mockgraphconf/graph_conf_mockgraphconf.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\nclass GraphTest : public testing::Test {\n public:\n  GraphTest() = default;\n\n protected:\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_0_1\", {}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_0_2\", {}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_2_0\", {\"In_1\", \"In_2\"}, {});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, 
mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_1_0\", {\"In_1\"}, {});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_1_normal\", {\"In_1\"}, {\"Out_1\"});\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"test_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      mock_desc->SetStreamSameCount(true);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\"stream_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      mock_desc->SetStreamSameCount(false);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"condition_1_2\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetConditionType(IF_ELSE);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"collapse_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetOutputType(COLLAPSE);\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = 
GenerateFlowunitDesc(\"expand_1_1\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetOutputType(EXPAND);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_2\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_2_1\", {\"In_1\", \"In_2\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_loop\", {\"In_1\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetLoopType(LOOP);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc = GenerateFlowunitDesc(\n          \"test_loop_invalid\", {\"In_1\", \"In_2\"}, {\"Out_1\", \"Out_2\"});\n      mock_desc->SetLoopType(LOOP);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    {\n      auto mock_desc =\n          GenerateFlowunitDesc(\"test_1_1_stream\", {\"In_1\"}, {\"Out_1\"});\n      mock_desc->SetFlowType(STREAM);\n      auto mock_funcitons = std::make_shared<MockFunctionCollection>();\n      flow_->AddFlowUnitDesc(mock_desc, mock_funcitons->GenerateCreateFunc());\n    }\n\n    flow_->Init(false);\n  }\n\n  void TearDown() override {\n    auto device_mgr = DeviceManager::GetInstance();\n    device_mgr->Clear();\n    
auto flowunit_mgr = FlowUnitManager::GetInstance();\n    flowunit_mgr->Clear();\n    auto drivers = Drivers::GetInstance();\n    drivers->Clear();\n  }\n\n  Status BuildGraph(const std::shared_ptr<Configuration> &config,\n                    std::shared_ptr<GCGraph> *gcgraph_out = nullptr) {\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n\n    auto device_mgr = DeviceManager::GetInstance();\n    device_mgr->Initialize(drivers, config);\n\n    auto flowunit_mgr = FlowUnitManager::GetInstance();\n    flowunit_mgr->Initialize(drivers, device_mgr, config);\n\n    GraphConfigManager graphconf_mgr = GraphConfigManager::GetInstance();\n    graphconf_mgr.Initialize(drivers, config);\n    auto graphvizconf = graphconf_mgr.LoadGraphConfig(config);\n    auto gcgraph = graphvizconf->Resolve();\n    if (gcgraph_out) {\n      *gcgraph_out = gcgraph;\n    }\n\n    auto graph = std::make_shared<Graph>();\n    graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n\n    return graph->Build(gcgraph);\n  }\n\n private:\n  std::shared_ptr<MockFlow> flow_;\n};\n\n/*\n      ---->b---->\n     /           \\\n    a             d---->e---->f\n     \\           /\n      ---->c---->\n*/\nTEST_F(GraphTest, BuildGraph) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\"]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          d[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\"]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          
c:Out_1 -> d:In_2\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) == STATUS_OK);\n}\n\n/*\n      ---->b---->    e-->\n     /           \\       \\\n    a             d------>f\n     \\           /\n      ---->c---->\n*/\nTEST_F(GraphTest, BuildGraph_IsolatedPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\"]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          d[type=flowunit, flowunit=test_2_1, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\"]\n          e[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n          d:Out_1 -> f:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) != STATUS_OK);\n}\n\n/*\n     ---->b----\n    /          \\\n    a           --->d   e-->f\n    \\          /\n     ---->c----\n*/\nTEST_F(GraphTest, BuildGraph_IsolatedNode) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\"]\n          b[type=flowunit, 
flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          e[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_1\n          e:Out_1 -> f:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) != STATUS_OK);\n}\n\n/*\n       -------------\n      /             \\\n     ---->b---->     \\\n    /           \\     \\\n   a             d---->e---->f\n    \\           /\n     ---->c---->\n*/\nTEST_F(GraphTest, BuildGraph_Topology) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_2, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\"]\n          b[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          d[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          e[type=flowunit, flowunit=test_1_2, device=cpu, deviceid=0, label=\"<In_1> | <Out_1> | <Out_1>\"]\n          f[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> f:In_1\n          e:Out_2 -> b:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n 
 auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) != STATUS_OK);\n}\n\n/*\n      a\n      |\n  --> b --> end\n |    |\n  <-- c\n*/\n\nTEST_F(GraphTest, BuildGraph_SingleLoop) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>|<Out_2>\"]\n          c[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> end:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  auto status = BuildGraph(config);\n  MBLOG_ERROR << status.WrapErrormsgs();\n  EXPECT_TRUE(status == STATUS_OK);\n}\n\n/*\n      a            --> end\n      |           |\n  --> b --> d --> e -->\n |    |           |    |\n  <-- c             <--\n*/\n\nTEST_F(GraphTest, DISABLED_BuildGraph_DoubleLoop) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>|<Out_2>\"]\n          c[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          d[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1>| <Out_1>\"]\n          e[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1> 
|<Out_1>|<Out_2>\"]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> e:In_1\n          e:Out_2 -> end:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) == STATUS_OK);\n}\n\n/*     ------> end\n      |     f --------->\n      |     |          |\na --> b --> c --> d    |\n      |     |    |     |\n      |      <-- e     |\n      <----------------\n*/\n\nTEST_F(GraphTest, DISABLED_BuildGraph_LoopInLoop) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>|<Out_2>\"]\n          c[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>|<Out_2>\"]\n          d[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1>| <Out_1>\"]\n          e[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>\"]\n          f[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>\"]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          e:Out_1 -> c:In_1\n          c:Out_2 -> f:In_1\n          f:Out_1 -> b:In_1\n          b:Out_2 -> end:In_1\n\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", 
\"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) == STATUS_OK);\n}\n\nTEST_F(GraphTest, BuildGraph_LoopInputOutputInvalid) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a1[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          a2[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          b[type=flowunit, flowunit=test_loop_invalid, device=cpu, deviceid=0, label=\"<In_1>|<In_2>|<Out_1>|<Out_2>\"]\n          c[type=flowunit, flowunit=test_1_1_normal, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a1:Out_1 -> b:In_1\n          a2:Out_1 -> b:In_2\n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> end:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_TRUE(BuildGraph(config) == STATUS_FAULT);\n}\n\nTEST_F(GraphTest, BuildGraph_StreamInLoop) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[type=flowunit, flowunit=test_0_1, device=cpu, deviceid=0, label=\"<Out_1>\"]\n          b[type=flowunit, flowunit=test_loop, device=cpu, deviceid=0, label=\"<In_1>|<Out_1>|<Out_2>\"]\n          c[type=flowunit, flowunit=test_1_1, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          end[type=flowunit, flowunit=test_1_0, device=cpu, deviceid=0, label=\"<In_1>\"]\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> b:In_1\n          b:Out_2 -> end:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  
config->SetProperty(\"graph.graphconf\", conf_file_value);\n  auto status = BuildGraph(config);\n  MBLOG_ERROR << status.WrapErrormsgs();\n  EXPECT_TRUE(status == STATUS_FAULT);\n}\n\nTEST_F(GraphTest, OrphanCheck) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          orphan[flowunit=test_1_1]\n          a[flowunit=test_0_1]\n          b[flowunit=test_1_0]\n          a:Out_1 -> b:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, SkipOrphan) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          orphan[flowunit=test_1_1]\n          a[flowunit=test_0_1]\n          b[flowunit=test_1_0]\n          a:Out_1 -> b:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_OK);\n}\n\nTEST_F(GraphTest, DefaultGraphConfig) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          queue_size=8\n          batch_size=8\n          a[flowunit=test_0_1]\n          b[flowunit=test_1_1, batch_size=16]\n          c[flowunit=test_1_0, queue_size=16]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  std::shared_ptr<modelbox::GCGraph> gcgraph;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config, &gcgraph), STATUS_OK);\n\n  auto config_a = 
gcgraph->GetNode(\"a\")->GetConfiguration();\n  auto config_b = gcgraph->GetNode(\"b\")->GetConfiguration();\n  auto config_c = gcgraph->GetNode(\"c\")->GetConfiguration();\n\n  EXPECT_EQ(config_a->GetUint32(\"queue_size\", 0), 8);\n  EXPECT_EQ(config_a->GetUint32(\"batch_size\", 0), 8);\n  EXPECT_EQ(config_b->GetUint32(\"queue_size\", 0), 8);\n  EXPECT_EQ(config_b->GetUint32(\"batch_size\", 0), 16);\n  EXPECT_EQ(config_c->GetUint32(\"queue_size\", 0), 16);\n  EXPECT_EQ(config_c->GetUint32(\"batch_size\", 0), 8);\n}\n\nTEST_F(GraphTest, InputStreamUnmatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_2]\n          b[flowunit=test_1_1]\n          c[flowunit=stream_1_1]\n          d[flowunit=test_2_0]\n\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_OK);\n}\n\nTEST_F(GraphTest, InputStreamCollapseRoot) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=collapse_1_1]\n          c[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, InputStreamCollapseUnmatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_2]\n          b[flowunit=expand_1_1]\n         
 c[flowunit=test_1_1]\n          d[flowunit=test_2_0]\n\n          a:Out_1 -> b:In_1\n          a:Out_2 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> d:In_2\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, InputStreamConditionUnmatch) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=condition_1_2]\n          c[flowunit=test_1_0]\n          d[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> d:In_1\n          a:Out_1 -> c:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, InputStreamConditionOne) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=condition_1_2]\n          c[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_1 -> c:In_1\n          b:Out_2 -> c:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_OK);\n}\n\nTEST_F(GraphTest, InputStreamConditionOne1) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n  
        b[flowunit=condition_1_2]\n          c[flowunit=test_1_1]\n          d[flowunit=test_1_1]\n          e[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          b:Out_1 -> d:In_1\n          c:Out_1 -> e:In_1\n          d:Out_1 -> e:In_1\n          b:Out_2 -> e:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, InputConditionConnectWrongPort) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=condition_1_2]\n          c[flowunit=condition_1_2]\n          d[flowunit=test_1_0]\n          e[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          a:Out_1 -> c:In_1\n          b:Out_1 -> d:In_1\n          b:Out_2 -> e:In_1\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_BADCONF);\n}\n\nTEST_F(GraphTest, InputConditionNotHasSameCount) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=condition_1_2]\n          c[flowunit=stream_1_1]\n          d[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          b:Out_2 -> d:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  
config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_OK);\n}\n\nTEST_F(GraphTest, SucessConditionGraph) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=condition_1_2]\n          c[flowunit=condition_1_2]\n          d[flowunit=test_1_1]\n          e[flowunit=test_1_1]\n          f[flowunit=test_1_1]\n          g[flowunit=test_1_0]\n\n          a:Out_1 -> b:In_1\n\n          b:Out_1 -> c:In_1\n          b:Out_2 -> g:In_1\n\n          c:Out_1 -> d:In_1\n          c:Out_2 -> e:In_1\n\n          d:Out_1  -> f:In_1\n          e:Out_1  -> f:In_1\n\n          f:Out_1 -> g:In_1\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_SUCCESS);\n}\n\nTEST_F(GraphTest, SucessExpandCollapseGraph) {\n  const auto *conf_file_value =\n      R\"(\n        digraph demo {\n          a[flowunit=test_0_1]\n          b[flowunit=expand_1_1]\n          c[flowunit=test_1_1]\n          d[flowunit=collapse_1_1]\n          e[flowunit=test_2_0]\n\n          a:Out_1 -> b:In_1\n          b:Out_1 -> c:In_1\n          c:Out_1 -> d:In_1\n          d:Out_1 -> e:In_1\n          a:Out_1 -> e:In_2\n        }\n      )\";\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.strict\", false);\n  config->SetProperty(\"graph.graphconf\", conf_file_value);\n  EXPECT_EQ(BuildGraph(config), STATUS_SUCCESS);\n}\n\nTEST_F(GraphTest, BuildGraphFromArray) {\n  const char* conf_file_value[] = {\n      \"digraph demo {\", \"    a[flowunit=test_0_1, a=x, c=x]\",\n      \"    
b[flowunit=test_1_0]\", \"    a:Out_1 -> b:In_1\", \"}\"};\n\n  std::vector<std::string> graph_config;\n  for (auto &i : conf_file_value) {\n    graph_config.emplace_back(i);\n  }\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  config->SetProperty(\"graph.format\", \"graphviz\");\n  config->SetProperty(\"graph.graphconf\", graph_config);\n  EXPECT_TRUE(BuildGraph(config) == STATUS_OK);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/match_stream_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/match_stream.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"modelbox/session.h\"\n\nnamespace modelbox {\nclass MatchStreamTest : public testing::Test {\n protected:\n  void SetUp() override {\n    in_port1_ = std::make_shared<InPort>(\"a\", nullptr);\n    in_port2_ = std::make_shared<InPort>(\"b\", nullptr);\n    data_ports_.push_back(in_port1_);\n    data_ports_.push_back(in_port2_);\n\n    // prepare data\n    auto root_buffer_index = std::make_shared<BufferIndexInfo>();\n    auto session = std::make_shared<Session>(nullptr);\n\n    auto stream1 = std::make_shared<Stream>(session);\n    buffer1_ = std::make_shared<Buffer>();\n    auto buffer1_index = std::make_shared<BufferIndexInfo>();\n    auto buffer1_inherit = std::make_shared<BufferInheritInfo>();\n    buffer1_inherit->SetInheritFrom(root_buffer_index);\n    buffer1_inherit->SetType(BufferProcessType::EXPAND);\n    buffer1_index->SetInheritInfo(buffer1_inherit);\n    buffer1_index->SetStream(stream1);\n    buffer1_index->SetIndex(0);\n    BufferManageView::SetIndexInfo(buffer1_, buffer1_index);\n\n    auto stream2 = 
std::make_shared<Stream>(session);\n    buffer2_ = std::make_shared<Buffer>();\n    auto buffer2_index = std::make_shared<BufferIndexInfo>();\n    auto buffer2_inherit = std::make_shared<BufferInheritInfo>();\n    buffer2_inherit->SetInheritFrom(root_buffer_index);\n    buffer2_inherit->SetType(BufferProcessType::EXPAND);\n    buffer2_index->SetInheritInfo(buffer2_inherit);\n    buffer2_index->SetStream(stream2);\n    buffer2_index->SetIndex(0);\n    BufferManageView::SetIndexInfo(buffer2_, buffer2_index);\n\n    // push data\n    in_port1_->GetQueue()->Push(buffer1_);\n    in_port2_->GetQueue()->Push(buffer2_);\n  }\n\n  std::shared_ptr<Buffer> buffer1_;\n  std::shared_ptr<Buffer> buffer2_;\n  std::shared_ptr<InPort> in_port1_;\n  std::shared_ptr<InPort> in_port2_;\n  std::vector<std::shared_ptr<InPort>> data_ports_;\n};\n\nTEST_F(MatchStreamTest, InputMatchStreamManagerTest) {\n  // run\n  InputMatchStreamManager input_match_stream_mgr(\"test\", 32, 2);\n  auto ret = input_match_stream_mgr.LoadData(data_ports_);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  std::list<std::shared_ptr<MatchStreamData>> match_stream_list;\n  ret = input_match_stream_mgr.GenMatchStreamData(match_stream_list);\n  EXPECT_EQ(ret, STATUS_SUCCESS);\n  ASSERT_EQ(match_stream_list.size(), 1);\n  auto match_stream = match_stream_list.front();\n  ASSERT_EQ(match_stream->GetDataCount(), 1);\n  auto data_map = match_stream->GetBufferList();\n  ASSERT_EQ(data_map->size(), 2);\n  ASSERT_EQ(data_map->at(\"a\").size(), 1);\n  ASSERT_EQ(data_map->at(\"a\").size(), 1);\n  EXPECT_EQ(data_map->at(\"a\").front(), buffer1_);\n  EXPECT_EQ(data_map->at(\"b\").front(), buffer2_);\n}\n\nTEST_F(MatchStreamTest, OutputMatchStream) {\n  std::set<std::string> output_port_names{\"a\", \"b\"};\n  OutputMatchStreamManager output_match_stream_mgr(\n      \"test\", std::move(output_port_names));\n\n  auto buffer1_index = BufferManageView::GetIndexInfo(buffer1_);\n  auto buffer2_index = 
BufferManageView::GetIndexInfo(buffer2_);\n\n  auto out_buffer1 = std::make_shared<Buffer>();\n  auto out_index = std::make_shared<BufferIndexInfo>();\n  auto out_inherit = std::make_shared<BufferInheritInfo>();\n  out_inherit->SetInheritFrom(buffer1_index);\n  out_inherit->SetType(BufferProcessType::EXPAND);\n  auto out_process = std::make_shared<BufferProcessInfo>();\n  out_process->SetParentBuffers(\"a\", {buffer1_index});\n  out_process->SetParentBuffers(\"b\", {buffer2_index});\n  out_index->SetInheritInfo(out_inherit);\n  out_index->SetProcessInfo(out_process);\n  BufferManageView::SetIndexInfo(out_buffer1, out_index);\n\n  auto out_buffer2 = std::make_shared<Buffer>();\n  auto out_index2 = std::make_shared<BufferIndexInfo>();\n  auto out_inherit2 = std::make_shared<BufferInheritInfo>();\n  out_inherit2->SetInheritFrom(buffer1_index);\n  out_inherit2->SetType(BufferProcessType::EXPAND);\n  auto out_process2 = std::make_shared<BufferProcessInfo>();\n  out_process2->SetParentBuffers(\"a\", {buffer1_index});\n  out_process2->SetParentBuffers(\"b\", {buffer2_index});\n  out_index2->SetInheritInfo(out_inherit2);\n  out_index2->SetProcessInfo(out_process2);\n  BufferManageView::SetIndexInfo(out_buffer2, out_index2);\n\n  std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n      output_data;\n  output_data[\"a\"].push_back(out_buffer1);\n  output_data[\"b\"].push_back(out_buffer2);\n  std::unordered_map<std::string, std::shared_ptr<DataMeta>> port_stream_meta;\n  auto ret = output_match_stream_mgr.UpdateStreamInfo(\n      output_data, port_stream_meta, nullptr);\n  ASSERT_EQ(ret, STATUS_SUCCESS);\n  ASSERT_EQ(output_match_stream_mgr.GetOutputStreamCount(), 1);\n  EXPECT_NE(out_index->GetStream(), nullptr);\n  EXPECT_EQ(out_index->GetIndex(), 0);\n  EXPECT_NE(out_index2->GetStream(), nullptr);\n  EXPECT_EQ(out_index2->GetIndex(), 0);\n}\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/node_test.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/node.h\"\n\n#include <string>\n#include <utility>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/stream.h\"\n\nnamespace modelbox {\n\nusing ::testing::Sequence;\n\nvoid BuildDataEventStart(\n    std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n        &input_map,\n    const std::shared_ptr<Device> &device) {}\n\nvoid BuildDataEventStop(\n    std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>&\n        input_map) {}\n\nvoid BuildDataQueue(\n    Node *match_at,\n    std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>\n        &input_map,\n    const std::shared_ptr<Device> &device) {\n  auto session = std::make_shared<Session>(nullptr);\n  auto init_stream = std::make_shared<Stream>(session);\n  auto init_buffer_index_info = std::make_shared<BufferIndexInfo>();\n  init_buffer_index_info->SetStream(init_stream);\n  init_buffer_index_info->SetIndex(0);\n  init_stream->IncreaseBufferCount();\n\n  auto inherit_info = std::make_shared<BufferInheritInfo>();\n  inherit_info->SetInheritFrom(init_buffer_index_info);\n  inherit_info->SetType(BufferProcessType::EXPAND);\n\n  
std::vector<std::shared_ptr<Buffer>> p1_bl(1);\n  std::vector<std::shared_ptr<Buffer>> p2_bl(1);\n\n  std::vector<int> data_1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};\n  std::vector<int> data_2 = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19};\n\n  auto buf_1 = std::make_shared<Buffer>(device);\n  buf_1->Build(10 * sizeof(int));\n  auto *dev_data_1 = (int *)buf_1->MutableData();\n  for (size_t i = 0; i < data_1.size(); ++i) {\n    dev_data_1[i] = data_1[i];\n  }\n  auto b1_index = BufferManageView::GetIndexInfo(buf_1);\n  auto b1_s1 = std::make_shared<Stream>(session);\n  b1_index->SetStream(b1_s1);\n  b1_index->SetIndex(0);\n  b1_index->SetInheritInfo(inherit_info);\n  p1_bl[0] = buf_1;\n\n  auto buf_2 = std::make_shared<Buffer>(device);\n  buf_2->Build(10 * sizeof(int));\n  auto *dev_data_2 = (int *)buf_2->MutableData();\n  for (size_t i = 0; i < data_2.size(); ++i) {\n    dev_data_2[i] = data_2[i];\n  }\n  auto b2_index = BufferManageView::GetIndexInfo(buf_2);\n  auto b2_s1 = std::make_shared<Stream>(session);\n  b2_index->SetStream(b2_s1);\n  b2_index->SetIndex(0);\n  b2_index->SetInheritInfo(inherit_info);\n  p2_bl[0] = buf_2;\n\n  input_map.emplace(\"In_1\", p1_bl);\n  input_map.emplace(\"In_2\", p2_bl);\n}\n\nvoid CheckQueueHasDataError(const std::shared_ptr<BufferQueue> &queue,\n                            uint32_t queue_size) {\n  std::vector<std::shared_ptr<Buffer>> error_buffer_vector;\n  queue->PopBatch(&error_buffer_vector);\n  bool has_error{false};\n  for (auto& buffer : error_buffer_vector) {\n    if (buffer->HasError()) {\n      has_error = true;\n    }\n  }\n  EXPECT_EQ(error_buffer_vector.size(), queue_size);\n  EXPECT_EQ(has_error, true);\n  queue->PushBatch(&error_buffer_vector);\n}\n\nvoid CheckQueueNotHasDataError(const std::shared_ptr<BufferQueue> &queue) {\n  std::vector<std::shared_ptr<Buffer>> error_buffer_vector;\n  queue->PopBatch(&error_buffer_vector);\n  bool has_error{false};\n  for (auto& buffer : error_buffer_vector) {\n    if 
(buffer->HasError()) {\n      has_error = true;\n    }\n  }\n  EXPECT_EQ(has_error, false);\n  queue->PushBatch(&error_buffer_vector);\n}\n\nstd::shared_ptr<Buffer> CreateBuffer(size_t idx = 0,\n                                     std::shared_ptr<Stream> stream = nullptr) {\n  auto buffer = std::make_shared<Buffer>();\n  auto input_index = BufferManageView::GetIndexInfo(buffer);\n  if (stream == nullptr) {\n    auto session = std::make_shared<Session>(nullptr);\n    stream = std::make_shared<Stream>(session);\n  }\n  input_index->SetIndex(idx);\n  input_index->SetStream(stream);\n  stream->IncreaseBufferCount();\n  return buffer;\n}\n\nclass TestNode : public Node {\n public:\n  Status Recv(\n      RunType type,\n      std::list<std::shared_ptr<FlowUnitDataContext>>& data_ctx_list) override {\n    return Node::Recv(type, data_ctx_list);\n  }\n\n  Status GenInputMatchStreamData(RunType type,\n                                 std::list<std::shared_ptr<MatchStreamData>>&\n                                     match_stream_data_list) override {\n    return Node::GenInputMatchStreamData(type, match_stream_data_list);\n  }\n\n  Status InitNodeProperties() override { return Node::InitNodeProperties(); }\n\n  void SetInputInOrder(bool input_in_order) {\n    input_match_stream_mgr_->SetInputBufferInOrder(input_in_order);\n  }\n\n  void SetInputGatherAll(bool input_gather_all) {\n    input_match_stream_mgr_->SetInputStreamGatherAll(input_gather_all);\n  }\n};\n\nclass NodeTest : public testing::Test {\n public:\n  NodeTest() = default;\n\n protected:\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n  void TearDown() override { flow_->Destroy(); };\n\n private:\n  std::shared_ptr<MockFlow> flow_;\n};\n\nclass NodeRecvTest : public testing::Test {\n public:\n  NodeRecvTest() = default;\n\n protected:\n  std::shared_ptr<TestNode> node_;\n  std::vector<std::shared_ptr<Buffer>> node1_input_;\n  std::vector<std::shared_ptr<Buffer>> 
node2_input1_;\n  std::vector<std::shared_ptr<Buffer>> node2_input1_end_;\n  std::vector<std::shared_ptr<Buffer>> node2_input1_mismatch_;\n  std::vector<std::shared_ptr<Buffer>> node2_input2_;\n  std::vector<std::shared_ptr<Buffer>> node2_input2_end_;\n  std::vector<std::shared_ptr<Buffer>> node2_input2_mismatch_;\n\n  std::shared_ptr<Node> node1_;\n  std::shared_ptr<Node> node2_;\n\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n    auto flowunit_mgr = FlowUnitManager::GetInstance();\n\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    node_ = std::make_shared<TestNode>();\n    node_->SetFlowUnitInfo(\"test_2_inputs_2_outputs\", \"cpu\", \"0\", flowunit_mgr);\n    node_->Init({\"In_1\", \"In_2\"}, {\"Out_1\", \"Out_2\"}, config);\n\n    /**\n     *                      -> node2_input1\n     * node1_input -> node1                 -> node2\n     *                      -> node2_input2\n     *\n     **/\n\n    node1_ = std::make_shared<Node>();\n    node2_ = std::make_shared<Node>();\n\n    auto session = std::make_shared<Session>(nullptr);\n    auto init_stream = std::make_shared<Stream>(session);\n    auto root1 = std::make_shared<BufferIndexInfo>();\n    root1->SetIndex(0);\n    root1->SetStream(init_stream);\n    auto root2 = std::make_shared<BufferIndexInfo>();\n    root2->SetIndex(0);\n    root2->SetStream(init_stream);\n\n    // node1 p1 s1\n    auto node1_input_s1 = std::make_shared<Stream>(session);\n    auto node1_input_s1_buffer = CreateBuffer(0, node1_input_s1);\n    auto node1_input_s1_end_flag = CreateBuffer(1, node1_input_s1);\n    BufferManageView::GetIndexInfo(node1_input_s1_end_flag)->MarkAsEndFlag();\n    node1_input_.push_back(node1_input_s1_buffer);\n    node1_input_.push_back(node1_input_s1_end_flag);\n    // node1 p1 s2\n    auto node1_input_s2_buffer = CreateBuffer();\n    node1_input_.push_back(node1_input_s2_buffer);\n\n    // node2 p1 s1\n    auto node2_input1_s1 
= std::make_shared<Stream>(session);\n    auto node2_input1_s1_buffer = CreateBuffer(0, node2_input1_s1);\n    auto node2_input1_s1_end_flag = CreateBuffer(1, node2_input1_s1);\n    BufferManageView::GetIndexInfo(node2_input1_s1_end_flag)->MarkAsEndFlag();\n    node2_input1_.push_back(node2_input1_s1_buffer);\n    node2_input1_end_.push_back(node2_input1_s1_end_flag);\n    // node2 p1 s2\n    auto node2_input1_s2_buffer2 = CreateBuffer(1);\n    node2_input1_.push_back(node2_input1_s2_buffer2);\n    // node2 p1 mismatch\n    node2_input1_mismatch_.push_back(node2_input1_s1_buffer);\n    node2_input1_mismatch_.push_back(node2_input1_s1_end_flag);\n    // node2 p2 s1\n    auto node2_input2_s1 = std::make_shared<Stream>(session);\n    auto node2_input2_s1_buffer = CreateBuffer(0, node2_input2_s1);\n    auto node2_input2_s1_end_flag = CreateBuffer(1, node2_input2_s1);\n    BufferManageView::GetIndexInfo(node2_input2_s1_end_flag)->MarkAsEndFlag();\n    node2_input2_.push_back(node2_input2_s1_buffer);\n    node2_input2_end_.push_back(node2_input2_s1_end_flag);\n    // node2 p2 s2\n    auto node2_input2_s2_buffer2 = CreateBuffer(1);\n    node2_input2_.push_back(node2_input2_s2_buffer2);\n    // node2 p2 mismatch\n    auto node2_input2_mis_end_flag = CreateBuffer(0, node2_input2_s1);\n    BufferManageView::GetIndexInfo(node2_input2_mis_end_flag)->MarkAsEndFlag();\n    node2_input2_mismatch_.push_back(node2_input2_mis_end_flag);\n\n    // inherit init\n    auto inherit1 = std::make_shared<BufferInheritInfo>();\n    inherit1->SetType(BufferProcessType::EXPAND);\n    inherit1->SetInheritFrom(root1);\n    auto inherit2 = std::make_shared<BufferInheritInfo>();\n    inherit2->SetType(BufferProcessType::EXPAND);\n    inherit2->SetInheritFrom(root2);\n    // node1 input index init\n    BufferManageView::GetIndexInfo(node1_input_s1_buffer)\n        ->SetInheritInfo(inherit1);\n    BufferManageView::GetIndexInfo(node1_input_s1_end_flag)\n        ->SetInheritInfo(inherit1);\n    
BufferManageView::GetIndexInfo(node1_input_s2_buffer)\n        ->SetInheritInfo(inherit2);\n    // node2 input index init\n    BufferManageView::GetIndexInfo(node2_input1_s1_buffer)\n        ->SetInheritInfo(inherit1);\n    BufferManageView::GetIndexInfo(node2_input2_s1_buffer)\n        ->SetInheritInfo(inherit1);\n    BufferManageView::GetIndexInfo(node2_input1_s1_end_flag)\n        ->SetInheritInfo(inherit1);\n    BufferManageView::GetIndexInfo(node2_input2_s1_end_flag)\n        ->SetInheritInfo(inherit1);\n    BufferManageView::GetIndexInfo(node2_input2_mis_end_flag)\n        ->SetInheritInfo(inherit1);\n\n    BufferManageView::GetIndexInfo(node2_input1_s2_buffer2)\n        ->SetInheritInfo(inherit2);\n    BufferManageView::GetIndexInfo(node2_input2_s2_buffer2)\n        ->SetInheritInfo(inherit2);\n  };\n\n  void TearDown() override {\n    node_ = nullptr;\n    flow_->Destroy();\n  };\n\n private:\n  std::shared_ptr<MockFlow> flow_;\n};\n\nclass NodeRunTest : public testing::Test {\n public:\n  NodeRunTest() = default;\n  void TestAdd(const std::string &add_flowunit_name);\n  void TestWrongAdd(const std::string &flowunit_name, const Status &run_status);\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n  void TearDown() override { flow_->Destroy(); };\n};\n\nstatic SessionManager node_test_session_manager;\n\nstd::shared_ptr<Node> Add_Node(\n    const std::string &name, const std::set<std::string> &inputs,\n    const std::set<std::string> &outputs,\n    std::shared_ptr<Configuration> config = nullptr) {\n  if (config == nullptr) {\n    ConfigurationBuilder configbuilder;\n    config = configbuilder.Build();\n  }\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto node = std::make_shared<Node>();\n  node->SetName(name);\n  node->SetFlowUnitInfo(name, \"cpu\", \"0\", flowunit_mgr);\n  node->SetSessionManager(&node_test_session_manager);\n  
EXPECT_EQ(node->Init(inputs, outputs, config), STATUS_SUCCESS);\n  EXPECT_EQ(node->Open(), STATUS_SUCCESS);\n  return node;\n}\n\nstd::shared_ptr<Node> Add_Test_2_0_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_2_0\", {\"In_1\", \"In_2\"}, {}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_1_0_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_1_0\", {\"In_1\"}, {}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_1_0_Batch_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_1_0_batch\", {\"In_1\"}, {}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_0_2_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_0_2\", {}, {\"Out_1\", \"Out_2\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_0_1_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_0_1\", {}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_0_1_Batch_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_0_1_batch\", {}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Add_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"add\", {\"In_1\", \"In_2\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Stream_Add_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"stream_add\", {\"In_1\", \"In_2\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Test_Orgin_0_2_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"test_orgin_0_2\", {}, {\"Out_1\", \"Out_2\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Half_Condition_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"half-condition\", {\"In_1\"}, {\"Out_1\", \"Out_2\"},\n                  
std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Conditionn_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"condition\", {\"In_1\"}, {\"Out_1\", \"Out_2\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Switch_Case_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"switch_case\", {\"In_1\"}, {\"Out_1\", \"Out_2\", \"Out_3\"},\n                  std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Loop_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"loop\", {\"In_1\"}, {\"Out_1\", \"Out_2\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Loop_End_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"loop_end\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Expand_Normal_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"expand_normal\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Collapse_Normal_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"collapse_normal\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Expand_Stream_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"expand_stream\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Collapse_Stream_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"collapse_stream\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Garther_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"garther\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Scatter_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  return Add_Node(\"scatter\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n}\n\nstd::shared_ptr<Node> Add_Simple_Pass_Node(\n    int 
times, std::shared_ptr<Configuration> config = nullptr) {\n  auto node = Add_Node(\"simple_pass\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n  Sequence s1;\n  auto pass_fu = std::dynamic_pointer_cast<MockFlowUnit>(\n      node->GetFlowUnitGroup()->GetExecutorUnit());\n  EXPECT_CALL(*pass_fu, Process(testing::_)).Times(times).InSequence(s1);\n  return node;\n}\n\nstd::shared_ptr<Node> Add_Stream_Process_Node(\n    std::vector<uint32_t> times,\n    std::shared_ptr<Configuration> config = nullptr) {\n  auto node =\n      Add_Node(\"stream_process\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n  Sequence s1;\n  auto process_fu = std::dynamic_pointer_cast<MockFlowUnit>(\n      node->GetFlowUnitGroup()->GetExecutorUnit());\n  EXPECT_EQ(times.size(), 3);\n  EXPECT_CALL(*process_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);\n  EXPECT_CALL(*process_fu, Process(testing::_)).Times(times[1]).InSequence(s1);\n  EXPECT_CALL(*process_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);\n  return node;\n}\n\nstd::shared_ptr<Node> Add_Stream_Info_Node(\n    std::shared_ptr<Configuration> config = nullptr) {\n  auto node = Add_Node(\"stream_info\", {}, {\"Out_1\"}, std::move(config));\n  Sequence s1;\n  auto stream_info_fu = std::dynamic_pointer_cast<MockFlowUnit>(\n      node->GetFlowUnitGroup()->GetExecutorUnit());\n  EXPECT_CALL(*stream_info_fu, DataPre(testing::_)).Times(1).InSequence(s1);\n  EXPECT_CALL(*stream_info_fu, Process(testing::_)).Times(1).InSequence(s1);\n  EXPECT_CALL(*stream_info_fu, DataPost(testing::_)).Times(1).InSequence(s1);\n  return node;\n}\n\nstd::shared_ptr<Node> Add_Stream_Start_Node(\n    std::vector<uint32_t> times,\n    std::shared_ptr<Configuration> config = nullptr) {\n  auto node = Add_Node(\"stream_start\", {\"In_1\"}, {\"Out_1\"}, std::move(config));\n  Sequence s1;\n  auto stream_start_fu = std::dynamic_pointer_cast<MockFlowUnit>(\n      node->GetFlowUnitGroup()->GetExecutorUnit());\n  EXPECT_CALL(*stream_start_fu, 
DataPre(testing::_))
      .Times(times[0])
      .InSequence(s1);
  EXPECT_CALL(*stream_start_fu, Process(testing::_))
      .Times(times[1])
      .InSequence(s1);
  EXPECT_CALL(*stream_start_fu, DataPost(testing::_))
      .Times(times[2])
      .InSequence(s1);
  return node;
}

// Builds a "stream_mid" node (1 in / 1 out) whose mock flowunit expects
// exactly DataPre once, Process fifteen times, then DataPost once, in order.
// NOTE(review): the Process count (15) is hard-coded; presumably it matches
// the fixed amount of data the stream tests feed through -- confirm callers.
std::shared_ptr<Node> Add_Stream_Mid_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("stream_mid", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto stream_mid_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*stream_mid_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*stream_mid_fu, Process(testing::_)).Times(15).InSequence(s1);
  EXPECT_CALL(*stream_mid_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a "stream_end" node expecting one DataPre, `times` Process calls,
// and one DataPost, strictly ordered.
std::shared_ptr<Node> Add_Stream_End_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("stream_end", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto stream_end_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*stream_end_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*stream_end_fu, Process(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*stream_end_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a "garther_gen_more" node with no mock expectations attached.
// NOTE(review): "garther" is a misspelling of "gather" that is baked into
// the flowunit name and used by other files -- do not rename in isolation.
std::shared_ptr<Node> Add_Garther_Gen_More_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  return Add_Node("garther_gen_more", {"In_1"}, {"Out_1"}, std::move(config));
}

// Builds a source node ("stream_normal_info", no inputs) expecting exactly
// one DataPre / Process / DataPost cycle.
std::shared_ptr<Node> Add_Stream_Normal_Info_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("stream_normal_info", {}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto stream_info_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*stream_info_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*stream_info_fu, Process(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*stream_info_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a "simple_error" node; only Process is expected (`times` calls) --
// no DataPre/DataPost expectations are set for this error-path unit.
std::shared_ptr<Node> Add_Simple_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("simple_error", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto simple_error_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*simple_error_fu, Process(testing::_))
      .Times(times)
      .InSequence(s1);
  return node;
}

// Builds a "stream_datapre_error" node; `times` holds the expected call
// counts as {DataPre, Process, DataPost}.
std::shared_ptr<Node> Add_Stream_Datapre_Error_Node(
    std::vector<size_t> times,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("stream_datapre_error", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times[1]).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);
  return node;
}

// Builds a "collapse_recieve_error" node expecting `times` DataPre/DataPost
// rounds; Process is only expected inside each round when `catch_error` is
// set (i.e. the unit handles the upstream error itself).
// NOTE(review): "recieve" is a misspelling of "receive" baked into the
// flowunit name -- do not rename in isolation.
std::shared_ptr<Node> Add_Collapse_Recieve_Error_Node(
    uint32_t times, bool catch_error = true,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("collapse_recieve_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  for (uint32_t i = 0; i < times; i++) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    if (catch_error) {
      EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
    }
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  }
  return node;
}

// Builds a "stream_process_error" node; `times` = {DataPre, Process,
// DataPost} expected call counts.
std::shared_ptr<Node> Add_Stream_Process_Error_Node(
    std::vector<size_t> times,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("stream_process_error", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times[1]).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);
  return node;
}

// Builds an "error_start" source node expecting one full
// DataPre/Process/DataPost cycle.
std::shared_ptr<Node> Add_Error_Start_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("error_start", {}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds an "error_start_normal" source node with no mock expectations.
std::shared_ptr<Node> Add_Error_Start_Normal_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("error_start_normal", {}, {"Out_1"}, std::move(config));
  return node;
}

// Builds a sink node ("error_end_normal", no outputs) expecting `times`
// Process calls; no ordering sequence is attached here.
std::shared_ptr<Node> Add_Error_End_Normal_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("error_end_normal", {"In_1"}, {}, std::move(config));
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times);
  return node;
}

// Builds a "normal_start" source node expecting one
// DataPre/Process/DataPost cycle.
std::shared_ptr<Node> Add_Normal_Start_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_start", {}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a "normal_expand_process_error" node; only Process is expected.
std::shared_ptr<Node> Add_Normal_Expand_Process_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_expand_process_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Builds a "normal_collapse_recieve_error" node; `times` = {DataPre,
// Process, DataPost} expected call counts.
// NOTE(review): "recieve" misspelling baked into the flowunit name.
std::shared_ptr<Node> Add_Normal_Collapse_Recieve_Error_Node(
    std::vector<uint32_t> times,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_collapse_recieve_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times[1]).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);
  return node;
}

// Builds a "normal_expand_process" node; only Process is expected.
std::shared_ptr<Node> Add_Normal_Expand_Process_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("normal_expand_process", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Builds a "stream_in_process_error" node; `times` = {DataPre, Process,
// DataPost} expected call counts.
std::shared_ptr<Node> Add_Stream_In_Process_Error_Node(
    std::vector<uint32_t> times,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("stream_in_process_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times[1]).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);
  return node;
}

// Builds a "normal_expand_start" node; only Process is expected.
std::shared_ptr<Node> Add_Normal_Expand_Start_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("normal_expand_start", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Builds an "expand_datapre_error" node: DataPre is called once but the
// error short-circuits the cycle, so Process/DataPost must never run.
std::shared_ptr<Node> Add_Expand_Datapre_Error_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("expand_datapre_error", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(0).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(0).InSequence(s1);
  return node;
}

// Builds an "expand_process_error" node: one DataPre, `times` Process
// calls, one DataPost, strictly ordered.
std::shared_ptr<Node> Add_Expand_Process_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("expand_process_error", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds an "expand_process" node. With times == 0 the test expects the
// cycle to abort after DataPre (no Process, no DataPost); otherwise a full
// DataPre / Process x times / DataPost cycle is expected.
// NOTE(review): the DataPre expectation is identical in both branches and
// could be hoisted above the if/else.
std::shared_ptr<Node> Add_Expand_Process_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("expand_process", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  if (times == 0) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(0).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(0).InSequence(s1);
  } else {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  }
  return node;
}

// Builds a "collapse_datagrouppre_error" node: DataGroupPre runs once and
// fails, so none of the later lifecycle callbacks may be invoked.
std::shared_ptr<Node> Add_Collapse_Datagrouppre_Error_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("collapse_datagrouppre_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataGroupPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(0).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(0).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(0).InSequence(s1);
  EXPECT_CALL(*node_fu, DataGroupPost(testing::_)).Times(0).InSequence(s1);
  return node;
}

// Builds a "collapse_datapre_error" node: `times` rounds of DataPre +
// DataPost with Process never invoked (DataPre fails each round).
std::shared_ptr<Node> Add_Collapse_DataPre_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("collapse_datapre_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  for (uint32_t i = 0; i < times; i++) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(0).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  }
  return node;
}

// Builds a "collapse_process_error" node: `times` full
// DataPre/Process/DataPost rounds.
// NOTE(review): expectation body is identical to Add_Collapse_Process_Node
// below; only the flowunit name differs.
std::shared_ptr<Node> Add_Collapse_Process_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("collapse_process_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  for (uint32_t i = 0; i < times; i++) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  }
  return node;
}

// Builds a "collapse_process" node: `times` full DataPre/Process/DataPost
// rounds.
std::shared_ptr<Node> Add_Collapse_Process_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("collapse_process", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  for (uint32_t i = 0; i < times; i++) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  }
  return node;
}

// Builds a "normal_collapse_datapre_error" node: `times` DataPre and
// DataPost calls, Process never invoked.
std::shared_ptr<Node> Add_Normal_Collapse_Datapre_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_collapse_datapre_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(0).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Builds a "normal_collapse_process_error" node: DataPre, Process and
// DataPost each expected `times` times, in order.
std::shared_ptr<Node> Add_Normal_Collapse_Process_Error_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_collapse_process_error", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Builds a "normal_collapse_process" node where the number of streams
// (DataPre/DataPost pairs) and Process calls are configured independently.
std::shared_ptr<Node> Add_Normal_Collapse_Process_Node2(
    uint32_t stream_times, uint32_t process_times,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_collapse_process", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(stream_times).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_))
      .Times(process_times)
      .InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_))
      .Times(stream_times)
      .InSequence(s1);
  return node;
}

// Builds a "normal_collapse_process" node. When `repeat` is false, `times`
// supplies {DataPre, Process, DataPost} aggregate counts; when true, the
// expectations are times[0] interleaved DataPre/Process/DataPost rounds.
std::shared_ptr<Node> Add_Normal_Collapse_Process_Node(
    std::vector<uint32_t> times, bool repeat,
    std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal_collapse_process", {"In_1"}, {"Out_1"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  if (!repeat) {
    EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(times[0]).InSequence(s1);
    EXPECT_CALL(*node_fu, Process(testing::_)).Times(times[1]).InSequence(s1);
    EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(times[2]).InSequence(s1);
  } else {
    for (uint32_t i = 0; i < times[0]; i++) {
      EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
      EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
      EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
    }
  }
  return node;
}

// Builds a "stream_normal_info_2" source node expecting one
// DataPre/Process/DataPost cycle.
std::shared_ptr<Node> Add_Stream_Normal_Info_2_Node(
    std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("stream_normal_info_2", {}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a "stream_tail_filter" node: one DataPre, `times` Process calls,
// one DataPost, strictly ordered.
std::shared_ptr<Node> Add_Stream_Tail_Filter_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node =
      Add_Node("stream_tail_filter", {"In_1"}, {"Out_1"}, std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, DataPre(testing::_)).Times(1).InSequence(s1);
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  EXPECT_CALL(*node_fu, DataPost(testing::_)).Times(1).InSequence(s1);
  return node;
}

// Builds a two-output "normal-condition" branch node; only Process is
// expected (`times` calls).
std::shared_ptr<Node> Add_Normal_Condition_Node(
    uint32_t times, std::shared_ptr<Configuration> config = nullptr) {
  auto node = Add_Node("normal-condition", {"In_1"}, {"Out_1", "Out_2"},
                       std::move(config));
  Sequence s1;
  auto node_fu = std::dynamic_pointer_cast<MockFlowUnit>(
      node->GetFlowUnitGroup()->GetExecutorUnit());
  EXPECT_CALL(*node_fu, Process(testing::_)).Times(times).InSequence(s1);
  return node;
}

// Shared driver for the AddRun/StreamAddRun tests: builds an "add" or
// "stream_add" node (batch_size=3), feeds it two batches of matched input
// pairs, runs it once, and checks the two summed output buffers.
void NodeRunTest::TestAdd(const std::string &add_flowunit_name) {
  auto match_at_node = std::make_shared<Node>();
  ConfigurationBuilder configbuilder;
  configbuilder.AddProperty("batch_size", "3");
  auto config = configbuilder.Build();
  std::shared_ptr<Node> add_node;

  if (add_flowunit_name == "add") {
    add_node = Add_Add_Node(config);
  } else if (add_flowunit_name == "stream_add") {
    add_node = Add_Stream_Add_Node(config);
  }
  auto input_node = Add_Test_2_0_Node();

  auto device_ = flow_->GetDevice();
  auto input_map_1 =
      std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>();
  BuildDataQueue(match_at_node.get(), input_map_1, device_);

  auto input_map_2 =
      std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>();
  BuildDataQueue(match_at_node.get(), input_map_2, device_);

  auto add_output_port = add_node->GetOutputPort("Out_1");
  EXPECT_TRUE(add_output_port->ConnectPort(input_node->GetInputPort("In_1")));

  auto add_queue_1 = add_node->GetInputPort("In_1")->GetQueue();
  auto add_queue_2 = add_node->GetInputPort("In_2")->GetQueue();
  add_queue_1->PushBatch(&input_map_1["In_1"]);
  add_queue_2->PushBatch(&input_map_1["In_2"]);

  add_queue_1->PushBatch(&input_map_2["In_1"]);
  add_queue_2->PushBatch(&input_map_2["In_2"]);

  auto queue_1 = input_node->GetInputPort("In_1")->GetQueue();

  EXPECT_EQ(add_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(queue_1->Size(), 2);

  // NOTE(review): "buffer_vecort_0" is a typo for "buffer_vector_0"; it is
  // purely local to this function and safe to rename in a follow-up.
  std::vector<std::shared_ptr<Buffer>> buffer_vecort_0;
  queue_1->PopBatch(&buffer_vecort_0);
  EXPECT_EQ(buffer_vecort_0[0]->GetBytes(), 40);
  EXPECT_EQ(buffer_vecort_0[1]->GetBytes(), 40);

  // Each output buffer holds 10 ints; expected value per element is
  // 10 + 2 * i (sum of the two generated input sequences).
  auto *data_result = (int *)buffer_vecort_0[0]->ConstData();
  for (int i = 0; i < 10; i++) {
    EXPECT_EQ(data_result[i], 10 + 2 * i);
  }

  auto *data_result_2 = (int *)buffer_vecort_0[1]->ConstData();
  for (int i = 0; i < 10; i++) {
    EXPECT_EQ(data_result_2[i], 10 + 2 * i);
  }
}

// Verifies Node::Init port validation: mismatched port lists fail with
// STATUS_BADCONF, matching lists succeed, duplicate input names collapse,
// and unknown ports resolve to nullptr.
TEST_F(NodeTest, Init) {
  ConfigurationBuilder configbuilder;
  auto config = configbuilder.Build();
  auto flowunit_mgr_ = FlowUnitManager::GetInstance();
  auto node = std::make_shared<Node>();
  node->SetFlowUnitInfo("test_2_inputs_2_outputs", "cpu", "0", flowunit_mgr_);
  EXPECT_EQ(node->Init({"In_1", "In_2"}, {"Out_1"}, config), STATUS_BADCONF);
  EXPECT_EQ(node->Init({"In_1", "In_2"}, {"Out_1", "Out_2"}, config),
            STATUS_SUCCESS);
  EXPECT_EQ(node->GetInputNum(), 2);
  EXPECT_EQ(node->GetOutputNum(), 2);
  EXPECT_NE(node->GetInputPort("In_1"), nullptr);
  EXPECT_NE(node->GetInputPort("In_2"), nullptr);
  EXPECT_NE(node->GetOutputPort("Out_1"), nullptr);
  EXPECT_NE(node->GetOutputPort("Out_2"), nullptr);
  EXPECT_EQ(node->GetOutputPort("In_None"), nullptr);

  // Duplicate "In_1" in the list is deduplicated to two distinct inputs.
  auto another_node = std::make_shared<Node>();
  another_node->SetFlowUnitInfo("test_2_0", "cpu", "0", flowunit_mgr_);
  EXPECT_EQ(another_node->Init({"In_1", "In_1", "In_2"}, {}, config),
            STATUS_SUCCESS);
  EXPECT_EQ(another_node->GetInputNum(), 2);
  EXPECT_EQ(another_node->GetOutputNum(), 0);
  EXPECT_NE(another_node->GetInputPort("In_1"), nullptr);
  EXPECT_EQ(another_node->Init({}, {}, config), STATUS_BADCONF);

  // NOTE(review): invalid_node is created but never Init'd or asserted on;
  // presumably just exercising SetFlowUnitInfo with an unknown name.
  auto invalid_node = std::make_shared<Node>();
  invalid_node->SetFlowUnitInfo("invalid_test", "cpu", "0", flowunit_mgr_);
}

// Verifies that a batch of inner events sent via SendBatchEvent is
// retrievable unchanged from the node's event port.
TEST_F(NodeTest, SendEvent) {
  ConfigurationBuilder configbuilder;
  auto config = configbuilder.Build();
  auto flowunit_mgr_ = FlowUnitManager::GetInstance();
  auto node = std::make_shared<Node>();
  node->SetFlowUnitInfo("test_0_2", "cpu", "0", flowunit_mgr_);

  EXPECT_EQ(node->Init({}, {"Out_1", "Out_2"}, config), STATUS_OK);

  auto event = std::make_shared<FlowUnitInnerEvent>(
      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);
  auto event_vector = std::vector<std::shared_ptr<FlowUnitInnerEvent>>();
  event_vector.push_back(event);
  EXPECT_EQ(node->SendBatchEvent(event_vector), STATUS_OK);
  FlowunitEventList events = nullptr;
  EXPECT_EQ(node->GetEventPort()->Recv(events), STATUS_OK);
  EXPECT_EQ(events->size(), 1);
  EXPECT_EQ(events->at(0), event);
}

// With no input queued, GenInputMatchStreamData succeeds and yields nothing.
TEST_F(NodeRecvTest, RecvEmpty) {
  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  EXPECT_TRUE(match_stream_data_list.empty());
}

// Mismatched streams on the two input ports must fail with STATUS_FAULT
// and produce no match-stream data.
TEST_F(NodeRecvTest, RecvMismatch) {
  node_->SetInputInOrder(true);

  auto port_1 = node_->GetInputPort("In_1");
  auto input_queue_1 = port_1->GetQueue();
  auto origin_node2_input1 = node2_input1_mismatch_;
  input_queue_1->PushBatch(&node2_input1_mismatch_);

  auto port_2 = node_->GetInputPort("In_2");
  auto input_queue_2 = port_2->GetQueue();
  auto origin_node2_input2 = node2_input2_mismatch_;
  input_queue_2->PushBatch(&node2_input2_mismatch_);

  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_FAULT);
  ASSERT_EQ(match_stream_data_list.size(), 0);
}

// With in-order matching disabled, both queued pairs are matched in one
// call; the fixture data comes back in reverse push order (back first).
TEST_F(NodeRecvTest, RecvNoOrder) {
  node_->SetInputInOrder(false);

  auto port_1 = node_->GetInputPort("In_1");
  auto input_queue_1 = port_1->GetQueue();
  auto origin_node2_input1 = node2_input1_;
  input_queue_1->PushBatch(&node2_input1_);

  auto port_2 = node_->GetInputPort("In_2");
  auto input_queue_2 = port_2->GetQueue();
  auto origin_node2_input2 = node2_input2_;
  input_queue_2->PushBatch(&node2_input2_);

  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  ASSERT_EQ(match_stream_data_list.size(), 2);
  auto& s1 = match_stream_data_list.front();
  auto& s2 = match_stream_data_list.back();
  ASSERT_EQ(s1->GetDataCount(), 1);
  ASSERT_EQ(s2->GetDataCount(), 1);

  auto s1_data = s1->GetBufferList();
  auto s2_data = s2->GetBufferList();
  ASSERT_EQ(s1_data->size(), 2);
  ASSERT_EQ(s2_data->size(), 2);
  auto s1_p1_bl = s1_data->at("In_1");
  auto s1_p2_bl = s1_data->at("In_2");
  ASSERT_EQ(s1_p1_bl.size(), 1);
  ASSERT_EQ(s1_p2_bl.size(), 1);
  EXPECT_EQ(s1_p1_bl.front(), origin_node2_input1.back());
  EXPECT_EQ(s1_p2_bl.front(), origin_node2_input2.back());
  auto s2_p1_bl = s2_data->at("In_1");
  auto s2_p2_bl = s2_data->at("In_2");
  ASSERT_EQ(s2_p1_bl.size(), 1);
  ASSERT_EQ(s2_p2_bl.size(), 1);
  EXPECT_EQ(s2_p1_bl.front(), origin_node2_input1.front());
  EXPECT_EQ(s2_p2_bl.front(), origin_node2_input2.front());

  EXPECT_EQ(0, input_queue_1->Size());
  EXPECT_EQ(0, input_queue_2->Size());
}

// With in-order matching enabled, only the head pair is produced and it
// preserves push order (front first).
TEST_F(NodeRecvTest, RecvOrder) {
  node_->SetInputInOrder(true);

  auto port_1 = node_->GetInputPort("In_1");
  auto input_queue_1 = port_1->GetQueue();
  auto origin_node2_input1 = node2_input1_;
  input_queue_1->PushBatch(&node2_input1_);

  auto port_2 = node_->GetInputPort("In_2");
  auto input_queue_2 = port_2->GetQueue();
  auto origin_node2_input2 = node2_input2_;
  input_queue_2->PushBatch(&node2_input2_);

  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  ASSERT_EQ(match_stream_data_list.size(), 1);
  auto& s1 = match_stream_data_list.front();
  ASSERT_EQ(s1->GetDataCount(), 1);

  auto s1_data = s1->GetBufferList();
  ASSERT_EQ(s1_data->size(), 2);
  auto s1_p1_bl = s1_data->at("In_1");
  auto s1_p2_bl = s1_data->at("In_2");
  ASSERT_EQ(s1_p1_bl.size(), 1);
  ASSERT_EQ(s1_p2_bl.size(), 1);
  EXPECT_EQ(s1_p1_bl.front(), origin_node2_input1.front());
  EXPECT_EQ(s1_p2_bl.front(), origin_node2_input2.front());

  EXPECT_EQ(0, input_queue_1->Size());
  EXPECT_EQ(0, input_queue_2->Size());
}

// With gather-all enabled, data plus end-flag buffers on both ports are
// delivered as a single match-stream carrying two entries per port.
TEST_F(NodeRecvTest, RecvGatherAll) {
  node_->SetInputInOrder(false);
  node_->SetInputGatherAll(true);

  auto port_1 = node_->GetInputPort("In_1");
  auto input_queue_1 = port_1->GetQueue();
  auto origin_node2_input1 = node2_input1_;
  input_queue_1->PushBatch(&node2_input1_);
  input_queue_1->PushBatch(&node2_input1_end_);

  auto port_2 = node_->GetInputPort("In_2");
  auto input_queue_2 = port_2->GetQueue();
  auto origin_node2_input2 = node2_input2_;
  input_queue_2->PushBatch(&node2_input2_);
  input_queue_2->PushBatch(&node2_input2_end_);

  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  ASSERT_EQ(match_stream_data_list.size(), 1);
  auto& s1 = match_stream_data_list.front();
  ASSERT_EQ(s1->GetDataCount(), 2);

  auto s1_data = s1->GetBufferList();
  ASSERT_EQ(s1_data->size(), 2);
  auto s1_p1_bl = s1_data->at("In_1");
  auto s1_p2_bl = s1_data->at("In_2");
  ASSERT_EQ(s1_p1_bl.size(), 2);
  ASSERT_EQ(s1_p2_bl.size(), 2);
  EXPECT_EQ(s1_p1_bl.front(), origin_node2_input1.front());
  EXPECT_EQ(s1_p2_bl.front(), origin_node2_input2.front());
  EXPECT_TRUE(BufferManageView::GetIndexInfo(s1_p1_bl.back())->IsEndFlag());
  EXPECT_TRUE(BufferManageView::GetIndexInfo(s1_p2_bl.back())->IsEndFlag());

  EXPECT_EQ(0, input_queue_1->Size());
  EXPECT_EQ(0, input_queue_2->Size());
}

// With gather-all + in-order: nothing is emitted until BOTH ports have
// received their end flags; the second call then yields the full stream.
TEST_F(NodeRecvTest, RecvTwice) {
  node_->SetInputGatherAll(true);
  node_->SetInputInOrder(true);

  // push first
  auto port_1 = node_->GetInputPort("In_1");
  auto input_queue_1 = port_1->GetQueue();
  auto origin_node2_input1 = node2_input1_;
  input_queue_1->PushBatch(&node2_input1_);

  auto port_2 = node_->GetInputPort("In_2");
  auto input_queue_2 = port_2->GetQueue();
  auto origin_node2_input2 = node2_input2_;
  input_queue_2->PushBatch(&node2_input2_);
  input_queue_2->PushBatch(&node2_input2_end_);

  std::list<std::shared_ptr<MatchStreamData>> match_stream_data_list;
  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  EXPECT_TRUE(match_stream_data_list.empty());

  // push again
  input_queue_1->PushBatch(&node2_input1_end_);

  EXPECT_EQ(
      node_->GenInputMatchStreamData(RunType::DATA, match_stream_data_list),
      STATUS_SUCCESS);
  ASSERT_EQ(match_stream_data_list.size(), 1);
  auto& s1 = match_stream_data_list.front();
  ASSERT_EQ(s1->GetDataCount(), 2);

  auto s1_data = s1->GetBufferList();
  ASSERT_EQ(s1_data->size(), 2);
  auto s1_p1_bl = s1_data->at("In_1");
  auto s1_p2_bl = s1_data->at("In_2");
  ASSERT_EQ(s1_p1_bl.size(), 2);
  ASSERT_EQ(s1_p2_bl.size(), 2);
  EXPECT_EQ(s1_p1_bl.front(), origin_node2_input1.front());
  EXPECT_EQ(s1_p2_bl.front(), origin_node2_input2.front());
  EXPECT_TRUE(BufferManageView::GetIndexInfo(s1_p1_bl.back())->IsEndFlag());
  EXPECT_TRUE(BufferManageView::GetIndexInfo(s1_p2_bl.back())->IsEndFlag());

  EXPECT_EQ(0, input_queue_1->Size());
  EXPECT_EQ(0, input_queue_2->Size());
}

// Runs a 0-in/2-out node and checks both downstream queues receive one data
// buffer plus an end-flag buffer with the expected payloads.
TEST_F(NodeRunTest, NodeOutput) {
  auto first_node = Add_Test_0_2_Node();
  auto second_node = Add_Test_2_0_Node();

  auto first_output_port_1 =
      first_node->GetOutputPort("Out_1");
  auto first_output_port_2 = first_node->GetOutputPort("Out_2");
  EXPECT_TRUE(
      first_output_port_1->ConnectPort(second_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      first_output_port_2->ConnectPort(second_node->GetInputPort("In_2")));
  first_node->Run(DATA);

  auto second_input_queue_1 = second_node->GetInputPort("In_1")->GetQueue();
  auto second_input_queue_2 = second_node->GetInputPort("In_2")->GetQueue();

  // Each downstream queue holds one data buffer and one end-flag buffer.
  EXPECT_EQ(second_input_queue_1->Size(), 2);
  EXPECT_EQ(second_input_queue_2->Size(), 2);

  std::vector<std::shared_ptr<Buffer>> p1_bl;
  std::vector<std::shared_ptr<Buffer>> p2_bl;
  second_input_queue_1->PopBatch(&p1_bl);
  second_input_queue_2->PopBatch(&p2_bl);
  EXPECT_EQ(p1_bl.size(), 2);
  EXPECT_EQ(p2_bl.size(), 2);
  auto p1_b1 = p1_bl.front();
  auto p1_b2 = p1_bl.back();
  auto p2_b1 = p2_bl.front();
  auto p2_b2 = p2_bl.back();
  EXPECT_TRUE(BufferManageView::GetIndexInfo(p1_b2)->IsEndFlag());
  EXPECT_TRUE(BufferManageView::GetIndexInfo(p2_b2)->IsEndFlag());
  EXPECT_EQ(p1_b1->GetBytes(), 40);
  EXPECT_EQ(p2_b1->GetBytes(), 40);
  // Port 1 emits 0..9, port 2 emits 10..19 (10 int32 values each).
  const auto *p1_b1_ptr = (const int32_t *)p1_b1->ConstData();
  const auto *p2_b1_ptr = (const int32_t *)p2_b1->ConstData();
  for (size_t i = 0; i < 10; ++i) {
    EXPECT_EQ(p1_b1_ptr[i], i);
    EXPECT_EQ(p2_b1_ptr[i], i + 10);
  }
}

TEST_F(NodeRunTest, AddRun) { TestAdd("add"); }

TEST_F(NodeRunTest, StreamAddRun) { TestAdd("stream_add"); }

// End-to-end graph: source -> condition -> (expand -> collapse | passthrough)
// -> stream_add -> sink; even-index buffers travel the expand/collapse branch.
// NOTE(review): "Garther"/"Conditionn"/"Orgin" are misspellings baked into
// helper and flowunit names defined elsewhere -- do not rename here alone.
TEST_F(NodeRunTest, GartherScatterRun) {
  auto output_node = Add_Test_Orgin_0_2_Node();
  auto condition_node = Add_Conditionn_Node();
  auto expand_node = Add_Expand_Normal_Node();
  auto collapse_node = Add_Collapse_Normal_Node();
  auto stream_add_node = Add_Stream_Add_Node();
  auto input_node = Add_Test_2_0_Node();

  auto output_port_1 = output_node->GetOutputPort("Out_1");
  auto output_port_2 = output_node->GetOutputPort("Out_2");
  EXPECT_TRUE(output_port_1->ConnectPort(condition_node->GetInputPort("In_1")));

  auto condition_port_1 = condition_node->GetOutputPort("Out_1");
  EXPECT_TRUE(condition_port_1->ConnectPort(expand_node->GetInputPort("In_1")));

  auto expand_port_1 = expand_node->GetOutputPort("Out_1");
  EXPECT_TRUE(expand_port_1->ConnectPort(collapse_node->GetInputPort("In_1")));

  auto condition_port_2 = condition_node->GetOutputPort("Out_2");
  EXPECT_TRUE(
      condition_port_2->ConnectPort(stream_add_node->GetInputPort("In_1")));

  // Both branch outputs converge on stream_add's In_1.
  auto collapse_port_1 = collapse_node->GetOutputPort("Out_1");
  EXPECT_TRUE(
      collapse_port_1->ConnectPort(stream_add_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      output_port_2->ConnectPort(stream_add_node->GetInputPort("In_2")));

  auto add_port = stream_add_node->GetOutputPort("Out_1");
  EXPECT_TRUE(add_port->ConnectPort(input_node->GetInputPort("In_1")));

  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(condition_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(stream_add_node->Run(DATA), STATUS_SUCCESS);

  auto queue_1 = input_node->GetInputPort("In_1")->GetQueue();
  std::vector<std::shared_ptr<Buffer>> buffer_vector;
  queue_1->PopBatch(&buffer_vector);
  // 10 data buffers + 1 end flag.
  EXPECT_EQ(buffer_vector.size(), 11);
  for (int i = 0; i < 10; i++) {
    auto *data_result = (int *)buffer_vector[i]->ConstData();
    if (i % 2 == 0) {
      EXPECT_EQ(data_result[0], 20 + 6 * i);
    } else {
      EXPECT_EQ(data_result[0], 10 + 2 * i);
    }
  }

  auto end_flag = buffer_vector.back();
  EXPECT_TRUE(BufferManageView::GetIndexInfo(end_flag)->IsEndFlag());
}

// An error produced by the source must propagate through a normal expand /
// pass / normal collapse chain and finally stop the sink (STATUS_STOP).
// NOTE(review): "Collaspe" in the test name is a misspelling of "Collapse".
TEST_F(NodeRunTest, NormalErrorThroughNormalCollaspe) {
  auto output_node = Add_Error_Start_Normal_Node();
  auto expand_node = Add_Expand_Normal_Node();
  auto simple_pass_node = Add_Simple_Pass_Node(0);
  auto collapse_node = Add_Collapse_Normal_Node();
  auto input_node = Add_Test_1_0_Node();

  auto output_port_1 = output_node->GetOutputPort("Out_1");
  auto expand_node_port = expand_node->GetOutputPort("Out_1");
  auto stream_add_port = simple_pass_node->GetOutputPort("Out_1");
  auto collapse_node_port = collapse_node->GetOutputPort("Out_1");
  EXPECT_TRUE(output_port_1->ConnectPort(expand_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      expand_node_port->ConnectPort(simple_pass_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      stream_add_port->ConnectPort(collapse_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      collapse_node_port->ConnectPort(input_node->GetInputPort("In_1")));

  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(input_node->Run(DATA), STATUS_STOP);
}

// Same error-propagation scenario as above but through a stream expand
// node, which needs an extra EVENT run to flush its unfinished data.
TEST_F(NodeRunTest, NormalErrorThroughStreamCollaspe) {
  auto output_node = Add_Error_Start_Normal_Node();
  auto expand_node = Add_Expand_Stream_Node();
  auto simple_pass_node = Add_Simple_Pass_Node(0);
  auto collapse_node = Add_Collapse_Normal_Node();
  auto input_node = Add_Test_1_0_Node();

  auto output_port_1 = output_node->GetOutputPort("Out_1");
  auto expand_node_port = expand_node->GetOutputPort("Out_1");
  auto stream_add_port = simple_pass_node->GetOutputPort("Out_1");
  auto collapse_node_port = collapse_node->GetOutputPort("Out_1");
  EXPECT_TRUE(output_port_1->ConnectPort(expand_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      expand_node_port->ConnectPort(simple_pass_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      stream_add_port->ConnectPort(collapse_node->GetInputPort("In_1")));
  EXPECT_TRUE(
      collapse_node_port->ConnectPort(input_node->GetInputPort("In_1")));

  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);
  // Stream expand requires an EVENT pass to emit remaining data.
  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);
  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(input_node->Run(DATA), STATUS_STOP);
}

// Scatter/gather pipeline: scatter runs a DATA pass then an EVENT pass;
// the gather input should then hold 10 data buffers plus stream markers,
// all belonging to the same stream.
TEST_F(NodeRunTest, StreamGartherScatterRun) {
  auto output_node = Add_Test_0_2_Node();
  auto scatter_node = Add_Scatter_Node();
  auto gather_node = Add_Garther_Node();
  auto add_node = Add_Add_Node();
  auto input_node = Add_Test_2_0_Node();

  auto output_port_1 = output_node->GetOutputPort("Out_1");
  auto output_port_2 = output_node->GetOutputPort("Out_2");
  auto scatter_output_port = scatter_node->GetOutputPort("Out_1");
  auto garther_output_port = gather_node->GetOutputPort("Out_1");
  auto add_output_port = add_node->GetOutputPort("Out_1");

  EXPECT_TRUE(output_port_1->ConnectPort(scatter_node->GetInputPort("In_1")));
  EXPECT_TRUE(output_port_2->ConnectPort(add_node->GetInputPort("In_2")));
  EXPECT_TRUE(
      scatter_output_port->ConnectPort(gather_node->GetInputPort("In_1")));
  EXPECT_TRUE(garther_output_port->ConnectPort(add_node->GetInputPort("In_1")));
  EXPECT_TRUE(add_output_port->ConnectPort(input_node->GetInputPort("In_1")));

  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(scatter_node->Run(DATA), STATUS_SUCCESS);
  EXPECT_EQ(scatter_node->Run(EVENT), STATUS_SUCCESS);

  std::vector<std::shared_ptr<Buffer>> buffer_vector;
  auto queue_1 = gather_node->GetInputPort("In_1")->GetQueue();
  queue_1->PopBatch(&buffer_vector);
  EXPECT_EQ(buffer_vector.size(), 12);
  // The first two buffers must come from the same underlying stream.
  auto *s1 =
      BufferManageView::GetIndexInfo(buffer_vector[0])->GetStream().get();
  auto *s2 =
      BufferManageView::GetIndexInfo(buffer_vector[1])->GetStream().get();
  EXPECT_EQ(s1, s2);
  for (int i = 0; i < 10; i++) {
    auto *data_result = (int *)buffer_vector[i]->ConstData();
    EXPECT_EQ(data_result[0],
i);\n  }\n  queue_1->PushBatch(&buffer_vector);\n  buffer_vector.clear();\n\n  std::vector<std::shared_ptr<Buffer>> buffer_vector_one;\n  std::vector<std::shared_ptr<Buffer>> buffer_vector_two;\n  EXPECT_EQ(gather_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(gather_node->Run(EVENT), STATUS_SUCCESS);\n  auto queue_2 = add_node->GetInputPort(\"In_1\")->GetQueue();\n  auto queue_3 = add_node->GetInputPort(\"In_2\")->GetQueue();\n  queue_2->PopBatch(&buffer_vector_one);\n  queue_3->PopBatch(&buffer_vector_two);\n  EXPECT_EQ(buffer_vector_one.size(), 2);\n  EXPECT_EQ(buffer_vector_two.size(), 2);\n  EXPECT_EQ(buffer_vector_one[0]->GetBytes(), 40);\n  EXPECT_EQ(buffer_vector_two[0]->GetBytes(), 40);\n\n  auto *data_result = (int *)buffer_vector_one[0]->ConstData();\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(data_result[i], i);\n  }\n  queue_2->PushBatch(&buffer_vector_one);\n  queue_3->PushBatch(&buffer_vector_two);\n  buffer_vector_two.clear();\n  buffer_vector_one.clear();\n\n  std::vector<std::shared_ptr<Buffer>> final_buffer_vector;\n  EXPECT_EQ(add_node->Run(DATA), STATUS_SUCCESS);\n  auto queue_4 = input_node->GetInputPort(\"In_1\")->GetQueue();\n  queue_4->PopBatch(&final_buffer_vector);\n  EXPECT_EQ(final_buffer_vector.size(), 2);\n  EXPECT_EQ(final_buffer_vector[0]->GetBytes(), 40);\n  auto *add_data_result = (int *)final_buffer_vector[0]->ConstData();\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(add_data_result[i], 10 + 2 * i);\n  }\n  final_buffer_vector.clear();\n}\n\nTEST_F(NodeRunTest, ConditionRun) {\n  auto output_node = Add_Test_0_2_Node();\n  auto scatter_node = Add_Scatter_Node();\n  auto condition_node = Add_Conditionn_Node();\n  auto garther_node = Add_Garther_Node();\n  auto add_node = Add_Add_Node();\n  auto input_node = Add_Test_2_0_Node();\n\n  auto output_port_1 = output_node->GetOutputPort(\"Out_1\");\n  auto output_port_2 = output_node->GetOutputPort(\"Out_2\");\n  auto scatter_output_port = 
scatter_node->GetOutputPort(\"Out_1\");\n  auto condition_output_1_port = condition_node->GetOutputPort(\"Out_1\");\n  auto condition_output_2_port = condition_node->GetOutputPort(\"Out_2\");\n  auto garther_output_port = garther_node->GetOutputPort(\"Out_1\");\n  auto add_output_port = add_node->GetOutputPort(\"Out_1\");\n\n  EXPECT_TRUE(output_port_1->ConnectPort(scatter_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(output_port_2->ConnectPort(add_node->GetInputPort(\"In_2\")));\n  EXPECT_TRUE(\n      scatter_output_port->ConnectPort(condition_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      condition_output_1_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      condition_output_2_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(garther_output_port->ConnectPort(add_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(add_output_port->ConnectPort(input_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(scatter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(scatter_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(condition_node->Run(DATA), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> buffer_vector;\n  auto queue = garther_node->GetInputPort(\"In_1\")->GetQueue();\n  queue->PopBatch(&buffer_vector);\n  EXPECT_EQ(buffer_vector.size(), 14);  // contain 4 end_flag\n  queue->PushBatch(&buffer_vector);\n  buffer_vector.clear();\n\n  EXPECT_EQ(garther_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(garther_node->Run(EVENT), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> add_vector_1;\n  std::vector<std::shared_ptr<Buffer>> add_vector_2;\n  auto add_queue_1 = add_node->GetInputPort(\"In_1\")->GetQueue();\n  auto add_queue_2 = add_node->GetInputPort(\"In_2\")->GetQueue();\n  add_queue_1->PopBatch(&add_vector_1);\n  add_queue_2->PopBatch(&add_vector_2);\n  EXPECT_EQ(add_vector_1.size(), 2);\n  EXPECT_EQ(add_vector_2.size(), 2);\n  
add_queue_1->PushBatch(&add_vector_1);\n  add_queue_2->PushBatch(&add_vector_2);\n  add_vector_1.clear();\n  add_vector_2.clear();\n\n  EXPECT_EQ(add_node->Run(DATA), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> final_buffer_vector;\n  auto queue_4 = input_node->GetInputPort(\"In_1\")->GetQueue();\n  queue_4->PopBatch(&final_buffer_vector);\n  EXPECT_EQ(final_buffer_vector.size(), 2);\n  EXPECT_EQ(final_buffer_vector[0]->GetBytes(), 40);\n  EXPECT_TRUE(\n      BufferManageView::GetIndexInfo(final_buffer_vector[1])->IsEndFlag());\n  auto *add_data_result = (int *)final_buffer_vector[0]->ConstData();\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(add_data_result[i], 10 + 2 * i);\n  }\n  final_buffer_vector.clear();\n}\n\n/*\n   output_node ---> loop_node ---> end_node\n                   |         |\n                   |         |\n                    <--------\n*/\n\nTEST_F(NodeRunTest, LoopRunBatchTwice) {\n  ConfigurationBuilder loop_configbuilder;\n  auto config = loop_configbuilder.Build();\n\n  auto output_node = Add_Test_0_1_Batch_Node();\n  output_node->SetPriority(0);\n  auto loop_node = Add_Loop_Node(config);\n  loop_node->SetPriority(1);\n  auto end_node = Add_Test_1_0_Batch_Node();\n  end_node->SetPriority(2);\n  auto output_0_1_port = output_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_0_1_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n  auto input_ports = output_0_1_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(0);\n  }\n\n  auto output_loop_port = loop_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_loop_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n  auto output_loop_end_port = loop_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(\n      output_loop_end_port->ConnectPort(end_node->GetInputPort(\"In_1\")));\n  input_ports = output_loop_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  
}\n\n  input_ports = output_loop_end_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  }\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  for (int index = 0; index < 10; index++) {\n    EXPECT_EQ(loop_node->Run(DATA), STATUS_SUCCESS);\n\n    if (index == 4) {\n      EXPECT_EQ(output_node->Open(), STATUS_SUCCESS);\n      EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n      auto queue = loop_node->GetInputPort(\"In_1\")->GetQueue();\n      EXPECT_EQ(queue->Size(), 22);\n    }\n  }\n  EXPECT_EQ(end_node->Run(DATA), STATUS_STOP);\n\n  for (int index = 0; index < 6; index++) {\n    EXPECT_EQ(loop_node->Run(DATA), STATUS_SUCCESS);\n  }\n  EXPECT_EQ(end_node->Run(DATA), STATUS_STOP);\n}\n\n/*\n   output_node ---> loop_node ---> end_node\n                   |         |\n                   |         |\n                    <--------\n*/\n\nTEST_F(NodeRunTest, LoopRunBatch) {\n  ConfigurationBuilder loop_configbuilder;\n  loop_configbuilder.AddProperty(\"queue_size\", \"1\");\n  auto config = loop_configbuilder.Build();\n\n  auto output_node = Add_Test_0_1_Batch_Node();\n  output_node->SetPriority(0);\n  auto loop_node = Add_Loop_Node(config);\n  loop_node->SetPriority(1);\n  auto end_node = Add_Test_1_0_Batch_Node();\n  end_node->SetPriority(2);\n  auto output_0_1_port = output_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_0_1_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n  auto input_ports = output_0_1_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(0);\n  }\n\n  auto output_loop_port = loop_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_loop_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n  auto output_loop_end_port = loop_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(\n      output_loop_end_port->ConnectPort(end_node->GetInputPort(\"In_1\")));\n  input_ports = output_loop_port->GetConnectInPort();\n  for (const 
auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  }\n\n  input_ports = output_loop_end_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  }\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  for (int index = 0; index < 11; index++) {\n    for (int i = 0; i < 11; i++) {\n      EXPECT_EQ(loop_node->Run(DATA), STATUS_SUCCESS);\n    }\n  }\n  EXPECT_EQ(end_node->Run(DATA), STATUS_STOP);\n}\n\n/*\n   output_node ---> loop_node ---> end_node\n                   |         |\n                   |         |\n                  <--loop_end--\n*/\n\nTEST_F(NodeRunTest, LoopRunBatchMultiFlowUnit) {\n  ConfigurationBuilder loop_configbuilder;\n  loop_configbuilder.AddProperty(\"queue_size\", \"3\");\n  auto config = loop_configbuilder.Build();\n\n  auto output_node = Add_Test_0_1_Batch_Node();\n  output_node->SetPriority(0);\n  auto loop_node = Add_Loop_Node(config);\n  loop_node->SetPriority(1);\n  auto loop_end_node = Add_Loop_End_Node();\n  loop_end_node->SetPriority(2);\n  auto end_node = Add_Test_1_0_Batch_Node();\n  end_node->SetPriority(3);\n  auto output_0_1_port = output_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_0_1_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n  auto input_ports = output_0_1_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(0);\n  }\n\n  auto output_loop_output1_port = loop_node->GetOutputPort(\"Out_1\");\n  auto output_loop_output2_port = loop_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(output_loop_output1_port->ConnectPort(\n      loop_end_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      output_loop_output2_port->ConnectPort(end_node->GetInputPort(\"In_1\")));\n  input_ports = output_loop_output1_port->GetConnectInPort();\n  for (const auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  }\n\n  input_ports = output_loop_output2_port->GetConnectInPort();\n  for 
(const auto &input_port : input_ports) {\n    input_port->SetPriority(1);\n  }\n\n  auto output_loop_end_port = loop_end_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      output_loop_end_port->ConnectPort(loop_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  for (int index = 0; index < 10; index++) {\n    for (int i = 0; i < 4; i++) {\n      EXPECT_EQ(loop_node->Run(DATA), STATUS_SUCCESS);\n    }\n    EXPECT_EQ(loop_end_node->Run(DATA), STATUS_SUCCESS);\n  }\n  EXPECT_EQ(end_node->Run(DATA), STATUS_STOP);\n}\n\nTEST_F(NodeRunTest, StreamInfo) {\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 1});\n  auto simple_pass_node = Add_Simple_Pass_Node(15);\n  auto stream_mid_node = Add_Stream_Mid_Node();\n  auto stream_end_node = Add_Stream_End_Node(2);\n  auto final_input_node = Add_Test_2_0_Node();\n\n  auto start_info_port = stream_info_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_info_port->ConnectPort(stream_start_node->GetInputPort(\"In_1\")));\n  auto start_output_port = stream_start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_output_port->ConnectPort(simple_pass_node->GetInputPort(\"In_1\")));\n\n  auto simple_pass_port = simple_pass_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      simple_pass_port->ConnectPort(stream_mid_node->GetInputPort(\"In_1\")));\n\n  auto mid_output_port = stream_mid_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      mid_output_port->ConnectPort(stream_end_node->GetInputPort(\"In_1\")));\n  auto end_output_port = stream_end_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      end_output_port->ConnectPort(final_input_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  std::vector<std::shared_ptr<Buffer>> start_buffer_vector;\n  auto 
queue_2 = stream_mid_node->GetInputPort(\"In_1\")->GetQueue();\n  queue_2->PopBatch(&start_buffer_vector);\n  EXPECT_EQ(start_buffer_vector.size(), 5);\n  for (int i = 0; i < 5; i++) {\n    auto *data_result = (int *)start_buffer_vector[i]->ConstData();\n    EXPECT_EQ(data_result[0], i);\n  }\n  auto stream =\n      BufferManageView::GetIndexInfo(start_buffer_vector[0])->GetStream();\n  auto data_meta = stream->GetStreamMeta();\n  auto start_num =\n      *(std::static_pointer_cast<int>(data_meta->GetMeta(\"start_index\")).get());\n  auto end_num =\n      *(std::static_pointer_cast<int>(data_meta->GetMeta(\"end_index\")).get());\n  auto interval =\n      *(std::static_pointer_cast<int>(data_meta->GetMeta(\"interval\")).get());\n  EXPECT_EQ(start_num, 0);\n  EXPECT_EQ(end_num, 15);\n  EXPECT_EQ(interval, 3);\n  queue_2->PushBatch(&start_buffer_vector);\n  start_buffer_vector.clear();\n\n  EXPECT_EQ(stream_mid_node->Run(DATA), STATUS_SUCCESS);\n  std::vector<std::shared_ptr<Buffer>> mid_buffer_vector;\n  auto queue_3 = stream_end_node->GetInputPort(\"In_1\")->GetQueue();\n  queue_3->PopBatch(&mid_buffer_vector);\n  EXPECT_EQ(mid_buffer_vector.size(), 2);\n  auto *data_result_0 = (int *)mid_buffer_vector[0]->ConstData();\n  auto *data_result_1 = (int *)mid_buffer_vector[1]->ConstData();\n\n  auto data_group_meta_1 = BufferManageView::GetIndexInfo(mid_buffer_vector[0])\n                               ->GetStream()\n                               ->GetStreamMeta();\n  EXPECT_EQ(\n      *std::static_pointer_cast<int>(data_group_meta_1->GetMeta(\"magic_num\")),\n      3343);\n  EXPECT_EQ(data_result_0[0], 0);\n  EXPECT_EQ(data_result_1[0], 3);\n  queue_3->PushBatch(&mid_buffer_vector);\n  mid_buffer_vector.clear();\n\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n  auto queue_4 = final_input_node->GetInputPort(\"In_1\")->GetQueue();\n  EXPECT_EQ(queue_4->Size(), 0);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  
EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  queue_2->PopBatch(&start_buffer_vector);\n\n  EXPECT_EQ(start_buffer_vector.size(), 5);\n  for (int i = 0; i < 5; i++) {\n    auto *data_result = (int *)start_buffer_vector[i]->ConstData();\n    EXPECT_EQ(data_result[0], i + 5);\n  }\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  queue_2->PopBatch(&start_buffer_vector);\n  EXPECT_EQ(start_buffer_vector.size(), 12);\n\n  int base_v = 0;\n  for (int i = 0; i < 12; i++) {\n    if (BufferManageView::GetIndexInfo(start_buffer_vector[i])->IsEndFlag()) {\n      continue;\n    }\n    auto *data_result = (int *)start_buffer_vector[i]->ConstData();\n    EXPECT_EQ(data_result[0], base_v + 5);\n    ++base_v;\n  }\n  queue_2->PushBatch(&start_buffer_vector);\n  start_buffer_vector.clear();\n\n  EXPECT_EQ(stream_mid_node->Run(DATA), STATUS_SUCCESS);\n  queue_3->PopBatch(&mid_buffer_vector);\n  EXPECT_EQ(mid_buffer_vector.size(), 5);\n  for (int i = 0; i < 5; i++) {\n    auto buffer = mid_buffer_vector[i];\n    auto index = BufferManageView::GetIndexInfo(buffer);\n    if (index->IsEndFlag()) {\n      continue;\n    }\n    auto *data_result = (int *)mid_buffer_vector[i]->ConstData();\n    EXPECT_EQ(data_result[0], index->GetIndex() * 3);\n  }\n  queue_3->PushBatch(&mid_buffer_vector);\n  mid_buffer_vector.clear();\n\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(EVENT), STATUS_SUCCESS);\n  std::vector<std::shared_ptr<Buffer>> end_buffer_vector;\n  queue_4->PopBatch(&end_buffer_vector);\n  EXPECT_EQ(end_buffer_vector.size(), 2);\n  auto *final_result = (int *)end_buffer_vector[0]->ConstData();\n  EXPECT_EQ(final_result[0], 30);\n}\n\nvoid NodeRunTest::TestWrongAdd(const std::string &flowunit_name,\n                               const Status &run_status) {\n  ConfigurationBuilder 
configbuilderflowunit;\n  auto config_flowunit = configbuilderflowunit.Build();\n  config_flowunit->SetProperty(\"need_check_output\", true);\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n\n  auto output_node = Add_Test_0_2_Node();\n  auto wrong_add_node = std::make_shared<Node>();\n  wrong_add_node->SetFlowUnitInfo(flowunit_name, \"cpu\", \"0\", flowunit_mgr_);\n  EXPECT_EQ(wrong_add_node->Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, config_flowunit),\n            STATUS_SUCCESS);\n  EXPECT_EQ(wrong_add_node->Open(), STATUS_SUCCESS);\n\n  auto output_port_1 = output_node->GetOutputPort(\"Out_1\");\n  auto output_port_2 = output_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(output_port_1->ConnectPort(wrong_add_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(output_port_2->ConnectPort(wrong_add_node->GetInputPort(\"In_2\")));\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(wrong_add_node->Run(DATA), run_status);\n}\n\nTEST_F(NodeRunTest, Run_Normal_Count_InSame) {\n  TestWrongAdd(\"wrong_add\", STATUS_STOP);\n}\n\nTEST_F(NodeRunTest, Run_Normal_Count_InSame_2) {\n  TestWrongAdd(\"wrong_add_2\", STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Run_Collapse_Not_One) {\n  auto output_node = Add_Test_0_2_Node();\n  auto scatter_node = Add_Scatter_Node();\n  auto garther_node = Add_Garther_Gen_More_Node();\n\n  auto output_port_1 = output_node->GetOutputPort(\"Out_1\");\n  auto output_port_2 = output_node->GetOutputPort(\"Out_2\");\n  auto scatter_output_port = scatter_node->GetOutputPort(\"Out_1\");\n  auto garther_output_port = garther_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_port_1->ConnectPort(scatter_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      scatter_output_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(scatter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(garther_node->Run(DATA), STATUS_STOP);\n}\n\nTEST_F(NodeRunTest, CacheFull) {\n  
ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"queue_size\", \"5\");\n  auto config = configbuilder.Build();\n  auto start_node = Add_Test_Orgin_0_2_Node();\n  auto pass_node = Add_Simple_Pass_Node(10);\n  auto receive_node = Add_Test_2_0_Node(config);\n\n  auto output_port_1 = start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_port_1->ConnectPort(pass_node->GetInputPort(\"In_1\")));\n  auto output_port_2 = start_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(output_port_2->ConnectPort(receive_node->GetInputPort(\"In_1\")));\n  auto add_output_port_1 = pass_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      add_output_port_1->ConnectPort(receive_node->GetInputPort(\"In_2\")));\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n\n  auto queue_1 = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  auto queue_2 = receive_node->GetInputPort(\"In_2\")->GetQueue();\n  EXPECT_EQ(queue_1->Size(), 11);\n  EXPECT_EQ(queue_2->Size(), 0);\n\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(queue_1->Size(), 11);\n  EXPECT_EQ(queue_2->Size(), 11);\n\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(queue_1->Size(), 6);\n  EXPECT_EQ(queue_2->Size(), 6);\n\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_STOP);\n  EXPECT_EQ(queue_1->Size(), 0);\n  EXPECT_EQ(queue_2->Size(), 0);\n}\n\n// thread_pool has not implement set priority\nTEST_F(NodeRunTest, DISABLED_RunPriority) {\n  auto device_ = flow_->GetDevice();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n\n  ConfigurationBuilder configbuilder;\n  configbuilder.AddProperty(\"batch_size\", \"5\");\n  auto config = configbuilder.Build();\n  auto run_node = std::make_shared<Node>();\n  run_node->SetFlowUnitInfo(\"get_priority\", \"cpu\", \"0\", flowunit_mgr_);\n  auto print_node = std::make_shared<Node>();\n  print_node->SetFlowUnitInfo(\"print\", 
\"cpu\", \"0\", flowunit_mgr_);\n  EXPECT_EQ(run_node->Init({\"In_1\"}, {\"Out_1\"}, config), STATUS_SUCCESS);\n  EXPECT_EQ(run_node->Open(), STATUS_SUCCESS);\n  EXPECT_EQ(print_node->Init({\"In_1\"}, {}, config), STATUS_SUCCESS);\n  EXPECT_EQ(print_node->Open(), STATUS_SUCCESS);\n\n  auto output_port_1 = run_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(output_port_1->ConnectPort(print_node->GetInputPort(\"In_1\")));\n\n  int32_t default_priority = 3;\n  size_t data_size = 5;\n  size_t buffer_size = 3;\n  std::vector<std::shared_ptr<Buffer>> in_data(data_size * buffer_size,\n                                               nullptr);\n  for (size_t i = 0; i < buffer_size; ++i) {\n    for (size_t j = 0; j < data_size; ++j) {\n      auto buffer = std::make_shared<Buffer>(device_);\n      buffer->Build(1 * sizeof(int));\n      BufferManageView::SetPriority(buffer, default_priority + i);\n      in_data[i * data_size + j] = buffer;\n    }\n  }\n\n  auto in_queue = run_node->GetInputPort(\"In_1\")->GetQueue();\n  in_queue->PushBatch(&in_data);\n\n  EXPECT_EQ(in_queue->Size(), data_size * buffer_size);\n  for (size_t i = 0; i < buffer_size; ++i) {\n    EXPECT_EQ(run_node->Run(DATA), STATUS_SUCCESS);\n\n    auto out_queue = print_node->GetInputPort(\"In_1\")->GetQueue();\n    std::vector<std::shared_ptr<Buffer>> buffer_vector;\n    out_queue->PopBatch(&buffer_vector);\n\n    EXPECT_EQ(buffer_vector.size(), data_size);\n    for (size_t i = 0; i < data_size; i++) {\n      EXPECT_EQ(buffer_vector[i]->GetBytes(), 1 * sizeof(int));\n      auto *data_result = (int *)buffer_vector[i]->ConstData();\n      if (i == data_size - 1) {\n        EXPECT_EQ(data_result[i], 0);\n      } else {\n        EXPECT_EQ(data_result[i], default_priority + (buffer_size - 1 - i));\n      }\n    }\n  }\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error_Recieve_InVisible) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 5, 1});\n  auto 
simple_error_node = Add_Simple_Error_Node(25);\n  // Invisible error end node do not execute process function\n  auto error_end_node = Add_Error_End_Normal_Node(23);\n\n  stream_info_node->SetName(\"stream_info_normal_node\");\n  stream_info_node->SetPriority(0);\n  stream_start_node->SetName(\"stream_start_node\");\n  stream_start_node->SetPriority(1);\n  simple_error_node->SetName(\"simple_error_node\");\n  simple_error_node->SetPriority(2);\n  error_end_node->SetName(\"error_end_normal_node\");\n  error_end_node->SetPriority(3);\n  EXPECT_EQ(error_end_node->IsExceptionVisible(), true);\n  error_end_node->SetExceptionVisible(false);\n\n  EXPECT_TRUE(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      stream_start_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_error_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      error_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  auto recieve_queue = error_end_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 5);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n\n  recieve_queue = error_end_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 27);\n\n  EXPECT_EQ(error_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error_Recieve_Visible) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto 
stream_start_node = Add_Stream_Start_Node({1, 5, 1});\n  auto simple_error_node = Add_Simple_Error_Node(25);\n  auto error_end_node = Add_Error_End_Normal_Node(25);\n\n  stream_info_node->SetName(\"stream_info_normal_node\");\n  stream_info_node->SetPriority(0);\n  stream_start_node->SetName(\"stream_start_node\");\n  stream_start_node->SetPriority(1);\n  simple_error_node->SetName(\"simple_error_node\");\n  simple_error_node->SetPriority(2);\n  error_end_node->SetName(\"error_end_normal_node\");\n  error_end_node->SetPriority(3);\n  error_end_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      stream_start_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_error_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      error_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  auto recieve_queue = error_end_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 5);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n\n  recieve_queue = error_end_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 27);\n\n  EXPECT_EQ(error_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error_Expand_Visible) {\n  auto start_node = Add_Error_Start_Normal_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(0);\n  auto 
simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Normal_Collapse_Recieve_Error_Node({1, 0, 1});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  receive_error_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      expand_process_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_pass_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      receive_error_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recv_queue, 3);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error_Expand_Invisible) {\n  auto start_node = Add_Error_Start_Normal_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(1);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Normal_Collapse_Recieve_Error_Node({1, 0, 1});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  expand_process_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      expand_process_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_pass_node->GetInputPort(\"In_1\")));\n  
EXPECT_TRUE(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      receive_error_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recv_queue, 3);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error_Collapse_Visible) {\n  auto start_node = Add_Error_Start_Normal_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(0);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto collapse_node = Add_Normal_Collapse_Process_Node({1, 0, 1}, false);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n  collapse_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      expand_process_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_pass_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      collapse_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      receive_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 2);\n}\n\nTEST_F(NodeRunTest, 
Normal_Process_Error_Collapse_Invisible) {\n  auto start_node = Add_Error_Start_Normal_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(0);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto collapse_node = Add_Normal_Collapse_Process_Node2(1, 0);\n  auto receive_node = Add_Stream_Process_Node({1, 1, 1});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n  receive_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      expand_process_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_pass_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      collapse_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      receive_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 2);\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Process_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 5, 1});\n  auto simple_error_node = Add_Simple_Error_Node(25);\n  auto simple_pass_node = Add_Simple_Pass_Node(23);\n  auto recieve_node = Add_Stream_Process_Node({1, 25, 1});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  
simple_error_node->SetName(\"simple_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  recieve_node->SetName(\"recieve_node\");\n  recieve_node->SetExceptionVisible(true);\n\n  EXPECT_TRUE(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      stream_start_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_error_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      simple_pass_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n      recieve_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  auto stream_start_queue = stream_start_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueNotHasDataError(stream_start_queue);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto queue = recieve_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(queue, 10);\n  EXPECT_EQ(recieve_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(recieve_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Recv_InVisible_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 2, 0});\n  auto simple_error_node = Add_Stream_Datapre_Error_Node({1, 0, 
0});\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto recieve_node = Add_Stream_Process_Node({1, 1, 0});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  recieve_node->SetName(\"recieve_node\");\n  recieve_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                recieve_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n\n  auto queue = recieve_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(queue, 1);\n  EXPECT_EQ(recieve_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Recv_Visible_Error) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 0});\n  auto simple_error_node_cfg = builder.Build();\n  simple_error_node_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto simple_error_node =\n      Add_Stream_In_Process_Error_Node({1, 3, 1}, simple_error_node_cfg);\n  auto simple_pass_node = 
Add_Simple_Pass_Node(6);\n  auto recieve_node_cfg = builder.Build();\n  recieve_node_cfg->SetProperty<uint32_t>(\"batch_size\", 6);\n  auto recieve_node = Add_Stream_Process_Node({1, 1, 0}, recieve_node_cfg);\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  recieve_node->SetName(\"recieve_node\");\n\n  simple_pass_node->SetExceptionVisible(true);\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                recieve_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n\n  auto queue = recieve_node->GetInputPort(\"In_1\")->GetQueue();\n  EXPECT_EQ(queue->Size(), 6);\n  EXPECT_EQ(recieve_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Send_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 0});\n  auto simple_pass_node = Add_Simple_Pass_Node(15);\n  auto simple_error_node 
= Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto simple_error_queue = simple_error_node->GetInputPort(\"In_1\")->GetQueue();\n  EXPECT_EQ(simple_error_queue->Size(), 5);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_queue->Size(), 10);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_DataPre_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 5, 1});\n  auto simple_error_node = Add_Stream_Datapre_Error_Node({1, 0, 1});\n  auto receive_node = Add_Collapse_Recieve_Error_Node(1);\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  receive_node->SetExceptionVisible(true);\n\n  
EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  auto recieve_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 1);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  auto simple_error_queue = simple_error_node->GetInputPort(\"In_1\")->GetQueue();\n  EXPECT_EQ(simple_error_queue->Size(), 5);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Process_Error) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 5, 1});\n  auto simple_error_node_cfg = builder.Build();\n  simple_error_node_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto simple_error_node =\n      Add_Stream_Process_Error_Node({1, 5, 1}, simple_error_node_cfg);\n  auto receive_node = Add_Collapse_Recieve_Error_Node(1);\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  
simple_error_node->SetName(\"simple_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  receive_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  auto recieve_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 1);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  auto simple_error_queue = simple_error_node->GetInputPort(\"In_1\")->GetQueue();\n  EXPECT_EQ(simple_error_queue->Size(), 22);\n\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Recv_Visible_Error) {\n  auto error_start_node = Add_Error_Start_Node();\n  auto simple_stream_node = Add_Stream_Process_Node({1, 1, 1});\n  auto receive_node = Add_Collapse_Recieve_Error_Node(1);\n\n  error_start_node->SetName(\"error_start_node\");\n  simple_stream_node->SetName(\"simple_stream_node\");\n  receive_node->SetName(\"receive_node\");\n  simple_stream_node->SetExceptionVisible(true);\n  receive_node->SetExceptionVisible(true);\n\n  
EXPECT_EQ(error_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_stream_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_stream_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(error_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n\n  auto recieve_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueNotHasDataError(recieve_queue);\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Recv_Invisible_Error) {\n  auto error_start_node = Add_Error_Start_Node();\n  auto simple_stream_node = Add_Stream_Process_Node({1, 0, 1});\n  auto receive_node = Add_Collapse_Recieve_Error_Node(1);\n\n  error_start_node->SetName(\"error_start_node\");\n  simple_stream_node->SetName(\"simple_stream_node\");\n  receive_node->SetName(\"receive_node\");\n\n  receive_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(error_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_stream_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_stream_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(error_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n\n  auto recieve_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recieve_queue, 2);\n  EXPECT_EQ(receive_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Send_Error) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 0});\n\n  auto simple_stream_node_cfg = builder.Build();\n  simple_stream_node_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto simple_stream_node =\n      
Add_Stream_Process_Node({1, 3, 0}, simple_stream_node_cfg);\n\n  auto simple_error_node = Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_stream_node->SetName(\"simple_stream_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_stream_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_stream_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Expand_Process_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_process_error_node = Add_Normal_Expand_Process_Error_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(12);\n  auto receive_error_node = Add_Normal_Collapse_Recieve_Error_Node({4, 4, 4});\n\n  start_node->SetName(\"start_node\");\n  expand_process_error_node->SetName(\"expand_process_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n\n  receive_error_node->SetExceptionVisible(true);\n\n  
EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_error_node->Run(DATA), STATUS_SUCCESS);\n\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Expand_Recieve_Invisible_Error) {\n  auto start_node = Add_Error_Start_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(0);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Normal_Collapse_Recieve_Error_Node({1, 0, 1});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  receive_error_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  
CheckQueueHasDataError(recv_queue, 3);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Expand_Recieve_Visible_Error) {\n  auto start_node = Add_Error_Start_Node();\n  auto expand_process_node = Add_Normal_Expand_Process_Node(1);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Normal_Collapse_Recieve_Error_Node({1, 0, 1});\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  expand_process_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recv_queue, 3);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Normal_Expand_Send_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto normal_start_node = Add_Normal_Expand_Start_Node(3);\n  auto simple_pass_node = Add_Simple_Pass_Node(10);\n  auto simple_error_node = Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  normal_start_node->SetName(\"normal_start_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n\n  
EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                normal_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(normal_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(normal_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(normal_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(normal_start_node->Run(EVENT), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Expand_DataPre_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_datapre_error_node = Add_Expand_Datapre_Error_Node();\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Collapse_Recieve_Error_Node(1);\n\n  start_node->SetName(\"start_node\");\n  expand_datapre_error_node->SetName(\"expand_datapre_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  receive_error_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_datapre_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_datapre_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), 
STATUS_SUCCESS);\n  EXPECT_EQ(expand_datapre_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto receive_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 2);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_datapre_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_datapre_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_datapre_error_node->Run(EVENT), STATUS_SUCCESS);\n\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Expand_Process_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_process_error_node = Add_Expand_Process_Error_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(12);\n  auto receive_error_node = Add_Collapse_Recieve_Error_Node(4);\n\n  start_node->SetName(\"start_node\");\n  expand_process_error_node->SetName(\"expand_process_error_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n\n  receive_error_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_error_node->Run(DATA), STATUS_SUCCESS);\n  
EXPECT_EQ(expand_process_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_error_node->Run(EVENT), STATUS_SUCCESS);\n  auto pass_queue = simple_pass_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(pass_queue, 18);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(receive_error_node->Run(EVENT), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Expand_Recieve_Invisible_Error) {\n  auto start_node = Add_Error_Start_Node();\n  auto expand_process_node = Add_Expand_Process_Node(0);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Collapse_Recieve_Error_Node(1, false);\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  receive_error_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recv_queue, 2);\n 
 EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Expand_Recieve_Visible_Error) {\n  auto start_node = Add_Error_Start_Node();\n  auto expand_process_node = Add_Expand_Process_Node(1);\n  auto simple_pass_node = Add_Simple_Pass_Node(0);\n  auto receive_error_node = Add_Collapse_Recieve_Error_Node(1, false);\n\n  start_node->SetName(\"start_node\");\n  expand_process_node->SetName(\"expand_process_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  receive_error_node->SetName(\"receive_error_node\");\n  expand_process_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_process_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_process_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_process_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto recv_queue = receive_error_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(recv_queue, 3);\n  EXPECT_EQ(receive_error_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, DISABLED_Stream_Expand_Recieve_Event_Error) {\n  auto device_ = flow_->GetDevice();\n  auto input_map_1 =\n      std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>();\n  BuildDataEventStart(input_map_1, device_);\n\n  auto stream_start_node = Add_Stream_Start_Node({1, 2, 1});\n\n  ConfigurationBuilder builder;\n  auto simple_stream_node_cfg = builder.Build();\n  simple_stream_node_cfg->SetProperty<uint32_t>(\"batch_size\", 10);\n  auto simple_stream_node =\n      
Add_Stream_Process_Node({1, 2, 1}, simple_stream_node_cfg);\n\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_stream_node->SetName(\"simple_stream_node\");\n  simple_stream_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_stream_node->GetInputPort(\"In_1\")),\n            true);\n\n  auto start_queue_1 = stream_start_node->GetInputPort(\"In_1\")->GetQueue();\n  auto index_start_vector = input_map_1[\"In_1\"];\n  start_queue_1->PushBatch(&index_start_vector);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n\n  auto input_map_2 =\n      std::unordered_map<std::string, std::vector<std::shared_ptr<Buffer>>>();\n  BuildDataEventStop(input_map_2);\n  auto index_stop_vector = input_map_2[\"In_1\"];\n  start_queue_1->PushBatch(&index_stop_vector);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_stream_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Expand_Send_Error) {\n  auto stream_info_node = Add_Stream_Normal_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 0});\n  auto simple_pass_node = Add_Simple_Pass_Node(10);\n  auto simple_error_node = Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  stream_info_node->SetName(\"stream_info_node\");\n  stream_start_node->SetName(\"stream_start_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  simple_error_node->SetName(\"simple_error_node\");\n\n  EXPECT_EQ(stream_info_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_start_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(stream_start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  
EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Collapse_DataPre_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_node = Add_Expand_Process_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(16);\n  auto collapse_error_node = Add_Collapse_DataPre_Error_Node(4);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_error_node->SetName(\"collapse_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), 
STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 4);\n}\n\nTEST_F(NodeRunTest, Collapse_Process_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_node = Add_Expand_Process_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(16);\n  auto collapse_error_node = Add_Collapse_Process_Error_Node(4);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_error_node->SetName(\"receive_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  
EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(EVENT), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  std::vector<std::shared_ptr<Buffer>> error_buffer_vector;\n  receive_queue->PopBatch(&error_buffer_vector);\n  uint64_t error_buffer_size{0};\n  for (auto& buffer : error_buffer_vector) {\n    if (buffer->HasError()) {\n      ++error_buffer_size;\n    }\n  }\n  EXPECT_EQ(error_buffer_size, 4);\n  for (uint32_t i = 0; i < 4; i++) {\n    EXPECT_TRUE(error_buffer_vector[i]->HasError());\n  }\n  receive_queue->PushBatch(&error_buffer_vector);\n}\n\nTEST_F(NodeRunTest, Stream_Collapse_Send_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_node = Add_Expand_Process_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(16);\n  auto collapse_node = Add_Collapse_Process_Node(4);\n  auto stream_error_node = Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  stream_error_node->SetName(\"stream_error_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                
expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto collapse_queue = collapse_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueNotHasDataError(collapse_queue);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  CheckQueueNotHasDataError(collapse_queue);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Stream_Collapse_Visible_Recv_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_error_node = Add_Expand_Process_Error_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(12);\n  auto collapse_node = Add_Collapse_Process_Node(4);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  
expand_error_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n  collapse_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueNotHasDataError(receive_queue);\n}\n\nTEST_F(NodeRunTest, DISABLED_Stream_Collapse_Invisible_Recv_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_error_node = 
Add_Expand_Process_Error_Node(2);\n  auto simple_pass_node = Add_Simple_Pass_Node(4);\n  auto collapse_node = Add_Collapse_Process_Node(1);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_error_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  
CheckQueueHasDataError(receive_queue, 5);\n}\n\nTEST_F(NodeRunTest, Normal_Collapse_DataPre_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_node = Add_Normal_Expand_Process_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(16);\n  auto collapse_error_node = Add_Normal_Collapse_Datapre_Error_Node(4);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_error_node->SetName(\"collapse_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 5);\n}\n\nTEST_F(NodeRunTest, Normal_Collapse_Process_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_node = Add_Normal_Expand_Process_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(16);\n  auto collapse_error_node = Add_Normal_Collapse_Process_Error_Node(4);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  
simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_error_node->SetName(\"collapse_error_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_error_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 5);\n}\n\nTEST_F(NodeRunTest, Normal_Collapse_Visible_Recv_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_error_node = Add_Normal_Expand_Process_Error_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(12);\n  auto collapse_node = Add_Normal_Collapse_Process_Node({4, 4, 4}, false);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_error_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n  collapse_node->SetExceptionVisible(true);\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                
simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueNotHasDataError(receive_queue);\n}\n\nTEST_F(NodeRunTest, Normal_Collapse_Invisible_Recv_Error) {\n  auto start_node = Add_Normal_Start_Node();\n  auto expand_error_node = Add_Normal_Expand_Process_Error_Node(4);\n  auto simple_pass_node = Add_Simple_Pass_Node(12);\n  auto collapse_node = Add_Normal_Collapse_Process_Node({4, 3, 4}, false);\n  auto receive_node = Add_Stream_Process_Node({0, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_error_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  receive_node->SetName(\"receive_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_error_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_error_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                receive_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_error_node->Run(DATA), 
STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  auto collapse_queue = collapse_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(collapse_queue, 18);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n\n  auto receive_queue = receive_node->GetInputPort(\"In_1\")->GetQueue();\n  CheckQueueHasDataError(receive_queue, 5);\n}\n\nTEST_F(NodeRunTest, Normal_Collapse_Send_Error) {\n  auto start_node = Add_Stream_Normal_Info_2_Node();\n  auto expand_node = Add_Normal_Expand_Start_Node(3);\n  auto simple_pass_node = Add_Simple_Pass_Node(15);\n  auto collapse_node = Add_Normal_Collapse_Process_Node({2}, true);\n  auto stream_error_node = Add_Stream_Datapre_Error_Node({1, 0, 0});\n\n  start_node->SetName(\"start_node\");\n  expand_node->SetName(\"expand_node\");\n  simple_pass_node->SetName(\"simple_pass_node\");\n  collapse_node->SetName(\"collapse_node\");\n  stream_error_node->SetName(\"stream_error_node\");\n\n  EXPECT_EQ(start_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                expand_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(expand_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                simple_pass_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(simple_pass_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                collapse_node->GetInputPort(\"In_1\")),\n            true);\n  EXPECT_EQ(collapse_node->GetOutputPort(\"Out_1\")->ConnectPort(\n                stream_error_node->GetInputPort(\"In_1\")),\n            true);\n\n  EXPECT_EQ(start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_error_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), 
STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Completion_Unfinish_Normal_Data) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 1});\n\n  auto stream_tail_filter_node_cfg = builder.Build();\n  stream_tail_filter_node_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto stream_tail_filter_node =\n      Add_Stream_Tail_Filter_Node(3, stream_tail_filter_node_cfg);\n\n  auto simple_pass_node = Add_Simple_Pass_Node(10);\n  auto stream_end_node = Add_Stream_End_Node(1);\n\n  auto start_info_port = stream_info_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_info_port->ConnectPort(stream_start_node->GetInputPort(\"In_1\")));\n  auto start_output_port = stream_start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(start_output_port->ConnectPort(\n      stream_tail_filter_node->GetInputPort(\"In_1\")));\n  auto mid_output_port = stream_tail_filter_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      mid_output_port->ConnectPort(simple_pass_node->GetInputPort(\"In_1\")));\n  auto simple_pass_output_port = simple_pass_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(simple_pass_output_port->ConnectPort(\n      stream_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(simple_pass_node->Run(DATA), 
STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Completion_Unfinish_Stream_Data) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 1});\n\n  auto stream_tail_filter_node_cfg = builder.Build();\n  stream_tail_filter_node_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto stream_tail_filter_node_1 =\n      Add_Stream_Tail_Filter_Node(3, stream_tail_filter_node_cfg);\n\n  auto stream_tail_filter_node2_cfg = builder.Build();\n  stream_tail_filter_node2_cfg->SetProperty<uint32_t>(\"batch_size\", 5);\n  auto stream_tail_filter_node_2 =\n      Add_Stream_Tail_Filter_Node(2, stream_tail_filter_node2_cfg);\n\n  auto stream_end_node = Add_Stream_End_Node(1);\n\n  auto start_info_port = stream_info_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_info_port->ConnectPort(stream_start_node->GetInputPort(\"In_1\")));\n  auto start_output_port = stream_start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(start_output_port->ConnectPort(\n      stream_tail_filter_node_1->GetInputPort(\"In_1\")));\n  auto mid_output_port = stream_tail_filter_node_1->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(mid_output_port->ConnectPort(\n      stream_tail_filter_node_2->GetInputPort(\"In_1\")));\n  auto simple_pass_output_port =\n      stream_tail_filter_node_2->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(simple_pass_output_port->ConnectPort(\n      stream_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_1->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_2->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_1->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_2->Run(DATA), STATUS_SUCCESS);\n  
EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_1->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node_2->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Completion_Unfinish_Expand_Collapse_Data) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 1});\n\n  auto stream_tail_filter_node_cfg = builder.Build();\n  stream_tail_filter_node_cfg->SetProperty<uint32_t>(\"batch_size\", 10);\n  auto stream_tail_filter_node =\n      Add_Stream_Tail_Filter_Node(2, stream_tail_filter_node_cfg);\n\n  auto expand_node = Add_Expand_Process_Node(10);\n  auto collapse_node = Add_Collapse_Process_Node(10);\n  auto stream_end_node = Add_Stream_End_Node(1);\n\n  auto start_info_port = stream_info_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_info_port->ConnectPort(stream_start_node->GetInputPort(\"In_1\")));\n  auto start_output_port = stream_start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(start_output_port->ConnectPort(\n      stream_tail_filter_node->GetInputPort(\"In_1\")));\n  auto stream_tail_output_port =\n      stream_tail_filter_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      stream_tail_output_port->ConnectPort(expand_node->GetInputPort(\"In_1\")));\n  auto expand_output_port = expand_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      expand_output_port->ConnectPort(collapse_node->GetInputPort(\"In_1\")));\n  auto collapse_output_port = collapse_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      collapse_output_port->ConnectPort(stream_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  
EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(expand_node->Run(EVENT), STATUS_SUCCESS);\n  }\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(collapse_node->Run(EVENT), STATUS_SUCCESS);\n  }\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(expand_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(collapse_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Completion_Unfinish_Condition_Data) {\n  ConfigurationBuilder builder;\n  auto stream_info_node = Add_Stream_Info_Node();\n  auto stream_start_node = Add_Stream_Start_Node({1, 3, 1});\n\n  auto stream_tail_filter_node_cfg = builder.Build();\n  stream_tail_filter_node_cfg->SetProperty<uint32_t>(\"batch_size\", 10);\n  auto stream_tail_filter_node =\n      Add_Stream_Tail_Filter_Node(2, stream_tail_filter_node_cfg);\n\n  auto condition_node = Add_Normal_Condition_Node(10);\n  auto stream_end_node = Add_Stream_End_Node(1);\n\n  auto start_info_port = stream_info_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(\n      start_info_port->ConnectPort(stream_start_node->GetInputPort(\"In_1\")));\n\n  auto start_output_port = stream_start_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(start_output_port->ConnectPort(\n      stream_tail_filter_node->GetInputPort(\"In_1\")));\n\n  auto stream_tail_output_port =\n      stream_tail_filter_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(stream_tail_output_port->ConnectPort(\n      condition_node->GetInputPort(\"In_1\")));\n\n  auto condition_output_port_1 = 
condition_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(condition_output_port_1->ConnectPort(\n      stream_end_node->GetInputPort(\"In_1\")));\n  auto condition_output_port_2 = condition_node->GetOutputPort(\"Out_2\");\n  EXPECT_TRUE(condition_output_port_2->ConnectPort(\n      stream_end_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(stream_info_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(condition_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_start_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(stream_tail_filter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(condition_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(stream_end_node->Run(DATA), STATUS_SUCCESS);\n}\n\nTEST_F(NodeRunTest, Dynamic_Config) {\n  ConfigurationBuilder configbuilderflowunit;\n  auto config_flowunit = configbuilderflowunit.Build();\n  auto flowunit_mgr_ = FlowUnitManager::GetInstance();\n  auto device_ = flow_->GetDevice();\n  auto dynamic_config_node = std::make_shared<Node>();\n  dynamic_config_node->SetFlowUnitInfo(\"dynamic_config\", \"cpu\", \"0\",\n                                       flowunit_mgr_);\n  dynamic_config_node->SetSessionManager(&node_test_session_manager);\n  auto dynamic_get_config_node_1 = std::make_shared<Node>();\n  dynamic_get_config_node_1->SetFlowUnitInfo(\"dynamic_get_config\", \"cpu\", \"0\",\n                                             flowunit_mgr_);\n  auto dynamic_get_config_node_2 = std::make_shared<Node>();\n  dynamic_get_config_node_2->SetFlowUnitInfo(\"dynamic_get_config\", \"cpu\", \"0\",\n                                             flowunit_mgr_);\n  auto dynamic_get_config_node_3 = std::make_shared<Node>();\n  
dynamic_get_config_node_3->SetFlowUnitInfo(\"dynamic_get_config_other\", \"cpu\",\n                                             \"0\", flowunit_mgr_);\n  auto dynamic_get_config_node_4 = std::make_shared<Node>();\n  dynamic_get_config_node_4->SetFlowUnitInfo(\"dynamic_get_config_other\", \"cpu\",\n                                             \"0\", flowunit_mgr_);\n  dynamic_get_config_node_1->SetName(\"dynamic_get_config_1\");\n  dynamic_get_config_node_2->SetName(\"dynamic_get_config_2\");\n\n  EXPECT_EQ(dynamic_config_node->Init({}, {\"Out_1\"}, config_flowunit),\n            STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_config_node->Open(), STATUS_SUCCESS);\n  EXPECT_EQ(\n      dynamic_get_config_node_1->Init({\"In_1\"}, {\"Out_1\"}, config_flowunit),\n      STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_get_config_node_1->Open(), STATUS_SUCCESS);\n  EXPECT_EQ(\n      dynamic_get_config_node_2->Init({\"In_1\"}, {\"Out_1\"}, config_flowunit),\n      STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_get_config_node_2->Open(), STATUS_SUCCESS);\n  EXPECT_EQ(\n      dynamic_get_config_node_3->Init({\"In_1\"}, {\"Out_1\"}, config_flowunit),\n      STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_get_config_node_3->Open(), STATUS_SUCCESS);\n\n  EXPECT_EQ(\n      dynamic_get_config_node_4->Init({\"In_1\"}, {\"Out_1\"}, config_flowunit),\n      STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_get_config_node_4->Open(), STATUS_SUCCESS);\n\n  auto dynamic_config_port = dynamic_config_node->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(dynamic_config_port->ConnectPort(\n      dynamic_get_config_node_1->GetInputPort(\"In_1\")));\n\n  auto dynamic_get_config_port_1 =\n      dynamic_get_config_node_1->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(dynamic_get_config_port_1->ConnectPort(\n      dynamic_get_config_node_2->GetInputPort(\"In_1\")));\n\n  auto dynamic_get_config_port_2 =\n      dynamic_get_config_node_2->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(dynamic_get_config_port_2->ConnectPort(\n      
dynamic_get_config_node_3->GetInputPort(\"In_1\")));\n\n  auto dynamic_get_config_port_3 =\n      dynamic_get_config_node_3->GetOutputPort(\"Out_1\");\n  EXPECT_TRUE(dynamic_get_config_port_3->ConnectPort(\n      dynamic_get_config_node_4->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(dynamic_config_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(dynamic_get_config_node_1->Run(DATA), STATUS_SUCCESS);\n  auto queue_1 = dynamic_get_config_node_2->GetInputPort(\"In_1\")->GetQueue();\n  std::vector<std::shared_ptr<Buffer>> buffer_vector_1;\n  queue_1->PopBatch(&buffer_vector_1);\n  std::string test_1;\n  buffer_vector_1[0]->Get(\"test\", test_1);\n  EXPECT_EQ(test_1, \"node.dynamic_get_config_1.test\");\n  queue_1->PushBatch(&buffer_vector_1);\n\n  EXPECT_EQ(dynamic_get_config_node_2->Run(DATA), STATUS_SUCCESS);\n  std::vector<std::shared_ptr<Buffer>> buffer_vector_2;\n  auto queue_2 = dynamic_get_config_node_3->GetInputPort(\"In_1\")->GetQueue();\n  queue_2->PopBatch(&buffer_vector_2);\n  std::string test_2;\n  buffer_vector_2[0]->Get(\"test\", test_2);\n  EXPECT_EQ(test_2, \"flowunit.dynamic_get_config.test\");\n  queue_2->PushBatch(&buffer_vector_2);\n\n  EXPECT_EQ(dynamic_get_config_node_3->Run(DATA), STATUS_SUCCESS);\n  auto queue_3 = dynamic_get_config_node_4->GetInputPort(\"In_1\")->GetQueue();\n  std::vector<std::shared_ptr<Buffer>> buffer_vector_3;\n  queue_3->PopBatch(&buffer_vector_3);\n  std::string test_3;\n  buffer_vector_3[0]->Get(\"test\", test_3);\n  EXPECT_EQ(test_3, \"nodes.test\");\n  queue_3->PushBatch(&buffer_vector_3);\n}\n\nTEST_F(NodeRunTest, ConditionSwitchRun) {\n  auto output_node = Add_Test_0_2_Node();\n  auto scatter_node = Add_Scatter_Node();\n  auto switch_case_node = Add_Switch_Case_Node();\n  auto garther_node = Add_Garther_Node();\n  auto add_node = Add_Add_Node();\n  auto input_node = Add_Test_2_0_Node();\n\n  auto output_port_1 = output_node->GetOutputPort(\"Out_1\");\n  auto output_port_2 = output_node->GetOutputPort(\"Out_2\");\n  auto 
scatter_output_port = scatter_node->GetOutputPort(\"Out_1\");\n  auto condition_output_1_port = switch_case_node->GetOutputPort(\"Out_1\");\n  auto condition_output_2_port = switch_case_node->GetOutputPort(\"Out_2\");\n  auto condition_output_3_port = switch_case_node->GetOutputPort(\"Out_3\");\n  auto garther_output_port = garther_node->GetOutputPort(\"Out_1\");\n  auto add_output_port = add_node->GetOutputPort(\"Out_1\");\n\n  EXPECT_TRUE(output_port_1->ConnectPort(scatter_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(output_port_2->ConnectPort(add_node->GetInputPort(\"In_2\")));\n  EXPECT_TRUE(\n      scatter_output_port->ConnectPort(switch_case_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      condition_output_1_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      condition_output_2_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(\n      condition_output_3_port->ConnectPort(garther_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(garther_output_port->ConnectPort(add_node->GetInputPort(\"In_1\")));\n  EXPECT_TRUE(add_output_port->ConnectPort(input_node->GetInputPort(\"In_1\")));\n\n  EXPECT_EQ(output_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(scatter_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(scatter_node->Run(EVENT), STATUS_SUCCESS);\n  EXPECT_EQ(switch_case_node->Run(DATA), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> buffer_vector;\n  auto queue = garther_node->GetInputPort(\"In_1\")->GetQueue();\n  queue->PopBatch(&buffer_vector);\n  EXPECT_EQ(buffer_vector.size(), 16);  // contain 4 end_flag\n  queue->PushBatch(&buffer_vector);\n  buffer_vector.clear();\n\n  EXPECT_EQ(garther_node->Run(DATA), STATUS_SUCCESS);\n  EXPECT_EQ(garther_node->Run(EVENT), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> add_vector_1;\n  std::vector<std::shared_ptr<Buffer>> add_vector_2;\n  auto add_queue_1 = add_node->GetInputPort(\"In_1\")->GetQueue();\n  auto add_queue_2 = 
add_node->GetInputPort(\"In_2\")->GetQueue();\n  add_queue_1->PopBatch(&add_vector_1);\n  add_queue_2->PopBatch(&add_vector_2);\n  EXPECT_EQ(add_vector_1.size(), 2);\n  EXPECT_EQ(add_vector_2.size(), 2);\n  add_queue_1->PushBatch(&add_vector_1);\n  add_queue_2->PushBatch(&add_vector_2);\n  add_vector_1.clear();\n  add_vector_2.clear();\n\n  EXPECT_EQ(add_node->Run(DATA), STATUS_SUCCESS);\n\n  std::vector<std::shared_ptr<Buffer>> final_buffer_vector;\n  auto queue_4 = input_node->GetInputPort(\"In_1\")->GetQueue();\n  queue_4->PopBatch(&final_buffer_vector);\n  EXPECT_EQ(final_buffer_vector.size(), 2);\n  EXPECT_EQ(final_buffer_vector[0]->GetBytes(), 40);\n  EXPECT_TRUE(\n      BufferManageView::GetIndexInfo(final_buffer_vector[1])->IsEndFlag());\n  auto *add_data_result = (int *)final_buffer_vector[0]->ConstData();\n  for (int i = 0; i < 10; i++) {\n    EXPECT_EQ(add_data_result[i], 10 + 2 * i);\n  }\n  final_buffer_vector.clear();\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/port_test.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/port.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n\nnamespace modelbox {\n\nclass PortTest : public testing::Test {\n public:\n  PortTest() = default;\n\n protected:\n  std::shared_ptr<Node> node_;\n  void SetUp() override {\n    node_ = std::make_shared<Node>();\n    node_->SetFlowUnitInfo(\"test_2_inputs_2_outputs\", \"cpu\", \"0\", nullptr);\n  };\n  void TearDown() override{};\n};\n\nclass InPortTest : public testing::Test {\n public:\n  InPortTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nTEST_F(PortTest, Construct) {\n  auto port = Port(\"In_1\", node_);\n  auto name = port.GetName();\n  auto get_node = port.GetNode();\n  EXPECT_EQ(\"In_1\", name);\n  EXPECT_EQ(node_, get_node);\n}\n\nTEST_F(InPortTest, GetDataCount) {\n  auto port = std::make_shared<InPort>(\"In_1\", nullptr);\n  EXPECT_EQ(port->GetDataCount(), 0);\n\n  {\n    auto buffer = std::make_shared<Buffer>();\n    port->Send(buffer);\n    EXPECT_EQ(port->GetDataCount(), 1);\n  }\n\n  {\n    auto buffer = std::make_shared<Buffer>();\n    port->Send(buffer);\n    EXPECT_EQ(port->GetDataCount(), 2);\n  }\n\n  auto notify_port =\n      std::dynamic_pointer_cast<NotifyPort<Buffer, CustomCompare>>(port);\n  notify_port->Recv();\n  
EXPECT_EQ(port->GetDataCount(), 1);\n}\n\nclass EventPortTest : public testing::Test {\n public:\n  EventPortTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nTEST_F(EventPortTest, Send_Recv) {\n  EventPort event_port(\"test_event_port\", nullptr);\n\n  auto event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n  const int MAX_SEND_COUNT = 10;\n  for (int i = 0; i < MAX_SEND_COUNT; i++) {\n    EXPECT_EQ(event_port.Send(event), STATUS_OK);\n  }\n\n  FlowunitEventList events = nullptr;\n  EXPECT_EQ(event_port.Recv(events), STATUS_OK);\n\n  EXPECT_NE(events, nullptr);\n  EXPECT_EQ(events->size(), MAX_SEND_COUNT);\n  for (size_t i = 0; i < MAX_SEND_COUNT; i++) {\n    EXPECT_EQ(events->at(i), event);\n  }\n}\n\nTEST_F(EventPortTest, Empty) {\n  EventPort event_port(\"test_event_port\", nullptr);\n\n  EXPECT_TRUE(event_port.Empty());\n\n  auto event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n  const int MAX_SEND_COUNT = 10;\n  for (int i = 0; i < MAX_SEND_COUNT; i++) {\n    EXPECT_EQ(event_port.Send(event), STATUS_OK);\n  }\n\n  EXPECT_FALSE(event_port.Empty());\n\n  FlowunitEventList events = nullptr;\n  EXPECT_EQ(event_port.Recv(events), STATUS_OK);\n\n  EXPECT_TRUE(event_port.Empty());\n}\n\nTEST_F(EventPortTest, GetPriority_SetPriority) {\n  EventPort event_port(\"test_event_port\", nullptr);\n\n  EXPECT_EQ(event_port.GetPriority(), 0);\n  event_port.SetPriority(10);\n  EXPECT_EQ(event_port.GetPriority(), 10);\n}\n\nTEST_F(EventPortTest, NotifyPushEvent) {\n  EventPort event_port(\"test_event_port\", nullptr);\n\n  bool flag = false;\n  auto func = [&](bool no_need_flag) { flag = true; };\n\n  EXPECT_TRUE(!flag);\n\n  event_port.SetPushEventCallBack(func);\n  auto event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n  EXPECT_EQ(event_port.Send(event), STATUS_OK);\n  
event_port.NotifyPushEvent();\n\n  EXPECT_TRUE(flag);\n}\n\nTEST_F(EventPortTest, NotifyPopEvent) {\n  EventPort event_port(\"test_event_port\", nullptr);\n\n  bool flag = false;\n  auto func = [&]() { flag = true; };\n\n  EXPECT_TRUE(!flag);\n\n  event_port.SetPopEventCallBack(func);\n  auto event = std::make_shared<FlowUnitInnerEvent>(\n      FlowUnitInnerEvent::EXPAND_UNFINISH_DATA);\n  EXPECT_EQ(event_port.Send(event), STATUS_OK);\n  event_port.NotifyPushEvent();\n\n  EXPECT_TRUE(!flag);\n  FlowunitEventList events = nullptr;\n  EXPECT_EQ(event_port.Recv(events), STATUS_OK);\n  event_port.NotifyPopEvent();\n\n  EXPECT_TRUE(flag);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/session_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/session.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\nnamespace modelbox {\nclass SessionTest : public testing::Test {};\n\nTEST_F(SessionTest, SessionManage) {\n  SessionManager sess_mgr;\n  auto session = sess_mgr.CreateSession(nullptr);\n  ASSERT_NE(session, nullptr);\n  {\n    auto session1 = sess_mgr.CreateSession(nullptr);\n    ASSERT_NE(session1, nullptr);\n    auto session2 = sess_mgr.CreateSession(nullptr);\n    ASSERT_NE(session2, nullptr);\n    EXPECT_EQ(sess_mgr.GetSessions().size(), 3);\n  }\n  EXPECT_EQ(sess_mgr.GetSessions().size(), 1);\n}\n\nclass TestSessionIO : public SessionIO {\n public:\n  Status SetOutputMeta(const std::string &port_name,\n                       std::shared_ptr<DataMeta> meta) override {\n    return STATUS_OK;\n  }\n  Status Send(const std::string &port_name,\n              std::shared_ptr<BufferList> buffer_list) override {\n    return STATUS_OK;\n  }\n  Status Recv(OutputBufferList &map_buffer_list, int timeout = 0) override {\n    return STATUS_OK;\n  }\n  Status Close() override { return STATUS_OK; }\n  Status Shutdown() override { return 
STATUS_OK; }\n\n  bool TestSessionEnd() { return session_end_; }\n\n  std::shared_ptr<FlowUnitError> GetSessionError() { return error_; }\n\n protected:\n  void SessionEnd(std::shared_ptr<FlowUnitError> error = nullptr) override {\n    error_ = error;\n    session_end_ = true;\n  }\n\n  bool session_end_{false};\n  std::shared_ptr<FlowUnitError> error_;\n};\n\nTEST_F(SessionTest, SessionClose) {\n  SessionManager sess_mgr;\n  auto io1 = std::make_shared<TestSessionIO>();\n  auto io2 = std::make_shared<TestSessionIO>();\n  {\n    auto session = sess_mgr.CreateSession(nullptr);\n    session->SetSessionIO(io1);\n  }\n  {\n    auto session = sess_mgr.CreateSession(nullptr);\n    session->SetSessionIO(io2);\n    session->Close();\n  }\n  EXPECT_TRUE(io1->TestSessionEnd());\n  EXPECT_EQ(io1->GetSessionError(), nullptr);\n  EXPECT_TRUE(io2->TestSessionEnd());\n  ASSERT_NE(io2->GetSessionError(), nullptr);\n  auto end_error = io2->GetSessionError();\n  EXPECT_EQ(end_error->GetDesc(), \"EOF\");\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/stream_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/stream.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n\nnamespace modelbox {\nclass StreamTest : public testing::Test {};\n\nTEST_F(StreamTest, DataMetaTest) {\n  DataMeta data_meta;\n  auto value = std::make_shared<int32_t>();\n  *value = 123;\n  data_meta.SetMeta(\"test\", value);\n  auto result = std::static_pointer_cast<int32_t>(data_meta.GetMeta(\"test\"));\n  ASSERT_EQ(result, value);\n  EXPECT_EQ(*result, *value);\n  EXPECT_EQ(data_meta.GetMetas().size(), 1);\n\n  DataMeta data_meta2(data_meta);\n  auto result2 = std::static_pointer_cast<int32_t>(data_meta2.GetMeta(\"test\"));\n  ASSERT_EQ(result2, value);\n  EXPECT_EQ(*result2, *value);\n  EXPECT_EQ(data_meta.GetMetas().size(), 1);\n}\n\nTEST_F(StreamTest, StreamOrderTest) {\n  auto order = std::make_shared<StreamOrder>();\n  auto order2 = order->Copy();\n  order2->Expand(0);\n  auto order3 = order2->Copy();\n  order3->Expand(1);\n\n  auto order4 = order->Copy();\n  order4->Expand(1);\n  EXPECT_TRUE(*order3 < *order4);\n\n  auto order5 = order3->Copy();\n  order5->Collapse();\n  EXPECT_FALSE(*order5 < *order2);\n  
EXPECT_FALSE(*order2 < *order5);\n}\n\nTEST_F(StreamTest, StreamTest) {\n  Stream stream(nullptr);\n  EXPECT_FALSE(stream.ReachEnd());\n  stream.SetMaxBufferCount(3);\n  stream.IncreaseBufferCount();\n  EXPECT_FALSE(stream.ReachEnd());\n  stream.IncreaseBufferCount();\n  stream.IncreaseBufferCount();\n  EXPECT_TRUE(stream.ReachEnd());\n  EXPECT_EQ(stream.GetBufferCount(), 3);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/tensor_list_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/tensor_list.h\"\n\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"modelbox/base/log.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n\nnamespace modelbox {\nclass TensorListTest : public testing::Test {\n public:\n  TensorListTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n\n  void TearDown() override { flow_->Destroy(); };\n};\n\nTEST_F(TensorListTest, TensorList) {\n  auto device = flow_->GetDevice();\n\n  {\n    auto bl = std::make_shared<BufferList>(device);\n    TensorList tensor_list(bl);\n    EXPECT_EQ(tensor_list.GetBytes(), 0);\n  }\n}\n\nTEST_F(TensorListTest, TensorListBuild) {\n  auto device = flow_->GetDevice();\n  auto bl = std::make_shared<BufferList>(device);\n  TensorList tensor_list(bl);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n  auto status = tensor_list.Build<int>(shapes);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(tensor_list.Size(), shapes.size());\n\n  size_t size = BATCH_NUM * Volume(shapes[0]);\n  auto *data = 
tensor_list.MutableData<int>();\n  for (size_t i = 0; i < size; ++i) {\n    data[i] = i;\n  }\n\n  for (size_t i = 0; i < tensor_list.Size(); ++i) {\n    auto tensor_buffer = tensor_list[i];\n    EXPECT_EQ(tensor_list[i]->Shape(), shapes[i]);\n    const auto *tensor_data = tensor_list[i]->ConstData<int>();\n    auto tensor_size = tensor_list[i]->GetBytes() / sizeof(int);\n    for (size_t j = 0; j < tensor_size; ++j) {\n      EXPECT_EQ(tensor_data[j], i * tensor_size + j);\n    }\n  }\n}\n\nTEST_F(TensorListTest, TensorListBuildFromHost) {\n  auto device = flow_->GetDevice();\n  auto bl = std::make_shared<BufferList>(device);\n  TensorList tensor_list(bl);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n  auto size = Volume(shapes);\n  auto *data = (int *)malloc(size * sizeof(int));\n  Defer {\n    if (data) {\n      free(data);\n    }\n  };\n  EXPECT_NE(data, nullptr);\n  for (size_t i = 0; i < size; ++i) {\n    data[i] = i;\n  }\n\n  auto status =\n      tensor_list.BuildFromHost<int>(shapes, data, size * sizeof(int));\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(tensor_list.Size(), shapes.size());\n\n  for (size_t i = 0; i < tensor_list.Size(); ++i) {\n    EXPECT_EQ(tensor_list[i]->Shape(), shapes[i]);\n    const auto *tensor_data = tensor_list[i]->ConstData<int>();\n    auto tensor_size = tensor_list[i]->GetBytes() / sizeof(int);\n    for (size_t j = 0; j < tensor_size; ++j) {\n      EXPECT_EQ(tensor_data[j], i * tensor_size + j);\n    }\n  }\n}\n\nTEST_F(TensorListTest, SetShape) {\n  auto device = flow_->GetDevice();\n  auto bl = std::make_shared<BufferList>(device);\n  TensorList tensor_list(bl);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n  auto status = tensor_list.Build<int>(shapes);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(tensor_list.Size(), shapes.size());\n\n  status = tensor_list.SetShape<int>({BATCH_NUM, {3, 2, 1}});\n  EXPECT_EQ(status, 
STATUS_OK);\n\n  status = tensor_list.SetShape<float>({BATCH_NUM, {3, 2, 1}});\n  EXPECT_NE(status, STATUS_OK);\n\n  status = tensor_list.SetShape<int>({BATCH_NUM, {3, 2, 2}});\n  EXPECT_NE(status, STATUS_OK);\n\n  status = tensor_list.SetShape<int>({BATCH_NUM - 1, {3, 2, 1}});\n  EXPECT_NE(status, STATUS_OK);\n}\n\nTEST_F(TensorListTest, Shape) {\n  auto device = flow_->GetDevice();\n  auto bl = std::make_shared<BufferList>(device);\n  TensorList tensor_list(bl);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n  auto status = tensor_list.Build<int>(shapes);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(tensor_list.Size(), shapes.size());\n\n  for (size_t i = 0; i < tensor_list.Size(); ++i) {\n    EXPECT_EQ(tensor_list[i]->Shape(), shapes[i]);\n  }\n}\n\nTEST_F(TensorListTest, SetType) {\n  auto device = flow_->GetDevice();\n  auto bl = std::make_shared<BufferList>(device);\n  TensorList tensor_list(bl);\n\n  const int BATCH_NUM = 10;\n  std::vector<std::vector<size_t>> shapes(BATCH_NUM, {1, 2, 3});\n  auto status = tensor_list.Build<int>(shapes);\n  EXPECT_EQ(status, STATUS_OK);\n  EXPECT_EQ(tensor_list.Size(), shapes.size());\n\n  for (size_t i = 0; i < tensor_list.Size(); ++i) {\n    EXPECT_EQ(tensor_list[i]->Shape(), shapes[i]);\n  }\n}\n\nclass TensorBufferTest : public testing::Test {\n public:\n  TensorBufferTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n\n  void TearDown() override { flow_->Destroy(); };\n};\n\nTEST_F(TensorBufferTest, TensorBuffer) {\n  auto device = flow_->GetDevice();\n  TensorBuffer tensor(device);\n\n  constexpr int DATA_SIZE = 10;\n  std::vector<int> data(DATA_SIZE, 0);\n  for (size_t i = 0; i < data.size(); ++i) {\n    data[i] = i;\n  }\n\n  tensor.Build(data.data(), data.size() * sizeof(int), [](void *ptr) {});\n  tensor.SetShape<int>({2, 5});\n  
tensor.Set(\"Height\", 720);\n  tensor.Set(\"Width\", 1280);\n\n  TensorBuffer tensor2(tensor);\n  EXPECT_EQ(tensor.MutableData<int>(), tensor2.MutableData<int>());\n  EXPECT_EQ(tensor.Shape(), tensor2.Shape());\n  EXPECT_EQ(tensor.GetType(), tensor2.GetType());\n\n  int tensor_value = 0;\n  int tensor2_value = -1;\n  tensor.Get(\"Height\", tensor_value);\n  tensor2.Get(\"Height\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n\n  tensor_value = 0;\n  tensor2_value = -1;\n  tensor.Get(\"Width\", tensor_value);\n  tensor2.Get(\"Width\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n}\n\nTEST_F(TensorBufferTest, Copy) {\n  auto device = flow_->GetDevice();\n  TensorBuffer tensor(device);\n\n  constexpr int DATA_SIZE = 10;\n  std::vector<int> data(DATA_SIZE, 0);\n  for (size_t i = 0; i < data.size(); ++i) {\n    data[i] = i;\n  }\n\n  tensor.Build(data.data(), data.size() * sizeof(int), [](void *ptr) {});\n  tensor.SetShape<int>({2, 5});\n  tensor.Set(\"Height\", 720);\n  tensor.Set(\"Width\", 1280);\n\n  auto buffer = tensor.Copy();\n  auto tensor2 = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n  EXPECT_NE(nullptr, tensor2);\n\n  EXPECT_EQ(tensor.MutableData<int>(), tensor2->MutableData<int>());\n  EXPECT_EQ(tensor.Shape(), tensor2->Shape());\n  EXPECT_EQ(tensor.GetType(), tensor2->GetType());\n\n  int tensor_value = 0;\n  int tensor2_value = -1;\n  tensor.Get(\"Height\", tensor_value);\n  tensor2->Get(\"Height\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n\n  tensor_value = 0;\n  tensor2_value = -1;\n  tensor.Get(\"Width\", tensor_value);\n  tensor2->Get(\"Width\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n}\n\nTEST_F(TensorBufferTest, DeepCopy) {\n  auto device = flow_->GetDevice();\n  TensorBuffer tensor(device);\n\n  constexpr int DATA_SIZE = 10;\n  std::vector<int> data(DATA_SIZE, 0);\n  for (size_t i = 0; i < data.size(); ++i) {\n    data[i] = i;\n  }\n\n  tensor.Build(data.data(), data.size() 
* sizeof(int), [](void *ptr) {});\n  tensor.SetShape<int>({2, 5});\n  tensor.Set(\"Height\", 720);\n  tensor.Set(\"Width\", 1280);\n\n  auto buffer = tensor.DeepCopy();\n  auto tensor2 = std::dynamic_pointer_cast<TensorBuffer>(buffer);\n  EXPECT_NE(nullptr, tensor2);\n\n  EXPECT_EQ(tensor.Shape(), tensor2->Shape());\n  EXPECT_EQ(tensor.GetType(), tensor2->GetType());\n\n  int tensor_value = 0;\n  int tensor2_value = -1;\n  tensor.Get(\"Height\", tensor_value);\n  tensor2->Get(\"Height\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n\n  tensor_value = 0;\n  tensor2_value = -1;\n  tensor.Get(\"Width\", tensor_value);\n  tensor2->Get(\"Width\", tensor2_value);\n  EXPECT_EQ(tensor_value, tensor2_value);\n\n  auto *buf_data = tensor.MutableData<int>();\n  auto *buf_data2 = tensor2->MutableData<int>();\n  EXPECT_NE(buf_data, buf_data2);\n\n  EXPECT_EQ(tensor.GetBytes(), tensor2->GetBytes());\n  for (size_t i = 0; i < data.size(); ++i) {\n    EXPECT_EQ(buf_data[i], data[i]);\n    EXPECT_EQ(buf_data2[i], data[i]);\n  }\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/engine/type_test.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/type.h\"\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n\nnamespace modelbox {\n\nclass TypeTest : public testing::Test {\n public:\n  TypeTest() = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\n#define TEST_TYPE_TypeToDataType(TYPE, DATA_TYPE) \\\n  {                                               \\\n    auto type = TypeToDataType<TYPE>::Value;      \\\n    EXPECT_EQ(type, DATA_TYPE);                   \\\n  }\n\nTEST_F(TypeTest, TypeToDataType) {\n  TEST_TYPE_TypeToDataType(float, MODELBOX_FLOAT);\n  TEST_TYPE_TypeToDataType(double, MODELBOX_DOUBLE);\n  TEST_TYPE_TypeToDataType(int32_t, MODELBOX_INT32);\n  TEST_TYPE_TypeToDataType(uint32_t, MODELBOX_UINT32);\n  TEST_TYPE_TypeToDataType(uint16_t, MODELBOX_UINT16);\n  TEST_TYPE_TypeToDataType(uint8_t, MODELBOX_UINT8);\n  TEST_TYPE_TypeToDataType(int16_t, MODELBOX_INT16);\n  TEST_TYPE_TypeToDataType(int8_t, MODELBOX_INT8);\n  TEST_TYPE_TypeToDataType(std::string, MODELBOX_STRING);\n  TEST_TYPE_TypeToDataType(int64_t, MODELBOX_INT64);\n  TEST_TYPE_TypeToDataType(uint64_t, MODELBOX_UINT64);\n  TEST_TYPE_TypeToDataType(bool, MODELBOX_BOOL);\n}\n\n#define TEST_TYPE_DataTypeSize(TYPE, DATA_TYPE) \\\n  {                                             \\\n    auto size 
= DataTypeSize<DATA_TYPE>::Size;  \\\n    EXPECT_EQ(size, sizeof(TYPE));              \\\n  }\n\nTEST_F(TypeTest, DataTypeSize) {\n  TEST_TYPE_DataTypeSize(float, MODELBOX_FLOAT);\n  TEST_TYPE_DataTypeSize(double, MODELBOX_DOUBLE);\n  TEST_TYPE_DataTypeSize(int32_t, MODELBOX_INT32);\n  TEST_TYPE_DataTypeSize(uint32_t, MODELBOX_UINT32);\n  TEST_TYPE_DataTypeSize(uint16_t, MODELBOX_UINT16);\n  TEST_TYPE_DataTypeSize(uint8_t, MODELBOX_UINT8);\n  TEST_TYPE_DataTypeSize(int16_t, MODELBOX_INT16);\n  TEST_TYPE_DataTypeSize(int8_t, MODELBOX_INT8);\n  TEST_TYPE_DataTypeSize(std::string, MODELBOX_STRING);\n  TEST_TYPE_DataTypeSize(int64_t, MODELBOX_INT64);\n  TEST_TYPE_DataTypeSize(uint64_t, MODELBOX_UINT64);\n  TEST_TYPE_DataTypeSize(bool, MODELBOX_BOOL);\n}\n\n#define TEST_TYPE_DataTypeToType(TYPE, DATA_TYPE)          \\\n  {                                                        \\\n    typedef typename DataTypeToType<DATA_TYPE>::Type type; \\\n    EXPECT_EQ(typeid(type), typeid(TYPE));                 \\\n  }\n\nTEST_F(TypeTest, DataTypeToType) {\n  TEST_TYPE_DataTypeToType(float, MODELBOX_FLOAT);\n  TEST_TYPE_DataTypeToType(double, MODELBOX_DOUBLE);\n  TEST_TYPE_DataTypeToType(int32_t, MODELBOX_INT32);\n  TEST_TYPE_DataTypeToType(uint32_t, MODELBOX_UINT32);\n  TEST_TYPE_DataTypeToType(uint16_t, MODELBOX_UINT16);\n  TEST_TYPE_DataTypeToType(uint8_t, MODELBOX_UINT8);\n  TEST_TYPE_DataTypeToType(int16_t, MODELBOX_INT16);\n  TEST_TYPE_DataTypeToType(int8_t, MODELBOX_INT8);\n  TEST_TYPE_DataTypeToType(std::string, MODELBOX_STRING);\n  TEST_TYPE_DataTypeToType(int64_t, MODELBOX_INT64);\n  TEST_TYPE_DataTypeToType(uint64_t, MODELBOX_UINT64);\n  TEST_TYPE_DataTypeToType(bool, MODELBOX_BOOL);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "test/unit/libmodelbox/engine/virtual_node_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"modelbox/virtual_node.h\"\n\n#include <fstream>\n#include <string>\n\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"graph_conf_mockgraphconf/graph_conf_mockgraphconf.h\"\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/data_context.h\"\n#include \"modelbox/graph.h\"\n#include \"modelbox/session_context.h\"\n\nusing ::testing::_;\nnamespace modelbox {\nclass VirtualNodeTest : public testing::Test {\n public:\n  VirtualNodeTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> mock_flow_;\n  void SetUp() override {\n    old_level_ = ModelBoxLogger.GetLogger()->GetLogLevel();\n    std::shared_ptr<Drivers> drivers = Drivers::GetInstance();\n    mock_flow_ = std::make_shared<MockFlow>();\n    mock_flow_->Init();\n    {\n      // flowunit add10\n      {\n        MockFlowUnitDriverDesc desc_flowunit;\n        desc_flowunit.SetClass(\"DRIVER-FLOWUNIT\");\n        desc_flowunit.SetType(\"cpu\");\n        desc_flowunit.SetName(\"add10\");\n        desc_flowunit.SetDescription(\"the int add10 function\");\n        desc_flowunit.SetVersion(\"1.0.0\");\n        std::string file_path_flowunit =\n            std::string(TEST_LIB_DIR) + 
\"/libmodelbox-unit-cpu-add10.so\";\n        desc_flowunit.SetFilePath(file_path_flowunit);\n        auto mock_flowunit = std::make_shared<MockFlowUnit>();\n        auto mock_flowunit_desc = std::make_shared<FlowUnitDesc>();\n        mock_flowunit_desc->SetFlowType(NORMAL);\n        mock_flowunit_desc->SetFlowUnitName(\"add10\");\n        mock_flowunit_desc->AddFlowUnitInput(modelbox::FlowUnitInput(\"In_1\"));\n        mock_flowunit_desc->AddFlowUnitOutput(\n            modelbox::FlowUnitOutput(\"Out_1\"));\n        mock_flowunit->SetFlowUnitDesc(mock_flowunit_desc);\n        EXPECT_CALL(*mock_flowunit, Open(_))\n            .WillRepeatedly(testing::Invoke(\n                [&](const std::shared_ptr<modelbox::Configuration>&\n                        flow_option) {\n                  MBLOG_INFO << \"add Open\";\n                  return modelbox::STATUS_OK;\n                }));\n\n        EXPECT_CALL(*mock_flowunit, DataPre(_))\n            .WillRepeatedly(testing::Invoke(\n                [&](const std::shared_ptr<DataContext> &data_ctx) {\n                  MBLOG_INFO << \"add DataPre\";\n                  return modelbox::STATUS_OK;\n                }));\n\n        EXPECT_CALL(*mock_flowunit, DataPost(_))\n            .WillRepeatedly(testing::Invoke(\n                [&](const std::shared_ptr<DataContext> &data_ctx) {\n                  MBLOG_INFO << \"add DataPost\";\n                  return modelbox::STATUS_OK;\n                }));\n\n        EXPECT_CALL(\n            *mock_flowunit,\n            Process(testing::An<std::shared_ptr<modelbox::DataContext>>()))\n            .WillRepeatedly(testing::Invoke(\n                [=](const std::shared_ptr<DataContext> &data_ctx) -> Status {\n                  MBLOG_INFO << \"add Process\";\n                  const auto input_bufs_1 = data_ctx->Input(\"In_1\");\n                  auto output_bufs = data_ctx->Output(\"Out_1\");\n\n                  std::vector<size_t> shape;\n                  for (size_t i = 0; i < 
input_bufs_1->Size(); ++i) {\n                    auto input_size_1 = (*input_bufs_1)[i]->GetBytes();\n                    shape.emplace_back(input_size_1);\n                  }\n                  output_bufs->Build(shape);\n\n                  for (size_t i = 0; i < shape.size(); ++i) {\n                    auto *input_data_1 = (int *)(*input_bufs_1)[i]->ConstData();\n                    auto *output_data = (int *)(*output_bufs)[i]->MutableData();\n                    auto data_size = shape[i] / sizeof(int);\n                    for (size_t j = 0; j < data_size; ++j) {\n                      output_data[j] = input_data_1[j] + 10;\n                      MBLOG_DEBUG << input_data_1[j] << \" + \" << 10 << \" = \"\n                                  << output_data[j];\n                    }\n                  }\n\n                  return modelbox::STATUS_OK;\n                }));\n        EXPECT_CALL(*mock_flowunit, Close())\n            .WillRepeatedly(testing::Invoke([&]() {\n              MBLOG_INFO << \"add Close\";\n              return modelbox::STATUS_OK;\n            }));\n        desc_flowunit.SetMockFlowUnit(mock_flowunit);\n        ctl_.AddMockDriverFlowUnit(\"add10\", \"cpu\", desc_flowunit);\n      }\n    }\n    drivers->Scan(TEST_LIB_DIR, \"/libmodelbox-unit-*\");\n  }\n\n  void TearDown() override {\n    ModelBoxLogger.GetLogger()->SetLogLevel(old_level_);\n  }\n\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  const std::string test_data_dir = TEST_DATA_DIR;\n\n private:\n  LogLevel old_level_;\n  MockDriverCtl ctl_;\n};\n\nTEST_F(VirtualNodeTest, VirtualNode_ONE_INPUT) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          add[type=flowunit, flowunit=add10, device=cpu, 
deviceid=0, label=\"<In_1> |<In_2> | <Out_1>\"]\n          \n          input1 ->add:In_1\n          add:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto ret = mock_flow_->BuildAndRun(\"VirtualNode_ONE_INPUT\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  // data 1\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n    int len = 10;\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({len * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    std::string dataStr;\n    for (auto i = 0; i < len; ++i) {\n      data[i] = i;\n      dataStr += std::to_string(data[i]) + \",\";\n    }\n    MBLOG_INFO << \"in: \" << dataStr;\n\n    auto status = ext_data->Send(\"input1\", buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    OutputBufferList map_buffer_list;\n    ext_data->Recv(map_buffer_list);\n\n    for (const auto &buffer_list_iter : map_buffer_list) {\n      auto name = buffer_list_iter.first;\n      auto buffer_list = buffer_list_iter.second;\n      auto buffer_size = buffer_list->Size();\n\n      std::string dataStr;\n      for (size_t i = 0; i < buffer_size; ++i) {\n        auto *data = (int *)buffer_list->At(i)->ConstData();\n        auto data_size = buffer_list->At(i)->GetBytes();\n        for (size_t j = 0; j < data_size / sizeof(int); ++j) {\n          dataStr += std::to_string(data[j]) + \",\";\n        }\n      }\n      MBLOG_INFO << name << \" \" << dataStr;\n    }\n  }\n\n  // data 2\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n\n    int len = 10;\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({len * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    std::string dataStr;\n    for (auto i = 0; i < len; ++i) 
{\n      data[i] = i + 100;\n      dataStr += std::to_string(data[i]) + \",\";\n    }\n    MBLOG_INFO << \"in: \" << dataStr;\n\n    auto status = ext_data->Send(\"input1\", buffer_list);\n    if (!status) {\n      MBLOG_ERROR << \"external data send buffer list failed:\" << status;\n    }\n\n    status = ext_data->Close();\n    if (!status) {\n      MBLOG_ERROR << \"external data close failed:\" << status;\n    }\n\n    OutputBufferList map_buffer_list;\n    ext_data->Recv(map_buffer_list);\n\n    for (const auto &buffer_list_iter : map_buffer_list) {\n      auto name = buffer_list_iter.first;\n      auto buffer_list = buffer_list_iter.second;\n      auto buffer_size = buffer_list->Size();\n\n      std::string dataStr;\n      for (size_t i = 0; i < buffer_size; ++i) {\n        auto *data = (int *)buffer_list->At(i)->ConstData();\n        auto data_size = buffer_list->At(i)->GetBytes();\n        for (size_t j = 0; j < data_size / sizeof(int); ++j) {\n          EXPECT_EQ(data[j], 110 + j);\n          dataStr += std::to_string(data[j]) + \",\";\n        }\n      }\n      MBLOG_INFO << name << \" \" << dataStr;\n    }\n  }\n\n  flow->Wait(3 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_MULTI_INPUT) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          input2[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          add[type=flowunit, flowunit=add, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\"]\n          \n          input1 ->add:In_1\n          input2 ->add:In_2\n          add:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret =\n      mock_flow_->BuildAndRun(\"VirtualNode_MULTI_INPUT\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto 
ext_data = flow->CreateExternalDataMap();\n\n    auto sess_ctx = ext_data->GetSessionContext();\n    sess_ctx->SetPrivate(\"test\", std::make_shared<int64_t>(1111));\n    sess_ctx = nullptr;\n    int len = 10;\n    auto buffer_list = ext_data->CreateBufferList();\n    buffer_list->Build({len * sizeof(int)});\n    auto *data = (int *)buffer_list->MutableData();\n    std::string dataStr;\n    for (auto i = 0; i < len; ++i) {\n      data[i] = i;\n      dataStr += std::to_string(data[i]) + \",\";\n    }\n    MBLOG_INFO << \"in: \" << dataStr;\n\n    auto status = ext_data->Send(\"input1\", buffer_list);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Send(\"input2\", buffer_list);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list_1;\n\n    status = ext_data->Recv(map_buffer_list_1);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    for (const auto &buffer_list_iter : map_buffer_list_1) {\n      auto name = buffer_list_iter.first;\n      auto buffer_list = buffer_list_iter.second;\n      auto buffer_size = buffer_list->Size();\n\n      std::string dataStr;\n      for (size_t i = 0; i < buffer_size; ++i) {\n        auto *data = (int *)buffer_list->At(i)->ConstData();\n        auto data_size = buffer_list->At(i)->GetBytes();\n        for (size_t j = 0; j < data_size / sizeof(int); ++j) {\n          EXPECT_EQ(data[j], 2 * j);\n          dataStr += std::to_string(data[j]) + \",\";\n        }\n      }\n      MBLOG_INFO << name << \" \" << dataStr;\n    }\n\n    status = ext_data->Send(\"input1\", buffer_list);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Send(\"input2\", buffer_list);\n\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list_2;\n    status = ext_data->Recv(map_buffer_list_2);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    for (const auto &buffer_list_iter : map_buffer_list_2) {\n      auto name = 
buffer_list_iter.first;\n      auto buffer_list = buffer_list_iter.second;\n      auto buffer_size = buffer_list->Size();\n\n      std::string dataStr;\n      for (size_t i = 0; i < buffer_size; ++i) {\n        auto *data = (int *)buffer_list->At(i)->ConstData();\n        auto data_size = buffer_list->At(i)->GetBytes();\n        for (size_t j = 0; j < data_size / sizeof(int); ++j) {\n          EXPECT_EQ(data[j], 2 * j);\n          dataStr += std::to_string(data[j]) + \",\";\n        }\n      }\n      MBLOG_INFO << name << \" \" << dataStr;\n    }\n\n    OutputBufferList map_buffer_list_3;\n    status = ext_data->Recv(map_buffer_list_3);\n    EXPECT_TRUE(map_buffer_list_3.empty());\n    EXPECT_EQ(status, STATUS_EOF);\n  }\n\n  flow->Wait(3 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_NO_OUTPUT) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_end[type=flowunit, flowunit=virtual_stream_end, device=cpu, deviceid=0, label=\"<In_1>\"]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->stream_end:In_1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto ret = mock_flow_->BuildAndRun(\"VirtualNode_NO_OUTPUT\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n\n    auto output_buf = ext_data->CreateBufferList();\n    output_buf->Build({3 * sizeof(int)});\n    auto *data = (int *)output_buf->MutableData();\n    
data[0] = 0;\n    data[1] = 25000;\n    data[2] = 3;\n\n    auto status = ext_data->Send(\"input1\", output_buf);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list;\n\n    status = ext_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_EOF);\n  }\n\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n\n    auto output_buf = ext_data->CreateBufferList();\n    output_buf->Build({3 * sizeof(int)});\n    auto *data = (int *)output_buf->MutableData();\n    data[0] = 0;\n    data[1] = 25000;\n    data[2] = 3;\n\n    auto status = ext_data->Send(\"input1\", output_buf);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list;\n    status = ext_data->Recv(map_buffer_list);\n    EXPECT_EQ(status, STATUS_EOF);\n    auto error = ext_data->GetLastError();\n    EXPECT_EQ(error, nullptr);\n  }\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Stop) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mock_flow_->BuildAndRun(\"VirtualNode_Stop\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    
auto ext_data = flow->CreateExternalDataMap();\n\n    auto output_buf = ext_data->CreateBufferList();\n    output_buf->Build({3 * sizeof(int)});\n    auto *data = (int *)output_buf->MutableData();\n    data[0] = 0;\n    data[1] = 25000;\n    data[2] = 3;\n\n    auto status = ext_data->Send(\"input1\", output_buf);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list;\n\n    uint32_t i = 0;\n    while (true) {\n      auto status = ext_data->Recv(map_buffer_list);\n      if (i == 5) {\n        ext_data->Shutdown();\n      }\n      if (status != STATUS_SUCCESS) {\n        EXPECT_EQ(status, STATUS_INVALID);\n        auto error = ext_data->GetLastError();\n        EXPECT_EQ(error->GetDesc(), \"EOF\");\n        break;\n      }\n      i++;\n    }\n  }\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Stop_2) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0]\n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=stream_simple_pass,\n          device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream, device=cpu,\n          deviceid=0, label=\"<In_1> | <Out_1>\"]\n\n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mock_flow_->BuildAndRun(\"VirtualNode_Stop_2\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n\n    auto output_buf = ext_data->CreateBufferList();\n    std::vector<size_t> shape(1, 3 * 
sizeof(int));\n    output_buf->Build(shape);\n    auto *data = (int *)output_buf->MutableData();\n    data[0] = 0;\n    data[1] = 25000;\n    data[2] = 3;\n\n    auto status = ext_data->Send(\"input1\", output_buf);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    OutputBufferList map_buffer_list;\n\n    uint32_t i = 0;\n    while (true) {\n      auto status = ext_data->Recv(map_buffer_list);\n      if (i == 200) {\n        ext_data->Shutdown();\n      }\n      if (status != STATUS_SUCCESS) {\n        EXPECT_EQ(status, STATUS_INVALID);\n        auto error = ext_data->GetLastError();\n        EXPECT_EQ(error->GetDesc(), \"EOF\");\n        break;\n      }\n      i++;\n    }\n  }\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Stop_3) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\", batch_size=5]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret = mock_flow_->BuildAndRun(\"VirtualNode_Select\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data_1 = flow->CreateExternalDataMap();\n\n    auto output_buf_1 = ext_data_1->CreateBufferList();\n    output_buf_1->Build({3 * sizeof(int)});\n    auto *data_1 = (int *)output_buf_1->MutableData();\n    data_1[0] = 0;\n   
 data_1[1] = 25000;\n    data_1[2] = 3;\n\n    auto status = ext_data_1->Send(\"input1\", output_buf_1);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    auto selector = std::make_shared<ExternalDataSelect>();\n    selector->RegisterExternalData(ext_data_1);\n\n    int recv_count = 0;\n    while (true) {\n      std::list<std::shared_ptr<ExternalDataMap>> external_list;\n      auto select_status = selector->SelectExternalData(\n          external_list, std::chrono::milliseconds(3000));\n      if (select_status == STATUS_TIMEDOUT) {\n        break;\n      }\n\n      for (const auto &external : external_list) {\n        OutputBufferList map_buffer_list;\n        external->Recv(map_buffer_list);\n\n        if (external == ext_data_1) {\n          recv_count++;\n          if (recv_count >= 50) {\n            ext_data_1->Close();\n          }\n          if (recv_count >= 100) {\n            ext_data_1->Shutdown();\n          }\n        }\n      }\n    }\n\n    OutputBufferList map_buffer_list;\n    auto last_status_1 = ext_data_1->Recv(map_buffer_list);\n    EXPECT_EQ(last_status_1, STATUS_INVALID);\n  }\n\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Select) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\", batch_size=5]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  
auto ret = mock_flow_->BuildAndRun(\"VirtualNode_Select\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data_1 = flow->CreateExternalDataMap();\n\n    auto output_buf_1 = ext_data_1->CreateBufferList();\n    output_buf_1->Build({3 * sizeof(int)});\n    auto *data_1 = (int *)output_buf_1->MutableData();\n    data_1[0] = 0;\n    data_1[1] = 25000;\n    data_1[2] = 3;\n\n    auto status = ext_data_1->Send(\"input1\", output_buf_1);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data_1->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    auto ext_data_2 = flow->CreateExternalDataMap();\n\n    auto output_buf_2 = ext_data_2->CreateBufferList();\n    output_buf_2->Build({3 * sizeof(int)});\n    auto *data_2 = (int *)output_buf_2->MutableData();\n    data_2[0] = 0;\n    data_2[1] = 25000;\n    data_2[2] = 3;\n\n    status = ext_data_2->Send(\"input1\", output_buf_2);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    auto selector = std::make_shared<ExternalDataSelect>();\n    selector->RegisterExternalData(ext_data_1);\n    selector->RegisterExternalData(ext_data_2);\n\n    int size = 0;\n    int recv_count = 0;\n\n    while (true) {\n      std::list<std::shared_ptr<ExternalDataMap>> external_list;\n      auto select_status = selector->SelectExternalData(\n          external_list, std::chrono::milliseconds(3000));\n      if (select_status == STATUS_TIMEDOUT) {\n        break;\n      }\n\n      for (const auto &external : external_list) {\n        OutputBufferList map_buffer_list;\n        auto status = external->Recv(map_buffer_list);\n        if (status == STATUS_SUCCESS && external == ext_data_1) {\n          size += map_buffer_list[\"output1\"]->Size();\n        }\n\n        if (external == ext_data_2) {\n          recv_count++;\n          if (recv_count >= 10) {\n            ext_data_2->Shutdown();\n          }\n        }\n      }\n    }\n    EXPECT_EQ(size, 8334);\n\n    OutputBufferList map_buffer_list;\n    auto 
last_status_1 = ext_data_1->Recv(map_buffer_list);\n    EXPECT_EQ(last_status_1, STATUS_EOF);\n    auto last_status_2 = ext_data_2->Recv(map_buffer_list);\n    EXPECT_EQ(last_status_2, STATUS_INVALID);\n  }\n\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Select_Timeout) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret =\n      mock_flow_->BuildAndRun(\"VirtualNode_Select_Timeout\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data = flow->CreateExternalDataMap();\n\n    auto output_buf = ext_data->CreateBufferList();\n    output_buf->Build({3 * sizeof(int)});\n    auto *data = (int *)output_buf->MutableData();\n    data[0] = 0;\n    data[1] = 25000;\n    data[2] = 3;\n\n    auto selector = std::make_shared<ExternalDataSelect>();\n    selector->RegisterExternalData(ext_data);\n    std::list<std::shared_ptr<ExternalDataMap>> external_list;\n    auto select_status = selector->SelectExternalData(\n        external_list, std::chrono::milliseconds(100));\n    EXPECT_EQ(select_status, STATUS_TIMEDOUT);\n  }\n\n  {\n    auto selector = std::make_shared<ExternalDataSelect>();\n    std::list<std::shared_ptr<ExternalDataMap>> external_list;\n    auto select_status = 
selector->SelectExternalData(\n        external_list, std::chrono::milliseconds(100));\n    EXPECT_EQ(select_status, STATUS_TIMEDOUT);\n  }\n\n  flow->Wait(5 * 1000);\n}\n\nTEST_F(VirtualNodeTest, VirtualNode_Muliti_Output) {\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                             \"\\\"]\\n    \" +\n                             R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output,output_type=unmatch ,device=cpu, deviceid=0]\n          output2[type=output,output_type=unmatch, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_start:Out_1 ->output2\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n  auto ret =\n      mock_flow_->BuildAndRun(\"VirtualNode_Muliti_Output\", toml_content, -1);\n  auto flow = mock_flow_->GetFlow();\n\n  {\n    auto ext_data_1 = flow->CreateExternalDataMap();\n\n    auto output_buf_1 = ext_data_1->CreateBufferList();\n    output_buf_1->Build({3 * sizeof(int)});\n    auto *data_1 = (int *)output_buf_1->MutableData();\n    data_1[0] = 0;\n    data_1[1] = 25000;\n    data_1[2] = 3;\n\n    auto status = ext_data_1->Send(\"input1\", output_buf_1);\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    status = ext_data_1->Close();\n    EXPECT_EQ(status, STATUS_SUCCESS);\n\n    uint32_t size_1 = 0;\n    uint32_t size_2 = 0;\n    while (true) {\n      OutputBufferList map_buffer_list;\n      auto status = ext_data_1->Recv(map_buffer_list);\n      if (map_buffer_list[\"output1\"] != nullptr) {\n        size_1 += 
map_buffer_list[\"output1\"]->Size();\n      }\n      if (map_buffer_list[\"output2\"] != nullptr) {\n        size_2 += map_buffer_list[\"output2\"]->Size();\n      }\n      if (status == STATUS_EOF) {\n        break;\n      }\n    }\n    EXPECT_EQ(size_1, 8334);\n    EXPECT_EQ(size_2, 25000);\n  }\n  flow->Wait(3 * 1000);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/flow_test.cc",
    "content": "\n/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <atomic>\n#include <cstdio>\n#include <fstream>\n#include <functional>\n#include <future>\n#include <thread>\n\n#include \"engine/scheduler/flow_scheduler.h\"\n#include \"flowunit_mockflowunit/flowunit_mockflowunit.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n#include \"modelbox/buffer.h\"\n#include \"modelbox/device/mockdevice/device_mockdevice.h\"\n#include \"modelbox/graph.h\"\n#include \"modelbox/node.h\"\n\nnamespace modelbox {\nusing ::testing::Sequence;\nclass FlowTest : public testing::Test {\n public:\n  FlowTest() = default;\n\n protected:\n  std::shared_ptr<MockFlow> flow_;\n\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init();\n  };\n\n  void TearDown() override { flow_->Destroy(); };\n};\n\nclass MockNode : public Node {\n public:\n  MOCK_METHOD1(Run, Status(RunType type));\n};\n\nstatic SessionManager g_test_session_manager;\n\nTEST_F(FlowTest, All) {\n  auto graph = std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<Node> node_a = nullptr;\n  std::shared_ptr<Node> node_b = nullptr;\n  std::shared_ptr<Node> node_c = nullptr;\n\n  {\n  
  ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_a = std::make_shared<Node>();\n    node_a->SetFlowUnitInfo(\"listen\", \"cpu\", \"0\", flowunit_mgr);\n    node_a->SetName(\"gendata\");\n    node_a->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    node_a->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_a));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_b = std::make_shared<Node>();\n    node_b->SetFlowUnitInfo(\"add\", \"cpu\", \"0\", flowunit_mgr);\n    node_b->SetName(\"addop\");\n    node_b->Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, config);\n    node_b->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_b));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"max_count\", 50);\n\n    node_c = std::make_shared<Node>();\n    node_c->SetFlowUnitInfo(\"check_print\", \"cpu\", \"0\", flowunit_mgr);\n    node_c->SetName(\"check_print\");\n    node_c->Init({\"IN1\", \"IN2\", \"IN3\"}, {}, config);\n    node_c->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_c));\n  }\n\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_b->GetName(), \"In_1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_b->GetName(), \"In_2\");\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_c->GetName(), \"IN1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_c->GetName(), \"IN2\");\n  graph->AddLink(node_b->GetName(), \"Out_1\", node_c->GetName(), \"IN3\");\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  graph->RunAsync();\n\n  Status retval;\n  graph->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n}\n\nTEST_F(FlowTest, PortEnlargeQueue) {\n  auto graph = 
std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<Node> start_node = nullptr;\n  std::shared_ptr<Node> condition_node = nullptr;\n  std::shared_ptr<Node> simple_pass_node = nullptr;\n  std::shared_ptr<Node> receive_node = nullptr;\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    start_node = std::make_shared<Node>();\n    start_node->SetFlowUnitInfo(\"test_orgin_0_2\", \"cpu\", \"0\", flowunit_mgr);\n    start_node->SetName(\"test_orgin_0_2\");\n    auto status = start_node->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    EXPECT_EQ(status, STATUS_OK);\n    start_node->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(start_node));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    condition_node = std::make_shared<Node>();\n    condition_node->SetFlowUnitInfo(\"half-condition\", \"cpu\", \"0\", flowunit_mgr);\n    condition_node->SetName(\"half-condition\");\n    auto status = condition_node->Init({\"In_1\"}, {\"Out_1\", \"Out_2\"}, config);\n    EXPECT_EQ(status, STATUS_OK);\n    condition_node->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(condition_node));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    simple_pass_node = std::make_shared<Node>();\n    simple_pass_node->SetFlowUnitInfo(\"simple_pass\", \"cpu\", \"0\", flowunit_mgr);\n    simple_pass_node->SetName(\"simple_pass\");\n    auto status = simple_pass_node->Init({\"In_1\"}, {\"Out_1\"}, config);\n    EXPECT_EQ(status, STATUS_OK);\n    simple_pass_node->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(simple_pass_node));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    
config->SetProperty(\"queue_size\", \"5\");\n\n    receive_node = std::make_shared<Node>();\n    receive_node->SetFlowUnitInfo(\"test_2_0\", \"cpu\", \"0\", flowunit_mgr);\n    receive_node->SetName(\"receive\");\n    auto status = receive_node->Init({\"In_1\", \"In_2\"}, {}, config);\n    EXPECT_EQ(status, STATUS_OK);\n    receive_node->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(receive_node));\n  }\n\n  graph->AddLink(start_node->GetName(), \"Out_1\", condition_node->GetName(),\n                 \"In_1\");\n  graph->AddLink(condition_node->GetName(), \"Out_1\", receive_node->GetName(),\n                 \"In_1\");\n  graph->AddLink(condition_node->GetName(), \"Out_2\",\n                 simple_pass_node->GetName(), \"In_1\");\n  graph->AddLink(simple_pass_node->GetName(), \"Out_1\", receive_node->GetName(),\n                 \"In_1\");\n  graph->AddLink(start_node->GetName(), \"Out_2\", receive_node->GetName(),\n                 \"In_2\");\n\n  Sequence s1;\n  auto pass_fu = std::dynamic_pointer_cast<MockFlowUnit>(\n      simple_pass_node->GetFlowUnitGroup()->GetExecutorUnit());\n  EXPECT_CALL(*pass_fu, Process(testing::_)).Times(5).InSequence(s1);\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  graph->RunAsync();\n\n  Status retval;\n  graph->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n}\n\nTEST_F(FlowTest, TensorList_All) {\n  auto graph = std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<Node> node_a = nullptr;\n  std::shared_ptr<Node> node_b = nullptr;\n  std::shared_ptr<Node> node_c = nullptr;\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_a = std::make_shared<Node>();\n    
node_a->SetFlowUnitInfo(\"listen\", \"cpu\", \"0\", flowunit_mgr);\n    node_a->SetName(\"gendata\");\n    node_a->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    node_a->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_a));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_b = std::make_shared<Node>();\n    node_b->SetFlowUnitInfo(\"tensorlist_test_1\", \"cpu\", \"0\", flowunit_mgr);\n    node_b->SetName(\"tensorlist_test_1\");\n    node_b->Init({\"IN1\"}, {\"OUT1\"}, config);\n    node_b->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_b));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_c = std::make_shared<Node>();\n    node_c->SetFlowUnitInfo(\"check_tensorlist_test_1\", \"cpu\", \"0\",\n                            flowunit_mgr);\n    node_c->SetName(\"check_tensorlist_test_1\");\n    node_c->Init({\"IN1\", \"IN2\"}, {}, config);\n    node_c->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_c));\n  }\n\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_b->GetName(), \"IN1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_c->GetName(), \"IN1\");\n  graph->AddLink(node_b->GetName(), \"OUT1\", node_c->GetName(), \"IN2\");\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  graph->RunAsync();\n\n  Status retval;\n  graph->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n}\n\nTEST_F(FlowTest, FAILED_ALL) {\n  auto graph = std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<MockNode> node_a = nullptr;\n  std::shared_ptr<Node> node_b = nullptr;\n  
std::shared_ptr<Node> node_c = nullptr;\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_a = std::make_shared<MockNode>();\n    node_a->SetFlowUnitInfo(\"listen\", \"cpu\", \"0\", flowunit_mgr);\n    EXPECT_CALL(*node_a, Run(testing::_))\n        .WillRepeatedly(testing::Return(STATUS_FAULT));\n    node_a->SetName(\"gendata\");\n    node_a->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    node_a->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_a));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n\n    node_b = std::make_shared<Node>();\n    node_b->SetFlowUnitInfo(\"add\", \"cpu\", \"0\", flowunit_mgr);\n    node_b->SetName(\"addop\");\n    node_b->Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, config);\n    node_b->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_b));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"max_count\", 50);\n\n    node_c = std::make_shared<Node>();\n    node_c->SetFlowUnitInfo(\"check_print\", \"cpu\", \"0\", flowunit_mgr);\n    node_c->SetName(\"check_print\");\n    node_c->Init({\"IN1\", \"IN2\", \"IN3\"}, {}, config);\n    node_c->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_c));\n  }\n\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_b->GetName(), \"In_1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_b->GetName(), \"In_2\");\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_c->GetName(), \"IN1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_c->GetName(), \"IN2\");\n  graph->AddLink(node_b->GetName(), \"Out_1\", node_c->GetName(), \"IN3\");\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  
graph->RunAsync();\n\n  Status retval;\n  graph->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_FAULT);\n}\n\nTEST_F(FlowTest, ConfigJson) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = [\"digraph demo {\",\n          \"listen[type=flowunit, flowunit=listen, device=cpu, deviceid=0, label=\\\"<Out_1> | <Out_2>\\\"]\",\n          \"tensorlist_test_2[type=flowunit, flowunit=tensorlist_test_2, device=cpu, deviceid=0, label=\\\"<IN1> | <IN2> | <OUT1>\\\", batch_size=4]\",\n          \"check_tensorlist_test_2[type=flowunit, flowunit=check_tensorlist_test_2, device=cpu, deviceid=0, label=\\\"<IN1> | <IN2> | <IN3>\\\", batch_size=6]\",\n          \"listen:Out_1 -> tensorlist_test_2:IN1\",\n          \"listen:Out_2 -> tensorlist_test_2:IN2\",\n          \"listen:Out_1 -> check_tensorlist_test_2:IN1\",\n          \"listen:Out_2 -> check_tensorlist_test_2:IN2\",\n          \"tensorlist_test_2:OUT1 -> check_tensorlist_test_2:IN3\",\n        \"}\"\n        ]\n    format = \"graphviz\"\n  )\";\n\n  MBLOG_INFO << toml_content;\n  std::string config_file_path = std::string(TEST_WORKING_DIR) + \"/test.json\";\n  std::string json_data;\n  auto ret = TomlToJson(toml_content, &json_data, true);\n  ASSERT_TRUE(ret);\n  MBLOG_INFO << json_data;\n  std::ofstream ofs(config_file_path);\n  EXPECT_TRUE(ofs.is_open());\n  ofs.write(json_data.data(), json_data.size());\n  ofs.flush();\n  ofs.close();\n  Defer {\n    auto rmret = remove(config_file_path.c_str());\n    EXPECT_EQ(rmret, 0);\n  };\n\n  auto flow = std::make_shared<Flow>();\n  ret = flow->Init(config_file_path);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  Status retval;\n  flow->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n\n  flow->Stop();\n}\n\nTEST_F(FlowTest, 
Extern_Config) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          listen[type=flowunit, flowunit=listen, device=cpu, deviceid=0, label=\"<Out_1> | <Out_2>\"]             \n          add[type=flowunit, flowunit=add, device=cpu, deviceid=0, label=\"<In_1> | <In_2> | <Out_1>\"] \n          check_print[type=flowunit, flowunit=check_print, device=cpu, deviceid=0, label=\"<IN1> | <IN2> | <IN3>\" , max_count=50]                                \n          listen:Out_1 -> add:In_1\n          listen:Out_2 -> add:In_2\n          listen:Out_1 -> check_print:IN1\n          listen:Out_2 -> check_print:IN2\n          add:Out_1 -> check_print:IN3                                                                             \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(\"graph\", toml_content);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  Status retval;\n  flow->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n\n  flow->Stop();\n}\n\nTEST_F(FlowTest, DISABLED_Perf) {\n  auto graph = std::make_shared<Graph>();\n  auto gc = std::make_shared<GCGraph>();\n  auto flowunit_mgr = FlowUnitManager::GetInstance();\n  auto device_mgr = DeviceManager::GetInstance();\n\n  std::shared_ptr<Node> node_a = nullptr;\n  std::shared_ptr<Node> node_b = nullptr;\n  std::shared_ptr<Node> node_c = nullptr;\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"interval_time\", 0);\n    config->SetProperty(\"queue_size\", 1024);\n\n    node_a = std::make_shared<Node>();\n    node_a->SetFlowUnitInfo(\"listen\", \"cpu\", 
\"0\", flowunit_mgr);\n    node_a->SetName(\"gendata\");\n    node_a->Init({}, {\"Out_1\", \"Out_2\"}, config);\n    node_a->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_a));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"queue_size\", 1024);\n\n    node_b = std::make_shared<Node>();\n    node_b->SetFlowUnitInfo(\"add\", \"cpu\", \"0\", flowunit_mgr);\n    node_b->SetName(\"add\");\n    node_b->Init({\"In_1\", \"In_2\"}, {\"Out_1\"}, config);\n    node_b->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_b));\n  }\n\n  {\n    ConfigurationBuilder configbuilder;\n    auto config = configbuilder.Build();\n    config->SetProperty(\"max_count\", INT64_MAX);\n    config->SetProperty(\"queue_size\", 1024);\n\n    node_c = std::make_shared<Node>();\n    node_c->SetFlowUnitInfo(\"check_print\", \"cpu\", \"0\", flowunit_mgr);\n    node_c->SetName(\"check_print\");\n    node_c->Init({\"IN1\", \"IN2\", \"IN3\"}, {}, config);\n    node_c->SetSessionManager(&g_test_session_manager);\n    EXPECT_TRUE(graph->AddNode(node_c));\n  }\n\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_b->GetName(), \"In_1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_b->GetName(), \"In_2\");\n  graph->AddLink(node_a->GetName(), \"Out_1\", node_c->GetName(), \"IN1\");\n  graph->AddLink(node_a->GetName(), \"Out_2\", node_c->GetName(), \"IN2\");\n  graph->AddLink(node_b->GetName(), \"Out_1\", node_c->GetName(), \"IN3\");\n\n  ConfigurationBuilder configbuilder;\n  auto config = configbuilder.Build();\n  graph->Initialize(flowunit_mgr, device_mgr, nullptr, config);\n  EXPECT_TRUE(graph->Build(gc) == STATUS_OK);\n  graph->RunAsync();\n\n  Status retval;\n  graph->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n}\n\nTEST_F(FlowTest, Statistics) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n    [driver]\n  
  skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {\n          input[type=input]\n          statistic_test[type=flowunit, flowunit=statistic_test, device=cpu, deviceid=0, label=\"<IN1> | <OUT1>\"]\n          output[type=output]\n\n          input -> statistic_test:IN1\n          statistic_test:OUT1 -> output\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(\"graph\", toml_content);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  auto external = flow->CreateExternalDataMap();\n  auto input_buffer = external->CreateBufferList();\n  input_buffer->Build({1});\n  auto session_ctx = external->GetSessionContext();\n  auto session_id = session_ctx->GetSessionId();\n  auto graph_id = flow->GetGraphId();\n\n  auto profiler = flow->GetProfiler();\n  auto statistics = Statistics::GetGlobalItem();\n  Defer { Statistics::ReleaseGlobalItem(); };\n  std::atomic<std::uint32_t> change_notify_count = {0};\n  std::atomic<std::uint32_t> timer_notify_count = {0};\n  const std::string path_pattern = \"flow.*.*.statistic_test.test_key\";\n  auto change_notify_cfg = std::make_shared<StatisticsNotifyCfg>(\n      path_pattern,\n      [&change_notify_count, graph_id, session_id](\n          const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n        MBLOG_INFO << \"Change notify [\" << msg->path_ << \"]\";\n        EXPECT_EQ(msg->path_, \"flow.\" + graph_id + \".\" + session_id +\n                                  \".statistic_test.test_key\");\n        EXPECT_TRUE(msg->value_->IsInt32());\n        EXPECT_FALSE(msg->value_->IsString());\n        int32_t test_val = 0;\n        auto ret = msg->value_->GetInt32(test_val);\n        EXPECT_TRUE(ret);\n        EXPECT_EQ(test_val, 1);\n        ++change_notify_count;\n      },\n      
std::set<StatisticsNotifyType>{StatisticsNotifyType::CREATE,\n                                     StatisticsNotifyType::CHANGE});\n  statistics->RegisterNotify(change_notify_cfg);\n\n  auto delete_notify_cfg = std::make_shared<StatisticsNotifyCfg>(\n      path_pattern,\n      [session_id](\n          const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n        MBLOG_INFO << \"Delete notify [\" << msg->path_ << \"]\";\n        EXPECT_EQ(msg->type_, StatisticsNotifyType::DELETE);\n      },\n      StatisticsNotifyType::DELETE);\n  statistics->RegisterNotify(delete_notify_cfg);\n\n  auto timer_notify_cfg = std::make_shared<StatisticsNotifyCfg>(\n      path_pattern,\n      [&timer_notify_count, graph_id, session_id](\n          const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n        ++timer_notify_count;\n      });\n  timer_notify_cfg->SetNotifyTimer(100, 100);\n  statistics->RegisterNotify(timer_notify_cfg);\n\n  EXPECT_NE(session_ctx, nullptr);\n  external->Send(\"input\", input_buffer);\n  OutputBufferList output_buffer;\n  external->Recv(output_buffer);\n  external->Close();\n\n  flow->Stop();\n\n  auto item = statistics->GetItem(\"flow.\" + graph_id + \".\" + session_id +\n                                  \".statistic_test.test_key\");\n  ASSERT_NE(item, nullptr);\n  EXPECT_TRUE(item->GetValue()->IsInt32());\n  int32_t test_val = 0;\n  auto b_ret = item->GetValue()->GetInt32(test_val);\n  EXPECT_TRUE(b_ret);\n  EXPECT_EQ(test_val, 1);\n\n  // change_val == create_val, only notify once\n  EXPECT_EQ(change_notify_count, 1);\n  EXPECT_GE(timer_notify_count, 0);  // minimum timer notify interval is 60s\n  statistics->UnRegisterNotify(change_notify_cfg);\n  statistics->UnRegisterNotify(delete_notify_cfg);\n  statistics->UnRegisterNotify(timer_notify_cfg);\n}\n\nTEST_F(FlowTest, LoopGraph_All) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + 
test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          data_input[type=flowunit, flowunit=test_0_1_batch_thread, device=cpu, deviceid=0, label= \"<Out_1>\", interval_time = 10000]             \n          loop[type=flowunit, flowunit=loop, device=cpu, deviceid=0, label=\"<In_1> | <Out_1> | <Out_2>\"] \n          data_output[type=flowunit, flowunit=test_1_0_batch_thread, device=cpu, deviceid=0, label=\"<In_1>\"]                                \n          data_input:Out_1 -> loop:In_1\n          loop:Out_1 -> loop:In_1\n          loop:Out_2 -> data_output:In_1                                                                        \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(\"graph\", toml_content);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  Status retval;\n  flow->Wait(0, &retval);\n  EXPECT_EQ(retval, STATUS_STOP);\n\n  flow->Stop();\n}\n\nTEST_F(FlowTest, NormalError) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          error_input[type=flowunit, flowunit=error_start_normal, device=cpu, deviceid=0, label= \"<Out_1>\"]             \n          error_output[type=flowunit, flowunit=error_end_normal, device=cpu, deviceid=0, label=\"<In_1>\"]                                \n          error_input:Out_1 -> error_output:In_1\n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto flow = std::make_shared<Flow>();\n  auto ret = flow->Init(\"graph\", toml_content);\n  EXPECT_EQ(ret, STATUS_OK);\n\n  ret = 
flow->Build();\n  EXPECT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  Status retval;\n  flow->Wait(1000 * 5, &retval);\n  EXPECT_EQ(retval, STATUS_SUCCESS);\n\n  flow->Stop();\n}\n\nclass InlineFlowUnit : public FlowUnit {\n public:\n  Status Process(std::shared_ptr<DataContext> data_ctx) override {\n    auto indata = data_ctx->Input(\"in\");\n    auto output = data_ctx->Output(\"out\");\n\n    for (const auto& buff : *indata) {\n      output->PushBack(buff);\n    }\n\n    return modelbox::STATUS_OK;\n  }\n\n  class Builder : public FlowUnitBuilder {\n   public:\n    void Probe(std::shared_ptr<FlowUnitDesc>& desc) override {\n      desc->SetFlowUnitType(\"cpu\");\n      desc->SetFlowUnitName(\"inlineflowunit\");\n      desc->AddFlowUnitInput({\"in\"});\n      desc->AddFlowUnitOutput({\"out\"});\n    }\n\n    std::shared_ptr<FlowUnit> Build() override {\n      return std::make_shared<InlineFlowUnit>();\n    }\n  };\n};\n\nTEST_F(FlowTest, InlineFlowUnit) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  const std::string test_driver_dir = TEST_DRIVER_DIR;\n  std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + test_lib_dir + \"\\\", \\\"\" +\n                             test_driver_dir + \"\\\"]\\n    \" +\n                             R\"([graph]\n    graphconf = '''digraph demo {                                                                            \n          input[type=input]             \n          process[flowunit=inlineflowunit, device=cpu] \n          output[type=output]                                \n          input -> process:in\n          process:out -> output                                                                     \n        }'''\n    format = \"graphviz\"\n  )\";\n\n  auto flow = std::make_shared<Flow>();\n  flow->RegisterFlowUnit(std::make_shared<InlineFlowUnit::Builder>());\n  auto ret = flow->Init(\"graph\", toml_content);\n  ASSERT_EQ(ret, STATUS_OK);\n\n  ret = flow->Build();\n  
ASSERT_EQ(ret, STATUS_OK);\n\n  flow->RunAsync();\n\n  auto streamio = flow->CreateStreamIO();\n  std::string msg = \"hello\";\n  streamio->Send(\"input\", (char*)msg.c_str(), msg.size());\n  streamio->Send(\"input\", (char*)msg.c_str(), msg.size());\n  streamio->CloseInput();\n\n  int count = 0;\n  bool get_result = false;\n\n  while (true) {\n    auto retbuf = streamio->Recv(\"output\", 1000 * 10);\n    if (retbuf == nullptr) {\n      break;\n    }\n\n    std::string retmsg((const char*)retbuf->ConstData(), retbuf->GetBytes());\n    EXPECT_STREQ(msg.c_str(), retmsg.c_str());\n    get_result = true;\n    count++;\n  }\n\n  EXPECT_EQ(count, 2);\n  EXPECT_TRUE(get_result);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/libmodelbox/profiling/profiler_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n#include \"modelbox/profiler.h\"\n\n#include <modelbox/base/any.h>\n#include <dirent.h>\n#include <sys/stat.h>\n\n#include <atomic>\n\n#include \"modelbox/statistics.h\"\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n\nclass ProfilerTest : public testing::Test {\n public:\n  ProfilerTest() = default;\n  ~ProfilerTest() override = default;\n\n protected:\n  void SetUp() override {\n    std::ostringstream sstr_profile_path;\n    sstr_profile_path << TEST_DATA_DIR << \"/perf\";\n    setenv(modelbox::PROFILE_PATH_ENV, sstr_profile_path.str().c_str(), 1);\n  };\n\n  void TearDown() override { unsetenv(modelbox::PROFILE_PATH_ENV); };\n};\n\nTEST_F(ProfilerTest, ProfilerInit) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  EXPECT_EQ(profiler->Init(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Init(), modelbox::STATUS_SUCCESS);\n  EXPECT_TRUE(profiler->IsInitialized());\n}\n\nTEST_F(ProfilerTest, ProfilerStartAndStop) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  
EXPECT_EQ(profiler->Init(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Start(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Start(), modelbox::STATUS_SUCCESS);\n  EXPECT_TRUE(profiler->IsRunning());\n  EXPECT_EQ(profiler->Stop(), modelbox::STATUS_SUCCESS);\n  EXPECT_FALSE(profiler->IsRunning());\n}\n\nTEST_F(ProfilerTest, ProfilerPauseAndResume) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  EXPECT_EQ(profiler->Init(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Start(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Pause(), modelbox::STATUS_SUCCESS);\n  EXPECT_FALSE(profiler->IsRunning());\n  EXPECT_EQ(profiler->Resume(), modelbox::STATUS_SUCCESS);\n  EXPECT_TRUE(profiler->IsRunning());\n}\n\nTEST_F(ProfilerTest, ProfilerSetTraceSlice) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  config->SetProperty(\"profile.trace\", \"true\");\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  profiler->Init();\n  std::string session = \"session_0\";\n  auto trace = profiler->GetTrace();\n  {\n    auto trace_slice = trace->FlowUnit(\"test\")->Slice(\n        modelbox::TraceSliceType::PROCESS, session);\n    trace_slice->Begin();\n    trace_slice->End();\n  }\n\n  std::vector<std::shared_ptr<modelbox::TraceSlice>> all_slices;\n  trace->FlowUnit(\"test\")->GetTraceSlices(all_slices);\n  int process_slices = 0;\n  std::shared_ptr<modelbox::TraceSlice> last_slice;\n  for (const auto& slice : all_slices) {\n    if (slice->GetTraceSliceType() == modelbox::TraceSliceType::PROCESS) {\n      process_slices++;\n      last_slice = slice;\n    }\n  }\n\n  EXPECT_EQ(process_slices, 1);\n  EXPECT_NE(last_slice->GetBeginEvent(), nullptr);\n  EXPECT_NE(last_slice->GetEndEvent(), 
nullptr);\n}\n\nTEST_F(ProfilerTest, ProfilerTraceSliceEndNotCalled) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  config->SetProperty(\"profile.trace\", \"true\");\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  std::string session = \"session_0\";\n  profiler->Init();\n  auto trace = profiler->GetTrace();\n  {\n    auto trace_slice = trace->FlowUnit(\"test\")->Slice(\n        modelbox::TraceSliceType::PROCESS, session);\n    trace_slice->Begin();\n  }\n\n  std::vector<std::shared_ptr<modelbox::TraceSlice>> all_slices;\n  trace->FlowUnit(\"test\")->GetTraceSlices(all_slices);\n\n  int process_slices = 0;\n  std::shared_ptr<modelbox::TraceEvent> last_event;\n  for (const auto& slice : all_slices) {\n    if (slice->GetTraceSliceType() == modelbox::TraceSliceType::PROCESS) {\n      process_slices++;\n      last_event = slice->GetEndEvent();\n    }\n  }\n\n  EXPECT_EQ(process_slices, 1);\n  EXPECT_NE(last_event.get(), nullptr);\n}\n\nTEST_F(ProfilerTest, ProfilerTimer) {\n  auto device_manager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  auto profiler = std::make_shared<modelbox::Profiler>(device_manager, config);\n  profiler->Init();\n  auto perf = profiler->GetPerf();\n  EXPECT_EQ(profiler->Init(), modelbox::STATUS_SUCCESS);\n  EXPECT_EQ(profiler->Start(), modelbox::STATUS_SUCCESS);\n}\n\nTEST_F(ProfilerTest, TracePerf) {\n  auto deviceManager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  config->SetProperty(\"profile.trace\", \"true\");\n  std::shared_ptr<modelbox::Profiler> profiler =\n      std::make_shared<modelbox::Profiler>(deviceManager, config);\n  profiler->Init();\n  auto trace = profiler->GetTrace();\n\n  profiler->Start();\n\n  auto flow_unit_test = trace->FlowUnit(\"resize\");\n  for (int i = 0; i 
< 1000; i++) {\n    auto open_slice =\n        flow_unit_test->Slice(modelbox::TraceSliceType::OPEN, \"session\");\n    open_slice->Begin();\n    open_slice->End();\n  }\n}\n\nTEST_F(ProfilerTest, WriteTrace) {\n  MBLOG_INFO << \"PROFILE_PATH : \" << getenv(modelbox::PROFILE_PATH_ENV);\n  auto deviceManager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  config->SetProperty(\"profile.trace\", \"true\");\n  std::shared_ptr<modelbox::Profiler> profiler =\n      std::make_shared<modelbox::Profiler>(deviceManager, config);\n  profiler->Init();\n  auto trace = profiler->GetTrace();\n  auto flow_unit_trace_resize = trace->FlowUnit(\"resize\");\n  auto flow_unit_trace_crop = trace->FlowUnit(\"crop\");\n  auto flow_unit_trace_preprocess = trace->FlowUnit(\"preprocess\");\n  auto flow_unit_trace_infer = trace->FlowUnit(\"infer\");\n\n  std::string session = \"session_0\";\n  // OPEN\n  std::thread process_resize_open([&]() {\n    auto open_slice =\n        flow_unit_trace_resize->Slice(modelbox::TraceSliceType::OPEN, session);\n    open_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    open_slice->End();\n  });\n\n  std::thread process_crop_open([&]() {\n    auto open_slice =\n        flow_unit_trace_crop->Slice(modelbox::TraceSliceType::OPEN, session);\n    open_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    open_slice->End();\n  });\n\n  std::thread process_preprocess_open([&]() {\n    auto open_slice = flow_unit_trace_preprocess->Slice(\n        modelbox::TraceSliceType::OPEN, session);\n    open_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    open_slice->End();\n  });\n\n  std::thread process_infer_open([&]() {\n    auto open_slice =\n        flow_unit_trace_infer->Slice(modelbox::TraceSliceType::OPEN, session);\n    open_slice->Begin();\n    
std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    open_slice->End();\n  });\n\n  process_resize_open.join();\n  process_infer_open.join();\n  process_preprocess_open.join();\n  process_crop_open.join();\n\n  // PROCESS\n  std::thread process_resize([&]() {\n    for (int i = 0; i < 5; i++) {\n      auto process_slice = flow_unit_trace_resize->Slice(\n          modelbox::TraceSliceType::PROCESS, session);\n      process_slice->Begin();\n      std::this_thread::sleep_for(std::chrono::milliseconds(5));\n      process_slice->End();\n    }\n  });\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(5));\n\n  std::thread process_crop([&]() {\n    for (int i = 0; i < 5; i++) {\n      auto process_slice =\n          flow_unit_trace_crop->Slice(modelbox::TraceSliceType::PROCESS, session);\n      process_slice->Begin();\n      std::this_thread::sleep_for(std::chrono::milliseconds(5));\n      process_slice->End();\n    }\n  });\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(5));\n\n  std::thread process_preprocess([&]() {\n    for (int i = 0; i < 5; i++) {\n      auto process_slice = flow_unit_trace_preprocess->Slice(\n          modelbox::TraceSliceType::PROCESS, session);\n      process_slice->Begin();\n      std::this_thread::sleep_for(std::chrono::milliseconds(5));\n      process_slice->End();\n    }\n  });\n\n  std::this_thread::sleep_for(std::chrono::milliseconds(5));\n\n  // ANOTHER\n  std::thread process_infer([&]() {\n    for (int i = 0; i < 5; i++) {\n      auto process_slice = flow_unit_trace_infer->Slice(\n          modelbox::TraceSliceType::PROCESS, session);\n      process_slice->Begin();\n      std::this_thread::sleep_for(std::chrono::milliseconds(5));\n      process_slice->End();\n    }\n  });\n\n  process_resize.join();\n  process_crop.join();\n  process_preprocess.join();\n  process_infer.join();\n\n  // CLOSE\n  std::thread process_resize_close([&]() {\n    auto close_slice =\n        
flow_unit_trace_resize->Slice(modelbox::TraceSliceType::CLOSE, session);\n    close_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    close_slice->End();\n  });\n\n  std::thread process_crop_close([&]() {\n    auto close_slice =\n        flow_unit_trace_crop->Slice(modelbox::TraceSliceType::CLOSE, session);\n    close_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    close_slice->End();\n  });\n\n  std::thread process_preprocess_close([&]() {\n    auto close_slice = flow_unit_trace_preprocess->Slice(\n        modelbox::TraceSliceType::CLOSE, session);\n    close_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    close_slice->End();\n  });\n\n  std::thread process_infer_close([&]() {\n    auto close_slice =\n        flow_unit_trace_infer->Slice(modelbox::TraceSliceType::CLOSE, session);\n    close_slice->Begin();\n    std::this_thread::sleep_for(std::chrono::milliseconds(10));\n    close_slice->End();\n  });\n\n  process_resize_close.join();\n  process_infer_close.join();\n  process_preprocess_close.join();\n  process_crop_close.join();\n\n  modelbox::Status ret = trace->WriteTrace();\n  EXPECT_EQ(ret, modelbox::STATUS_SUCCESS);\n}\n\nTEST_F(ProfilerTest, FlowUnitProfile) {\n  auto deviceManager = std::make_shared<modelbox::DeviceManager>();\n  auto config = std::make_shared<modelbox::Configuration>();\n  config->SetProperty(\"profile.profile\", \"true\");\n  std::shared_ptr<modelbox::Profiler> profiler =\n      std::make_shared<modelbox::Profiler>(deviceManager, config);\n\n  auto flow_unit_perf_ctx = std::make_shared<modelbox::FlowUnitPerfCtx>(\"resize\");\n\n  flow_unit_perf_ctx->UpdateProcessLatency(10);\n  flow_unit_perf_ctx->UpdateProcessLatency(10);\n  flow_unit_perf_ctx->UpdateProcessLatency(1);\n  int32_t process_latency = flow_unit_perf_ctx->GetProcessLatency();\n  EXPECT_EQ(process_latency, 7);\n\n  std::string device_type_1 = \"GPU\";\n  std::string 
device_id_1 = \"001\";\n\n  flow_unit_perf_ctx->UpdateDeviceMemory(device_type_1, device_id_1, 20);\n  std::this_thread::sleep_for(std::chrono::milliseconds(1));\n  flow_unit_perf_ctx->UpdateDeviceMemory(device_type_1, device_id_1, 30);\n  std::this_thread::sleep_for(std::chrono::milliseconds(1));\n  flow_unit_perf_ctx->UpdateDeviceMemory(device_type_1, device_id_1, 50);\n  int32_t device_memory =\n      flow_unit_perf_ctx->GetDeviceMemory(device_type_1, device_id_1);\n  EXPECT_EQ(device_memory, 50);\n}\n\nTEST_F(ProfilerTest, Statistics) {\n  std::atomic<size_t> create_notify_count(0);\n  std::atomic<size_t> delete_notify_count(0);\n  std::atomic<size_t> change_notify_count(0);\n  std::atomic<size_t> timer_notify_count(0);\n  {\n    auto root = std::make_shared<modelbox::StatisticsItem>();\n    const std::string path_pattern = \"flow.*.VideoDecoder.frame_count\";\n    const std::string frame_key = \"frame_count\";\n    std::set<std::string> expect_val;\n    // Plugin register notify\n    auto create_notify_cfg = std::make_shared<modelbox::StatisticsNotifyCfg>(\n        path_pattern,\n        [&create_notify_count](\n            const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n          MBLOG_INFO << \"Create notify [\" << msg->path_ << \"]\";\n          EXPECT_EQ(msg->type_, modelbox::StatisticsNotifyType::CREATE);\n          EXPECT_EQ(msg->path_, \"flow.SessionId.VideoDecoder.frame_count\");\n          EXPECT_TRUE(msg->value_->IsUint64());\n          EXPECT_FALSE(msg->value_->IsString());\n          uint64_t frame_count = 0;\n          auto ret = msg->value_->GetUint64(frame_count);\n          EXPECT_TRUE(ret);\n          EXPECT_EQ(frame_count, 0);\n          ++create_notify_count;\n        },\n        modelbox::StatisticsNotifyType::CREATE);\n    root->RegisterNotify(create_notify_cfg);\n\n    auto delete_notify_cfg = std::make_shared<modelbox::StatisticsNotifyCfg>(\n        path_pattern,\n        [&delete_notify_count](\n            const 
std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n          MBLOG_INFO << \"Delete notify [\" << msg->path_ << \"]\";\n          EXPECT_EQ(msg->type_, modelbox::StatisticsNotifyType::DELETE);\n          EXPECT_EQ(msg->path_, \"flow.SessionId.VideoDecoder.frame_count\");\n          EXPECT_TRUE(msg->value_->IsUint64());\n          EXPECT_FALSE(msg->value_->IsString());\n          uint64_t frame_count = 0;\n          auto ret = msg->value_->GetUint64(frame_count);\n          EXPECT_TRUE(ret);\n          EXPECT_EQ(frame_count, 1);\n          ++delete_notify_count;\n        },\n        modelbox::StatisticsNotifyType::DELETE);\n    root->RegisterNotify(delete_notify_cfg);\n\n    auto change_notify_cfg = std::make_shared<modelbox::StatisticsNotifyCfg>(\n        path_pattern,\n        [&change_notify_count](\n            const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n          MBLOG_INFO << \"Change notify [\" << msg->path_ << \"]\";\n          EXPECT_EQ(msg->type_, modelbox::StatisticsNotifyType::CHANGE);\n          EXPECT_EQ(msg->path_, \"flow.SessionId.VideoDecoder.frame_count\");\n          EXPECT_TRUE(msg->value_->IsUint64());\n          EXPECT_FALSE(msg->value_->IsString());\n          uint64_t frame_count = 0;\n          auto ret = msg->value_->GetUint64(frame_count);\n          EXPECT_TRUE(ret);\n          EXPECT_EQ(frame_count, 1);\n          ++change_notify_count;\n        },\n        modelbox::StatisticsNotifyType::CHANGE);\n    root->RegisterNotify(change_notify_cfg);\n\n    auto timer_notify_cfg = std::make_shared<modelbox::StatisticsNotifyCfg>(\n        path_pattern,\n        [&timer_notify_count](\n            const std::shared_ptr<const modelbox::StatisticsNotifyMsg>& msg) {\n          MBLOG_INFO << \"Timer notify [\" << msg->path_ << \"]\";\n          EXPECT_EQ(msg->type_, modelbox::StatisticsNotifyType::TIMER);\n          EXPECT_EQ(msg->path_, \"flow.SessionId.VideoDecoder.frame_count\");\n          
EXPECT_TRUE(msg->value_->IsUint64());\n          EXPECT_FALSE(msg->value_->IsString());\n          uint64_t frame_count = 0;\n          auto ret = msg->value_->GetUint64(frame_count);\n          EXPECT_TRUE(ret);\n          EXPECT_EQ(frame_count, 1);\n          ++timer_notify_count;\n        });\n    timer_notify_cfg->SetNotifyTimer(100, 100);\n    root->RegisterNotify(timer_notify_cfg);\n    // FlowUnit\n    auto flow_item = root->AddItem(modelbox::STATISTICS_ITEM_FLOW);\n    auto session_item = flow_item->AddItem(\"SessionId\");\n    auto decoder_item = session_item->AddItem(\"VideoDecoder\");\n    // Device\n    auto device_item = root->AddItem(\"Device\");\n    auto gpu0_item = device_item->AddItem(\"gpu0\");\n    auto gpu1_item = device_item->AddItem(\"gpu1\", std::string(\"\"));\n    // Check item\n    expect_val = {modelbox::STATISTICS_ITEM_FLOW, \"Device\"};\n    EXPECT_EQ(root->GetItemNames(), expect_val);\n    expect_val = {\"SessionId\"};\n    EXPECT_EQ(flow_item->GetItemNames(), expect_val);\n    expect_val = {\"VideoDecoder\"};\n    EXPECT_EQ(session_item->GetItemNames(), expect_val);\n    EXPECT_EQ(decoder_item->GetName(), \"VideoDecoder\");\n    EXPECT_EQ(decoder_item->GetPath(), \"flow.SessionId.VideoDecoder\");\n\n    expect_val = {\"gpu0\", \"gpu1\"};\n    EXPECT_EQ(device_item->GetItemNames(), expect_val);\n    EXPECT_EQ(gpu0_item->GetName(), \"gpu0\");\n    EXPECT_EQ(gpu0_item->GetPath(), \"Device.gpu0\");\n    EXPECT_EQ(gpu1_item->GetName(), \"gpu1\");\n    EXPECT_EQ(gpu1_item->GetPath(), \"Device.gpu1\");\n\n    // Add frame count\n    uint64_t init_frame_count = 0;\n    auto frame_item = decoder_item->AddItem(frame_key, init_frame_count);\n\n    // Wrong type\n    auto ret = frame_item->IncreaseValue<uint32_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_INVALID);\n\n    uint32_t wrong_type_frame_count;\n    ret = frame_item->GetValue(wrong_type_frame_count);\n    EXPECT_NE(ret, modelbox::STATUS_OK);\n\n    // Right op\n    
std::this_thread::sleep_for(\n        std::chrono::seconds(1));  // Wait change notify cool down\n    ret = frame_item->IncreaseValue<uint64_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_SUCCESS);\n    ret = frame_item->IncreaseValue<uint64_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_SUCCESS);\n\n    std::this_thread::sleep_for(\n        std::chrono::seconds(1));  // Wait change notify cool down\n    ret = frame_item->SetValue<uint64_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_SUCCESS);\n    ret = frame_item->SetValue<uint64_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_SUCCESS);\n\n    uint64_t frame_count = 0;\n    ret = frame_item->GetValue(frame_count);\n    EXPECT_EQ(ret, modelbox::STATUS_OK);\n    EXPECT_EQ(frame_count, 1);\n\n    ret = gpu0_item->SetValue<uint64_t>(1);\n    EXPECT_EQ(ret, modelbox::STATUS_NOTSUPPORT);\n    // Foreach\n    std::atomic_size_t foreach_count(0);\n    root->ForEach(\n        [&foreach_count](const std::shared_ptr<modelbox::StatisticsItem>& item,\n                         const std::string& relative_path) {\n          auto value = item->GetValue();\n          MBLOG_INFO << \"Foreach : \" << item->GetPath() << \" : \"\n                     << (value ? value->ToString() : \"null\");\n          EXPECT_EQ(relative_path, item->GetName());\n          ++foreach_count;\n          return modelbox::STATUS_OK;\n        });\n    EXPECT_EQ(foreach_count, 2);\n    foreach_count = 0;\n    root->ForEach(\n        [&foreach_count](const std::shared_ptr<modelbox::StatisticsItem>& item,\n                         const std::string& relative_path) {\n          auto value = item->GetValue();\n          MBLOG_INFO << \"Foreach : \" << item->GetPath() << \" : \"\n                     << (value ? 
value->ToString() : \"null\");\n          ++foreach_count;\n          return modelbox::STATUS_OK;\n        },\n        true);\n    EXPECT_EQ(foreach_count, 7);\n    decoder_item->ForEach(\n        [](const std::shared_ptr<modelbox::StatisticsItem>& item,\n           const std::string& relative_path) {\n          auto value = item->GetValue();\n          MBLOG_INFO << \"Foreach : \" << item->GetPath() << \" : \"\n                     << (value ? value->ToString() : \"null\") << \" : \"\n                     << relative_path;\n          EXPECT_EQ(item->GetName(), relative_path);\n          return modelbox::STATUS_OK;\n        });\n    // Read\n    auto item = root->GetItem(\"flow\");\n    ASSERT_NE(item, nullptr);\n    EXPECT_EQ(item->GetName(), \"flow\");\n    item = root->GetItem(\"flow.SessionId.VideoDecoder\");\n    ASSERT_NE(item, nullptr);\n    EXPECT_EQ(item->GetName(), \"VideoDecoder\");\n    // Trigger timer\n    frame_item->Notify(modelbox::StatisticsNotifyType::TIMER);\n    // Unregister notify\n    root->UnRegisterNotify(timer_notify_cfg);\n    frame_item->Notify(\n        modelbox::StatisticsNotifyType::TIMER);  // this should not working\n    // Remove ctx\n    decoder_item->Dispose();\n    EXPECT_TRUE(session_item->GetItemNames().empty());\n    EXPECT_EQ(decoder_item->AddItem(\"test\"), nullptr);\n    EXPECT_TRUE(decoder_item->GetItemNames().empty());\n    flow_item->DelItem(\"SessionId\");\n    EXPECT_TRUE(flow_item->GetItemNames().empty());\n    root->DelItem(modelbox::STATISTICS_ITEM_FLOW);\n    expect_val = {\"Device\"};\n    EXPECT_EQ(root->GetItemNames(), expect_val);\n\n    std::string val;\n    gpu1_item->SetValue(std::string(\"test\"));\n    gpu1_item->GetValue(val);\n    EXPECT_EQ(val, \"test\");\n    gpu1_item->SetValue(std::string(\"test2\"));\n    gpu1_item->GetValue(val);\n    EXPECT_EQ(val, \"test2\");\n    // Destroy\n  }\n  // Check callback\n  EXPECT_EQ(create_notify_count, 1);\n  EXPECT_EQ(change_notify_count, 2);\n  
EXPECT_EQ(delete_notify_count, 1);\n  EXPECT_EQ(timer_notify_count, 1);\n}\n"
  },
  {
    "path": "test/unit/modelbox/server_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#define _TURN_OFF_PLATFORM_STRING\n\n#include \"src/modelbox/server/server.h\"\n\n#include <dlfcn.h>\n#include <ftw.h>\n#include <modelbox/base/popen.h>\n#include <stdio.h>\n\n#include <fstream>\n#include <future>\n#include <nlohmann/json.hpp>\n\n#include \"gtest/gtest.h\"\n#include \"memory\"\n#include \"mock_server.h\"\n#include \"mock_tool.h\"\n#include \"mockflow.h\"\n#include \"modelbox/server/job_manager.h\"\n#include \"src/modelbox/server/config.h\"\n#include \"test_config.h\"\n#include \"thread\"\n\nnamespace modelbox {\n\nclass ModelboxServerTest : public testing::Test {\n public:\n  ModelboxServerTest() = default;\n\n protected:\n  void SetUp() override {\n    flow_ = std::make_shared<MockFlow>();\n    flow_->Init(false);\n    flow_->Register_Test_0_2_Flowunit();\n    flow_->Register_Test_OK_2_0_Flowunit();\n  };\n  void TearDown() override { flow_->Destroy(); };\n\n private:\n  std::shared_ptr<MockFlow> flow_;\n};\n\nTEST_F(ModelboxServerTest, Post) {\n  std::shared_ptr<JobManager> job_manager = std::make_shared<JobManager>();\n  auto job = job_manager->CreateJob(\"test\", \"\");\n\n  EXPECT_EQ(job->GetJobName(), \"test\");\n}\n\nnlohmann::json GetCreateJobMsg(const std::string &name) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  const auto *graph = R\"(\n      digraph demo {\n      
IN[flowunit=test_0_2]\n      OUT[flowunit=test_ok_2_0]\n      IN:Out_1->OUT:In_1\n      IN:Out_2->OUT:In_2\n  })\";\n\n  auto create_body = nlohmann::json::parse(R\"(\n      {\n        \"job_id\" : \"\",\n        \"job_graph_format\" : \"json\",\n        \"job_graph\": {\n          \"driver\": {\n            \"skip-default\": true\n          },\n          \"graph\": {\n            \"graphconf\" : \"\",\n            \"format\":\"graphviz\"\n          }\n        }\n      }\n    )\");\n  create_body[\"job_id\"] = name;\n  create_body[\"job_graph\"][\"driver\"][\"dir\"] = test_lib_dir;\n  create_body[\"job_graph\"][\"graph\"][\"graphconf\"] = graph;\n  return create_body;\n}\n\nnlohmann::json GetCreateJobFail(const std::string &name) {\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  const auto *graph = R\"(\n      digraph demo {\n      IN[flowunit=not_exist_in]\n      OUT[flowunit=not_exist_out]\n      IN:Out_1->OUT:In_1\n      IN:Out_2->OUT:In_2\n  })\";\n\n  auto create_body = nlohmann::json::parse(R\"(\n      {\n        \"job_id\" : \"\",\n        \"job_graph_format\" : \"json\",\n        \"job_graph\": {\n          \"driver\": {\n            \"skip-default\": true\n          },\n          \"graph\": {\n            \"graphconf\" : \"\",\n            \"format\":\"graphviz\"\n          }\n        }\n      }\n    )\");\n  create_body[\"job_id\"] = name;\n  create_body[\"job_graph\"][\"driver\"][\"dir\"] = test_lib_dir;\n  create_body[\"job_graph\"][\"graph\"][\"graphconf\"] = graph;\n  return create_body;\n}\n\nnlohmann::json GetFlowInfoMsg(const std::vector<std::string> &dir_list) {\n  const std::string test_lib_dir = TEST_DRIVER_DIR;\n  auto body = nlohmann::json::parse(R\"(\n      {\n        \"skip-default\" : true,\n        \"dir\" : []\n      }\n    )\");\n\n  auto dirs = nlohmann::json::array();\n  for (const auto &dir : dir_list) {\n    dirs.push_back(dir);\n  }\n  body[\"dir\"] = dirs;\n  return body;\n}\n\nhttplib::Response CreateJob(MockServer &server, 
const nlohmann::json &body) {\n  HttpRequest request(HttpMethods::PUT,\n                      server.GetServerURL() + \"/v1/modelbox/job/\");\n  request.SetBody(body.dump());\n\n  return server.DoRequest(request);\n}\n\nhttplib::Response ListAllJobs(MockServer &server) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/v1/modelbox/job/list/all\");\n  return server.DoRequest(request);\n}\n\nhttplib::Response QueryJob(MockServer &server, const std::string &name) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/v1/modelbox/job/\" + name);\n  return server.DoRequest(request);\n}\n\nhttplib::Response DeleteJob(MockServer &server, const std::string &name) {\n  HttpRequest request(HttpMethods::DELETE,\n                      server.GetServerURL() + \"/v1/modelbox/job/\" + name);\n  return server.DoRequest(request);\n}\n\nhttplib::Response GetFlowInfo(MockServer &server) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/editor/flow-info\");\n  return server.DoRequest(request);\n}\n\nhttplib::Response GetFlowInfoSpecificDir(\n    MockServer &server, const std::vector<std::string> &dir_list) {\n  HttpRequest request(HttpMethods::PUT,\n                      server.GetServerURL() + \"/editor/flow-info\");\n  request.SetBody(GetFlowInfoMsg(dir_list).dump());\n  return server.DoRequest(request);\n}\n\nhttplib::Response EditorCreateProject(MockServer &server,\n                                      const std::string &projectname,\n                                      const std::string &path,\n                                      const std::string &temp = \"\") {\n  HttpRequest request(HttpMethods::PUT,\n                      server.GetServerURL() + \"/editor/project/create\");\n  nlohmann::json create_body;\n  create_body[\"name\"] = projectname;\n  create_body[\"path\"] = path;\n  if (temp.length() > 0) {\n    create_body[\"template\"] = temp;\n  }\n  
request.SetBody(create_body.dump());\n  return server.DoRequest(request);\n}\n\nhttplib::Response EditorTemplateListGet(MockServer &server) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/editor/project/template\");\n  return server.DoRequest(request);\n}\n\nhttplib::Response EditorBasicInfoGet(MockServer &server) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/editor/basic-info\");\n  return server.DoRequest(request);\n}\n\nhttplib::Response EditorQueryProject(MockServer &server,\n                                     const std::string &path) {\n  HttpRequest request(HttpMethods::GET,\n                      server.GetServerURL() + \"/editor/project?path=\" + path);\n  return server.DoRequest(request);\n}\n\nhttplib::Response EditorCreateFlowunit(\n    MockServer &server, const std::map<std::string, std::string> &value,\n    const std::vector<std::string> &in = {},\n    const std::vector<std::string> &out = {}) {\n  HttpRequest request(HttpMethods::PUT,\n                      server.GetServerURL() + \"/editor/flowunit/create\");\n  nlohmann::json create_body;\n  size_t i = 0;\n\n  for (const auto &kv : value) {\n    create_body[kv.first] = kv.second;\n  }\n\n  nlohmann::json injson;\n  for (i = 0; i < in.size(); i++) {\n    injson[\"name\"] = in[i];\n  }\n\n  if (in.size() == 0) {\n    create_body[\"input\"] = \"-\";\n  } else {\n    create_body[\"input\"] = injson;\n  }\n\n  nlohmann::json outjson;\n  for (i = 0; i < out.size(); i++) {\n    outjson[\"name\"] = out[i];\n  }\n\n  if (out.size() == 0) {\n    create_body[\"output\"] = \"-\";\n  } else {\n    create_body[\"output\"] = outjson;\n  }\n  request.SetBody(create_body.dump());\n  return server.DoRequest(request);\n}\n\nhttplib::Response GetDemo(MockServer &server, const std::string &demo = \"\") {\n  auto url = server.GetServerURL() + \"/editor/demo\";\n  if (!demo.empty()) {\n    url = url + \"/\" + demo;\n  }\n  
HttpRequest request(HttpMethods::GET, url);\n  return server.DoRequest(request);\n}\n\nTEST_F(ModelboxServerTest, CreateJob) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto body = GetCreateJobMsg(\"example\");\n  auto response = CreateJob(server, body);\n  MBLOG_INFO << response.body;\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n}\n\nTEST_F(ModelboxServerTest, CreateJobFail) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto body = GetCreateJobFail(\"example\");\n  auto response = CreateJob(server, body);\n  MBLOG_INFO << response.body;\n  EXPECT_EQ(response.status, HttpStatusCodes::BAD_REQUEST);\n  EXPECT_NE(response.body.find_first_of(\"not_exist_in\"), std::string::npos);\n}\n\nTEST_F(ModelboxServerTest, ListAllJobs) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto body = GetCreateJobMsg(\"example1\");\n  auto create_response = CreateJob(server, body);\n  EXPECT_EQ(create_response.status, HttpStatusCodes::CREATED);\n\n  body = GetCreateJobMsg(\"example2\");\n  create_response = CreateJob(server, body);\n  EXPECT_EQ(create_response.status, HttpStatusCodes::CREATED);\n\n  auto response = ListAllJobs(server);\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  auto result = nlohmann::json::parse(response.body);\n  auto jobs = result[\"job_list\"];\n  EXPECT_EQ(jobs[0][\"job_id\"], \"example2\");\n  EXPECT_EQ(jobs[0][\"job_status\"], \"RUNNING\");\n  EXPECT_EQ(jobs[1][\"job_id\"], \"example1\");\n  EXPECT_EQ(jobs[1][\"job_status\"], \"RUNNING\");\n}\n\nTEST_F(ModelboxServerTest, QueryJobNotExists) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  
server.Start();\n  sleep(1);\n  auto response = QueryJob(server, \"example\");\n  EXPECT_EQ(response.status, HttpStatusCodes::NOT_FOUND);\n}\n\nTEST_F(ModelboxServerTest, QueryJob) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto body = GetCreateJobMsg(\"example\");\n  auto create_response = CreateJob(server, body);\n  EXPECT_EQ(create_response.status, HttpStatusCodes::CREATED);\n  auto response = QueryJob(server, \"example\");\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  auto result = nlohmann::json::parse(response.body);\n  EXPECT_EQ(result[\"job_id\"], \"example\");\n  EXPECT_EQ(result[\"job_status\"], \"RUNNING\");\n}\n\nTEST_F(ModelboxServerTest, DeleteJob) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto body = GetCreateJobMsg(\"example\");\n  auto response = CreateJob(server, body);\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n  response = QueryJob(server, \"example\");\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  response = DeleteJob(server, \"example\");\n  EXPECT_EQ(response.status, HttpStatusCodes::NO_CONTENT);\n  response = QueryJob(server, \"example\");\n  EXPECT_EQ(response.status, HttpStatusCodes::NOT_FOUND);\n}\n\nTEST_F(ModelboxServerTest, QueryDemo) {\n  MockServer server;\n  std::string demo_root_dir = std::string(TEST_DATA_DIR) + \"/demo\";\n  RemoveDirectory(demo_root_dir);\n  CreateDirectory(demo_root_dir);\n  Defer { RemoveDirectory(demo_root_dir); };\n\n  auto conf = std::make_shared<Configuration>();\n  conf->SetProperty(\"editor.demo_root\", demo_root_dir);\n\n  auto create_demo_file = [&](const std::string &name,\n                              const std::string &graphfilename,\n                              const std::string &content) {\n    auto demo_path = demo_root_dir + \"/\" + name;\n  
  CreateDirectory(demo_path + \"/flowunit\");\n    CreateDirectory(demo_path + \"/graph\");\n    std::ofstream out(demo_path + \"/graph/\" + graphfilename, std::ios::trunc);\n    if (out.fail()) {\n      return false;\n    }\n    Defer { out.close(); };\n\n    out << content;\n    if (out.fail()) {\n      return false;\n    }\n    return true;\n  };\n\n  std::string data = R\"(\n      {\n        \"flow\" : {\n          \"name\": \"demo1\",\n          \"desc\": \"demo1 desc\"\n        }\n      })\";\n  create_demo_file(\"demo1\", \"flow1.json\", data);\n\n  data = R\"(\n[flow]\nname = \"demo2\"\ndesc = \"demo2 desc\"\n  )\";\n  create_demo_file(\"demo2\", \"flow2.toml\", data);\n\n  auto ret = server.Init(conf);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = GetDemo(server);\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  auto result = nlohmann::json::parse(response.body);\n  MBLOG_INFO << response.body;\n  auto demo_list = result[\"demo_list\"];\n  EXPECT_EQ(demo_list[0][\"demo\"], \"demo1\");\n  EXPECT_EQ(demo_list[0][\"name\"], \"demo1\");\n  EXPECT_EQ(demo_list[0][\"graphfile\"], \"flow1.json\");\n  EXPECT_EQ(demo_list[0][\"desc\"], \"demo1 desc\");\n  EXPECT_EQ(demo_list[1][\"demo\"], \"demo2\");\n  EXPECT_EQ(demo_list[1][\"name\"], \"demo2\");\n  EXPECT_EQ(demo_list[1][\"graphfile\"], \"flow2.toml\");\n  EXPECT_EQ(demo_list[1][\"desc\"], \"demo2 desc\");\n\n  response = GetDemo(server, \"demo1/flow1.json\");\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  result = nlohmann::json::parse(response.body);\n  MBLOG_INFO << response.body;\n  EXPECT_EQ(result[\"flow\"][\"name\"], \"demo1\");\n\n  response = GetDemo(server, \"../../demo2/flow2.toml\");\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  MBLOG_INFO << response.body;\n}\n\n// Python library conflict problem, disable test cases.\nTEST_F(ModelboxServerTest, DISABLED_FlowInfo) {\n  MockServer server;\n  auto ret = 
server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = GetFlowInfo(server);\n  MBLOG_INFO << response.body;\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n}\n\nTEST_F(ModelboxServerTest, FlowInfoSpecificPath) {\n  MockServer server;\n  auto ret = server.Init(nullptr);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  std::vector<std::string> dir_list;\n  dir_list.emplace_back(VIRTUAL_PYTHON_PATH);\n  auto response = GetFlowInfoSpecificDir(server, dir_list);\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n}\n\nTEST_F(ModelboxServerTest, TemplateCommandTest) {\n  MockServer server;\n  auto conf = std::make_shared<Configuration>();\n  std::string template_env = \"MODELBOX_TEMPLATE_PATH\";\n  std::string project_name = \"test\";\n  template_env += \"=\" + std::string(MODELBOX_TEMPLATE_BIN_DIR);\n\n  std::string tmp_path = TEST_WORKING_DIR + std::string(\"/tmp/project\");\n  RemoveDirectory(tmp_path);\n  Defer { RemoveDirectory(tmp_path); };\n\n  conf->SetProperty(\"editor.test.template_cmd_env\", template_env);\n  conf->SetProperty(\"editor.test.template_cmd\", MODELBOX_TEMPLATE_CMD_PATH);\n  auto ret = server.Init(conf);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = EditorCreateProject(server, project_name, tmp_path);\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n\n  response = EditorCreateFlowunit(\n      server, {{\"name\", \"cpp\"}, {\"lang\", \"c++\"}, {\"project-path\", tmp_path}});\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n\n  response = EditorCreateFlowunit(\n      server,\n      {{\"name\", \"python\"}, {\"lang\", \"python\"}, {\"project-path\", tmp_path}});\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n\n  
response = EditorCreateFlowunit(server, {{\"name\", \"infer\"},\n                                           {\"lang\", \"infer\"},\n                                           {\"project-path\", tmp_path},\n                                           {\"virtual-type\", \"tensorrt\"},\n                                           {\"model\", \"modelfile\"}});\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n\n  response =\n      EditorCreateFlowunit(server, {{\"name\", \"yolo\"},\n                                    {\"lang\", \"yolo\"},\n                                    {\"project-path\", tmp_path},\n                                    {\"virtual-type\", \"yolov3_postprocess\"}});\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n\n  response = EditorQueryProject(server, tmp_path);\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n  auto result = nlohmann::json::parse(response.body);\n  EXPECT_EQ(result[\"project_name\"], project_name);\n  EXPECT_EQ(result[\"flowunits\"].size(), 4);\n}\n\nTEST_F(ModelboxServerTest, TemplateResizeProject) {\n  MockServer server;\n  auto conf = std::make_shared<Configuration>();\n  std::string template_env = \"MODELBOX_TEMPLATE_PATH\";\n  std::string project_name = \"test\";\n  template_env += \"=\" + std::string(MODELBOX_TEMPLATE_BIN_DIR);\n\n  std::string tmp_path = TEST_WORKING_DIR + std::string(\"/tmp/project\");\n  RemoveDirectory(tmp_path);\n  Defer { RemoveDirectory(tmp_path); };\n\n  conf->SetProperty(\"editor.test.template_cmd_env\", template_env);\n  conf->SetProperty(\"editor.test.template_cmd\", MODELBOX_TEMPLATE_CMD_PATH);\n  auto ret = server.Init(conf);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = EditorCreateProject(server, project_name, tmp_path, \"resize\");\n  MBLOG_INFO << response.body.c_str();\n  
EXPECT_EQ(response.status, HttpStatusCodes::CREATED);\n}\n\nTEST_F(ModelboxServerTest, TemplateListGet) {\n  MockServer server;\n  auto conf = std::make_shared<Configuration>();\n  std::string template_env = \"MODELBOX_TEMPLATE_PATH\";\n  std::string project_name = \"test\";\n  template_env += \"=\" + std::string(MODELBOX_TEMPLATE_BIN_DIR);\n\n  conf->SetProperty(\"editor.test.template_cmd_env\", template_env);\n  conf->SetProperty(\"editor.template_dir\", MODELBOX_TEMPLATE_BIN_DIR);\n  conf->SetProperty(\"editor.test.template_cmd\", MODELBOX_TEMPLATE_CMD_PATH);\n  auto ret = server.Init(conf);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = EditorTemplateListGet(server);\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n\n  std::vector<std::string> files;\n  modelbox::ListSubDirectoryFiles(\n      MODELBOX_TEMPLATE_BIN_DIR + std::string(\"/project\"), \"desc.toml\", &files);\n  auto result = nlohmann::json::parse(response.body);\n  EXPECT_EQ(files.size(), result[\"project_template_list\"].size());\n}\n\n\nTEST_F(ModelboxServerTest, BasicInfoGet) {\n  MockServer server;\n  auto conf = std::make_shared<Configuration>();\n\n  auto ret = server.Init(conf);\n  if (ret == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  sleep(1);\n  auto response = EditorBasicInfoGet(server);\n  MBLOG_INFO << response.body.c_str();\n  EXPECT_EQ(response.status, HttpStatusCodes::OK);\n}\n\nTEST_F(ModelboxServerTest, ServerLoadConfig) {\n  const std::string test_etc_dir = TEST_DATA_DIR;\n  const std::string test_lib_dir = TEST_LIB_DIR;\n  std::string conf = R\"\"\"([server]\nip = \"127.0.0.1\"\nport = \"1104\"\nplugin_path = \")\"\"\" + test_lib_dir +\n                     R\"\"\"(/modelbox-plugin.so\"\n)\"\"\";\n\n  MBLOG_INFO << \"modelbox config: \\n\" << conf;\n  std::string config_file_path = test_etc_dir + \"/modelbox.conf\";\n  std::ofstream 
ofs(config_file_path);\n  EXPECT_TRUE(ofs.is_open());\n  ofs.write(conf.data(), conf.size());\n  ofs.flush();\n  ofs.close();\n  Defer { remove(config_file_path.c_str()); };\n  ASSERT_TRUE(LoadConfig(config_file_path));\n}\n\nTEST_F(ModelboxServerTest, Log) {\n  MockServer server;\n  MockTool tool;\n  auto conf = std::make_shared<Configuration>();\n  auto retval = server.Init(conf);\n  if (retval == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n  auto ret = tool.Run(\"server log --getlevel\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server log --setlevel debug\");\n  MBLOG_DEBUG << \"You will see this log\";\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server log --getlevel\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server log --setlevel info\");\n  EXPECT_EQ(ret, 0);\n  server.Stop();\n}\n\nTEST_F(ModelboxServerTest, Slab) {\n  MockServer server;\n  MockTool tool;\n  auto conf = std::make_shared<Configuration>();\n  auto retval = server.Init(conf);\n  if (retval == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n\n  auto ret = tool.Run(\"server slab\");\n  EXPECT_EQ(ret, 1);\n  ret = tool.Run(\"server slab --device\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server slab --device --type cpu\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server slab --device --type cuda\");\n  EXPECT_EQ(ret, 1);\n  ret = tool.Run(\"server slab --device --type cpu --id 0\");\n  EXPECT_EQ(ret, 1);\n  ret = tool.Run(\"server slab --device --type cuda --id 0\");\n  EXPECT_EQ(ret, 1);\n  sleep(1);\n  server.Stop();\n}\n\nTEST_F(ModelboxServerTest, Stat) {\n  MockServer server;\n  MockTool tool;\n  auto conf = std::make_shared<Configuration>();\n  auto retval = server.Init(conf);\n  if (retval == STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  server.Start();\n\n  auto root = modelbox::Statistics::GetGlobalItem();\n  Defer {Statistics::ReleaseGlobalItem();};\n  // FlowUnit\n  auto flow_item = root->AddItem(modelbox::STATISTICS_ITEM_FLOW);\n  auto 
session_item = flow_item->AddItem(\"SessionId\");\n  auto decoder_item = session_item->AddItem(\"video_decoder\");\n  flow_item->SetValue<uint64_t>(1);\n  // Device\n  auto device_item = root->AddItem(\"Device\");\n  auto gpu0_item = device_item->AddItem(\"gpu0\", 20);\n  auto gpu1_item = device_item->AddItem(\"gpu1\", 15);\n  gpu1_item->SetValue<uint64_t>(10);\n\n  auto ret = tool.Run(\"server stat --all\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server stat --node gpu1\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server stat --node flow\");\n  EXPECT_EQ(ret, 0);\n  ret = tool.Run(\"server stat --node not_exist\");\n  EXPECT_EQ(ret, 0);\n  server.Stop();\n}\n\nTEST_F(ModelboxServerTest, JSPlugin) {\n  const std::string test_etc_dir = TEST_DATA_DIR;\n  const std::string test_js_path = test_etc_dir + \"/modelbox-plugin-billing.js\";\n  std::string conf = R\"(\n                     [server]\n                     ip = \"127.0.0.1\"\n                     port = \"1104\"\n                     [plugin]\n                     files = [\")\" +\n                     test_js_path +\n                     R\"(\"]\n                     )\";\n\n  MBLOG_INFO << \"modelbox config: \\n\" << conf;\n  std::string config_file_path = test_etc_dir + \"/modelbox.conf\";\n  std::ofstream ofs(config_file_path);\n  EXPECT_TRUE(ofs.is_open());\n  ofs.write(conf.data(), conf.size());\n  ofs.flush();\n  ofs.close();\n  Defer { remove(config_file_path.c_str()); };\n\n  std::string js = R\"(\n    var bill_for_sessions = {};\n    var value_count = {\"value_count\": 0};\n\n    function init(ctx) {\n      console.error(\"Register stats notify\");\n      var type = NOTIFY_CREATE | NOTIFY_CHANGE | NOTIFY_DELETE;\n      registerStatsNotify(\"flow.*\", type, \"onGraphChange\");\n      registerStatsNotify(\"flow.*.*\", type, \"onSessionChange\");\n      registerStatsNotify(\"flow.*.*.demuxer.video_duration\", type, \"onValueChange\", value_count);\n      
registerStatsNotify(\"flow.*.*.demuxer.video_rate\", type, \"onValueChange\");\n      registerStatsNotify(\"flow.*.*.decoder.width\", type, \"onValueChange\");\n      registerStatsNotify(\"flow.*.*.decoder.height\", type, \"onValueChange\");\n      registerStatsNotify(\"flow.*.*.decoder.frame_count\", type, \"onValueChange\");\n    }\n\n    function onGraphChange(path, value, type) {\n      if (type & NOTIFY_DELETE) {\n        routeData(\"test_router\", \"bill\", JSON.stringify(bill_for_sessions));\n      }\n    }\n\n    function onSessionChange(path, value, type) {\n      console.warn(\"Session change\");\n      var session_id = path.substring(path.lastIndexOf(\".\") + 1);\n      if (type & NOTIFY_CREATE) {\n        bill_for_sessions[session_id] = {};\n      }\n\n      return;\n    }\n\n    function onValueChange(path, value, type, priv_data) {\n      console.log(\"Value \" + path + \" change to \" + value);\n      var value_name_pos = path.lastIndexOf(\".\");\n      var node_pos = path.lastIndexOf(\".\", value_name_pos - 1);\n      var session_pos = path.lastIndexOf(\".\", node_pos - 1);\n      var value_name = path.substring(value_name_pos + 1);\n      var session_id = path.substring(session_pos + 1, node_pos);\n\n      bill_for_sessions[session_id][value_name] = value;\n      if (priv_data !== undefined) {\n        ++priv_data[\"value_count\"];\n        bill_for_sessions[session_id][\"value_count\"] = String(priv_data[\"value_count\"]);\n      }\n\n      return;\n    }\n\n    function start(ctx) {\n      return 0;\n    }\n\n    function stop(ctx) {\n      return 0;\n    }\n  )\";\n  std::ofstream ofs2(test_js_path);\n  EXPECT_TRUE(ofs2.is_open());\n  ofs2.write(js.data(), js.size());\n  ofs2.flush();\n  ofs2.close();\n  Defer { remove(test_js_path.c_str()); };\n\n  ASSERT_TRUE(LoadConfig(config_file_path));\n  MockServer server;\n  auto ret = server.Init(kConfig);\n  if (ret == modelbox::STATUS_NOTSUPPORT) {\n    GTEST_SKIP();\n  }\n  ASSERT_EQ(ret, 
modelbox::STATUS_OK);\n  ret = server.Start();\n  ASSERT_EQ(ret, modelbox::STATUS_OK);\n  auto stats = modelbox::Statistics::GetGlobalItem();\n  Defer {Statistics::ReleaseGlobalItem();};\n\n  // graph init\n  auto flow_stats = stats->GetItem(STATISTICS_ITEM_FLOW);\n  auto graph_stats = flow_stats->AddItem(\"demo\");\n\n  // router init\n  auto msg_router = PluginMsgRouter::GetInstance();\n  std::promise<bool> recv_notify;\n  auto recv_handle = recv_notify.get_future();\n  msg_router->RegisterRecvFunc(\n      \"test_router\", [&recv_notify](const std::string &msg_name,\n                                    const std::shared_ptr<const void> &msg_data,\n                                    size_t msg_len) {\n        EXPECT_EQ(msg_name, \"bill\");\n        std::string msg_str((const char *)msg_data.get(), msg_len);\n        bool parse_json_ok = false;\n        try {\n          auto msg_json = nlohmann::json::parse(msg_str);\n          std::vector<std::string> session_name_list = {\"session_id1\",\n                                                        \"session_id2\"};\n          EXPECT_EQ(msg_json.size(), session_name_list.size());\n          std::vector<std::vector<std::string>> expected_value = {\n              {\"10\", \"25\", \"1920\", \"1080\", \"20\", \"2\"},\n              {\"20\", \"30\", \"1366\", \"768\", \"100\", \"4\"}};\n          for (size_t session_index = 0;\n               session_index < session_name_list.size(); ++session_index) {\n            auto &session_name = session_name_list[session_index];\n            auto &expected_session_value = expected_value[session_index];\n            auto session_json_item = msg_json.find(session_name);\n            ASSERT_NE(session_json_item, msg_json.end());\n            auto &session_json = session_json_item.value();\n            EXPECT_EQ((std::string)session_json[\"video_duration\"],\n                      expected_session_value[0]);\n            EXPECT_EQ((std::string)session_json[\"video_rate\"],\n            
          expected_session_value[1]);\n            EXPECT_EQ((std::string)session_json[\"width\"],\n                      expected_session_value[2]);\n            EXPECT_EQ((std::string)session_json[\"height\"],\n                      expected_session_value[3]);\n            EXPECT_EQ((std::string)session_json[\"frame_count\"],\n                      expected_session_value[4]);\n            EXPECT_EQ((std::string)session_json[\"value_count\"],\n                      expected_session_value[5]);\n          }\n        } catch (std::exception &e) {\n          MBLOG_ERROR << \"Process json failed \" << msg_str;\n          MBLOG_ERROR << \"Expection \" << e.what();\n          EXPECT_TRUE(parse_json_ok);\n        }\n\n        recv_notify.set_value(true);\n      });\n\n  // session1 begin\n  {\n    // modelbox task1 begin\n    auto session_stats = graph_stats->AddItem(\"session_id1\");\n    auto demuxer_stats = session_stats->AddItem(\"demuxer\");\n    auto decoder_stats = session_stats->AddItem(\"decoder\");\n\n    // modelbox task running\n    demuxer_stats->AddItem(\"video_duration\", (int32_t)10);\n    demuxer_stats->AddItem(\"video_rate\", (int32_t)25);\n    decoder_stats->AddItem(\"width\", (int32_t)1920);\n    decoder_stats->AddItem(\"height\", (int32_t)1080);\n    decoder_stats->AddItem(\"frame_count\", (int32_t)20);\n\n    // modelbox task end\n    graph_stats->DelItem(\"session_id1\");\n  }\n  // session2 begin\n  {\n    // modelbox task1 begin\n    auto session_stats = graph_stats->AddItem(\"session_id2\");\n    auto demuxer_stats = session_stats->AddItem(\"demuxer\");\n    auto decoder_stats = session_stats->AddItem(\"decoder\");\n\n    // modelbox task running\n    demuxer_stats->AddItem(\"video_duration\", (int32_t)20);\n    demuxer_stats->AddItem(\"video_rate\", (int32_t)30);\n    decoder_stats->AddItem(\"width\", (int32_t)1366);\n    decoder_stats->AddItem(\"height\", (int32_t)768);\n    decoder_stats->AddItem(\"frame_count\", (int32_t)100);\n\n    // 
modelbox task end\n    graph_stats->DelItem(\"session_id2\");\n  }\n\n  // modelbox exit\n  flow_stats->DelItem(\"demo\");\n  EXPECT_TRUE(recv_handle.get());\n  server.Stop();\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/modelbox/serving_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"src/modelbox/serving/serving.h\"\n\n#include <dlfcn.h>\n#include <stdio.h>\n#include \"modelbox/base/popen.h\"\n\n#include <fstream>\n\n#include \"gtest/gtest.h\"\n#include \"mockflow.h\"\n#include \"test_config.h\"\n\nnamespace modelbox {\n\nstatic std::set<std::string> SUPPORT_TF_VERSION = {\"1.13.1\", \"1.15.0\",\n                                                   \"2.6.0-dev20210809\"};\n\nclass ModelboxServingTest : public testing::Test {\n public:\n protected:\n  ModelboxServingTest() {\n    flow_ = std::make_shared<Flow>();\n  };\n\n  ~ModelboxServingTest() override = default;\n\n  void SetUp() override {\n    auto version = GetTFVersion();\n\n    if (SUPPORT_TF_VERSION.find(version) == SUPPORT_TF_VERSION.end()) {\n      version_suitable_ = false;\n      MBLOG_INFO << \"the version is \" << version\n                 << \", not in support version, skip test suit\";\n      GTEST_SKIP();\n    }\n\n    UpdateToml(version);\n  };\n\n  void TearDown() override {\n    if (!version_suitable_) {\n      GTEST_SKIP();\n    }\n  };\n\n  const std::string test_lib_dir = TEST_DRIVER_DIR,\n                    test_data_dir = TEST_DATA_DIR, test_assets = TEST_ASSETS,\n                    test_toml_file = \"test_serving_model.toml\",\n                    test_python_file = \"test_custom_service.py\";\n\n  
std::string default_custom_service_path, custom_service_path,\n      new_test_serving_toml;\n\n  void PrepareFiles(const std::string &default_serving_path, bool is_default);\n  void RemoveFiles(const std::string &default_serving_path, const std::string &model_name);\n  void ReplaceGraphToml(const std::string &graph_toml, const std::string &model_name,\n                  std::string &update_graph_toml_file);\n  modelbox::Status BuildAndRunFlow(const std::string &graph_path, int timeout = 10 * 1000);\n private:\n  void UpdateToml(const std::string &version);\n  modelbox::Status ReplaceVersion(const std::string &src,\n                                  const std::string &dest,\n                                  const std::string &version);\n  std::string GetTFVersion();\n  bool version_suitable_{true};\n  std::shared_ptr<Flow> flow_;\n};\n\nvoid ModelboxServingTest::RemoveFiles(const std::string &default_serving_path, const std::string &model_name) {\n    std::string default_graph_toml = \"/tmp/\" + model_name + \"_new.toml\";\n    std::string default_flow_path = \"/tmp/\" + model_name;\n    modelbox::RemoveDirectory(default_serving_path);\n    modelbox::RemoveDirectory(default_flow_path);\n    remove(default_graph_toml.c_str());\n}\n\n\nmodelbox::Status ModelboxServingTest::ReplaceVersion(\n    const std::string &src, const std::string &dest,\n    const std::string &version) {\n  if (access(dest.c_str(), F_OK) == 0) {\n    if (remove(dest.c_str()) == -1) {\n        return modelbox::STATUS_FAULT;\n    }\n  }\n\n  std::ifstream src_file(src, std::ios::binary);\n  std::ofstream dst_file(dest, std::ios::binary | std::ios::trunc);\n\n  if (src_file.fail() || dst_file.fail()) {\n    return modelbox::STATUS_FAULT;\n  }\n\n  std::string line;\n  std::string tf_version = \"TF_VERSION\";\n\n  while (std::getline(src_file, line)) {\n    auto pos = line.find(tf_version);\n    if (pos != std::string::npos) {\n      line.replace(pos, tf_version.size(), version);\n    }\n    
dst_file << line << \"\\n\";\n  }\n\n  src_file.close();\n  if (dst_file.fail()) {\n    dst_file.close();\n    remove(dest.c_str());\n    return modelbox::STATUS_FAULT;\n  }\n  dst_file.close();\n\n  return modelbox::STATUS_OK;\n}\n\nstd::string ModelboxServingTest::GetTFVersion() {\n  std::string ans;\n  void *handler =\n      dlopen(MODELBOX_TF_SO_PATH, RTLD_LOCAL | RTLD_DEEPBIND);\n  if (handler == nullptr) {\n    MBLOG_ERROR << \"dlopen error: \" << dlerror();\n    return ans;\n  }\n\n  Defer { dlclose(handler); };\n  typedef const char *(*TF_Version)();\n  TF_Version func = nullptr;\n\n  func = (TF_Version)dlsym(handler, \"TF_Version\");\n  if (func == nullptr) {\n    MBLOG_ERROR << \"dlsym TF_Version failed, \" << dlerror();\n    return ans;\n  }\n\n  ans = func();\n  return ans;\n}\n\nvoid ModelboxServingTest::UpdateToml(const std::string &version) {\n  const std::string src_test_serving_toml =\n      test_data_dir + \"/\" + test_toml_file;\n  new_test_serving_toml = test_data_dir + \"/test_serving_model_new.toml\";\n  auto status =\n      ReplaceVersion(src_test_serving_toml, new_test_serving_toml, version);\n  EXPECT_EQ(status, STATUS_OK);\n}\n\nvoid ModelboxServingTest::PrepareFiles(const std::string &default_serving_path,\n                                       bool is_default) {\n  auto mkdir_ret = mkdir(default_serving_path.c_str(), 0700);\n  EXPECT_EQ(mkdir_ret, 0);\n\n  const std::string dest_test_serving_toml =\n      default_serving_path + \"/model.toml\";\n  auto status = modelbox::CopyFile(new_test_serving_toml, dest_test_serving_toml);\n  EXPECT_EQ(status, STATUS_OK);\n\n  if (!is_default) {\n    const std::string src_custom_service_file =\n        test_data_dir + \"/test_custom_service.py\";\n    const std::string dest_custom_service_file =\n        default_serving_path + \"/custom_service.py\";\n    auto status =\n        modelbox::CopyFile(src_custom_service_file, dest_custom_service_file);\n    EXPECT_EQ(status, STATUS_OK);\n  }\n}\n\nvoid 
ModelboxServingTest::ReplaceGraphToml(const std::string &graph_toml,\n                const std::string &model_name, std::string &update_graph_toml_file) {\n  std::ifstream graph_reader(graph_toml);\n  EXPECT_EQ(graph_reader.is_open(), true);\n  \n  update_graph_toml_file = \"/tmp/\" + model_name + \"_new.toml\";\n  std::ofstream new_graph_writer(update_graph_toml_file);\n  EXPECT_EQ(new_graph_writer.is_open(), true);\n  \n  std::stringstream ss;\n  std::string content;\n  while (std::getline(graph_reader, content)) {\n    size_t pos;\n    pos = content.find(\"skip-default\");\n    if (pos != std::string::npos) {\n       ss << \"skip-default = true\\n\";\n       continue;\n    }\n\n   pos = content.find(\"dir\");\n   if (pos != std::string::npos) {\n       ss << \"dir=[\\\"\" + std::string(TEST_DRIVER_DIR) + \"\\\", \\\"/tmp/\" + model_name + \"\\\"]\\n\";\n       continue;\n   }\n\n   ss << content << \"\\n\";\n  }\n\n  new_graph_writer << ss.str();\n  EXPECT_EQ(new_graph_writer.good(), true);\n  graph_reader.close();\n  new_graph_writer.close();\n  EXPECT_EQ(remove(graph_toml.c_str()), 0);\n}\n\nmodelbox::Status ModelboxServingTest::BuildAndRunFlow(const std::string &graph_path, int timeout) {\n  auto ret = flow_->Init(graph_path);\n  if (!ret) {\n     return ret;\n  }\n\n  ret = flow_->Build();\n  if (!ret) {\n     return ret;\n  }\n\n  ret = flow_->RunAsync();\n  if (!ret) {\n     return ret;\n  }\n\n  if (timeout < 0) {\n     return ret;\n  }\n\n  Status retval;\n  flow_->Wait(timeout, &retval);\n  return retval;\n}\n\nTEST_F(ModelboxServingTest, DefaultCustomService) {\n  const std::string default_serving_path =\n      test_data_dir + \"/default_test_serving_path\";\n  std::string model_name = \"test_default_custom_service\";\n  PrepareFiles(default_serving_path, true);\n  auto serving = std::make_shared<ModelServing>();\n  auto status = serving->GenerateTemplate(model_name,\n                                          default_serving_path, 39110);\n  
EXPECT_EQ(status, STATUS_OK);\n  std::string default_graph_toml = \"/tmp/\" + model_name + \".toml\";\n  std::string default_graph_toml_new;\n  ReplaceGraphToml(default_graph_toml, model_name, default_graph_toml_new);\n  status = BuildAndRunFlow(default_graph_toml_new);\n  EXPECT_EQ(status, STATUS_OK);\n  modelbox::Popen p;\n  std::vector<std::string> cmds{\"python3\"};\n  std::string python_file = test_data_dir + \"/test_client.py\";\n  cmds.push_back(python_file);\n  p.Open(cmds);\n  std::string line;\n  p.ReadOutLine(line);\n  auto res_line = line.substr(0, line.size() - 1);  \n  EXPECT_EQ(res_line, \"{\\\"output\\\": [1.05097496509552, 1.3005822896957397, 1.550189733505249]}\");\n  auto ret = p.Close();\n  EXPECT_EQ(WEXITSTATUS(ret), 0);\n  RemoveFiles(default_serving_path, model_name);\n}\n\nTEST_F(ModelboxServingTest, CustomService) {\n  const std::string serving_path = test_data_dir + \"/test_serving_path\";\n  std::string model_name = \"test_custom_service\";\n  PrepareFiles(serving_path, false);\n  auto serving = std::make_shared<ModelServing>();\n  auto status =\n      serving->GenerateTemplate(model_name, serving_path, 39110);\n  EXPECT_EQ(status, STATUS_OK);\n  std::string default_graph_toml = \"/tmp/\" + model_name + \".toml\";\n  std::string default_graph_toml_new;\n  ReplaceGraphToml(default_graph_toml, model_name, default_graph_toml_new);\n  status = BuildAndRunFlow(default_graph_toml_new);\n  EXPECT_EQ(status, STATUS_OK);\n  modelbox::Popen p;\n  std::vector<std::string> cmds{\"python3\"};\n  std::string python_file = test_data_dir + \"/test_client.py\";\n  cmds.push_back(python_file);\n  p.Open(cmds);\n  std::string line;\n  p.ReadOutLine(line);\n  auto res_line = line.substr(0, line.size() - 1);\n  EXPECT_EQ(res_line, \"{\\\"predict_result\\\": \\\"2\\\"}\");\n  auto ret = p.Close();\n  EXPECT_EQ(WEXITSTATUS(ret), 0);\n  RemoveFiles(serving_path, model_name);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/modelbox/utils_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/* clang-format off */\n#include <modelbox/base/log.h>\n#include <modelbox/server/utils.h>\n#include <securec.h>\n\n#include <list>\n#include <toml.hpp>\n\n#include \"gtest/gtest.h\"\n#include \"test_config.h\"\n#include <nlohmann/json.hpp>\n\n/* clang-format on */\nnamespace modelbox {\nclass ServerUtilsTest : public testing::Test {\n public:\n  ServerUtilsTest() = default;\n  ~ServerUtilsTest() override = default;\n\n protected:\n  void SetUp() override{};\n  void TearDown() override{};\n};\n\nTEST_F(ServerUtilsTest, IPMatchClassC) {\n  IPACL acl;\n  acl.AddCidr(\"192.168.1.1/24\");\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.2\"), STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.0\"), STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.127\"), STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.255\"), STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.2.0\"), STATUS_NOTFOUND);\n}\n\nTEST_F(ServerUtilsTest, IPMatch) {\n  IPACL acl;\n  acl.AddCidr(\"192.168.1.1\");\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.2\"), STATUS_NOTFOUND);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.0\"), STATUS_NOTFOUND);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.1\"), STATUS_OK);\n}\n\nTEST_F(ServerUtilsTest, IPMatchAll) {\n  IPACL acl;\n  acl.AddCidr(\"0.0.0.0/0\");\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.2\"), STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.0\"), 
STATUS_OK);\n  EXPECT_EQ(acl.IsMatch(\"192.168.1.1\"), STATUS_OK);\n}\n\n}  // namespace modelbox\n"
  },
  {
    "path": "test/unit/plugin/task_manger_test.cc",
    "content": "/*\n * Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <modelbox/server/task.h>\n#include <modelbox/server/task_manager.h>\n\n#include <condition_variable>\n#include <fstream>\n#include <mutex>\n\n#include \"gtest/gtest.h\"\n#include \"mock_driver_ctl.h\"\n#include \"mockflow.h\"\n#include \"modelbox/base/log.h\"\n\nnamespace modelbox {\n\nstd::condition_variable cv;\nstd::mutex mtx;\nint count;\nclass TaskManagerTest : public testing::Test {\n public:\n  TaskManagerTest() = default;\n\n protected:\n  void SetUp() override {\n    std::string toml_content = R\"(\n    [driver]\n    skip-default=true\n    dir=[\")\" + std::string(TEST_LIB_DIR) +\n                               \"\\\"]\\n    \" +\n                               R\"(\n    [graph]\n    graphconf = '''digraph demo {\n          input1[type=input, device=cpu,deviceid=0] \n          output1[type=output, device=cpu, deviceid=0]\n          stream_start[type=flowunit, flowunit=virtual_stream_start, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          stream_mid[type=flowunit, flowunit=virtual_stream_mid, device=cpu, deviceid=0, label=\"<In_1> | <Out_1>\"]\n          \n          input1 ->stream_start:In_1\n          stream_start:Out_1 ->stream_mid:In_1\n          stream_mid:Out_1->output1\n\n        }'''\n    format = \"graphviz\"\n  )\";\n    mockflow_ = std::make_shared<MockFlow>();\n    
mockflow_->Init();\n    auto ret = mockflow_->BuildAndRun(\"TaskManager\", toml_content, -1);\n  };\n  void TearDown() override { mockflow_ = nullptr; };\n\n  std::shared_ptr<MockFlow> mockflow_;\n\n private:\n};\n\nvoid TaskFinished(OneShotTask *task, TaskStatus status) {\n  count++;\n  EXPECT_EQ(FINISHED, status);\n  cv.notify_one();\n}\n\nvoid TaskStopped(OneShotTask *task, TaskStatus status) {\n  EXPECT_EQ(STOPPED, status);\n  cv.notify_one();\n}\n\nvoid TaskEnd(OneShotTask *task, TaskStatus status) {\n  MBLOG_INFO << \"Task end \" << status;\n}\n\nTEST_F(TaskManagerTest, CreateTask) {\n  std::unique_lock<std::mutex> lck(mtx);\n  auto tm = std::make_shared<TaskManager>(mockflow_->GetFlow(), 10);\n  auto status = tm->Start();\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  auto task_1 =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  EXPECT_EQ(WAITING, task_1->GetTaskStatus());\n  auto output_buf = task_1->CreateBufferList();\n  output_buf->Build({3 * sizeof(int)});\n  auto *data = (int *)output_buf->MutableData();\n  data[0] = 0;\n  data[1] = 30000;\n  data[2] = 3;\n  std::unordered_map<std::string, std::shared_ptr<BufferList>> datas;\n  datas.emplace(\"input1\", output_buf);\n  task_1->FillData(datas);\n  task_1->RegisterStatusCallback(TaskFinished);\n  task_1->Start();\n  cv.wait_for(lck, std::chrono::seconds(10),\n              [task_1]() { return task_1->GetTaskStatus() == FINISHED; });\n  EXPECT_EQ(FINISHED, task_1->GetTaskStatus());\n\n  auto task_2 =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  EXPECT_EQ(WAITING, task_2->GetTaskStatus());\n  task_2->Start();\n  task_2->RegisterStatusCallback(TaskStopped);\n  cv.wait_for(lck, std::chrono::seconds(10),\n              [task_2]() { return task_2->GetTaskStatus() == STOPPED; });\n  EXPECT_EQ(STOPPED, task_2->GetTaskStatus());\n}\n\nTEST_F(TaskManagerTest, StopTask) {\n  std::unique_lock<std::mutex> lck(mtx);\n  auto tm = 
std::make_shared<TaskManager>(mockflow_->GetFlow(), 10);\n  auto status = tm->Start();\n  EXPECT_EQ(status, STATUS_SUCCESS);\n  auto task_1 =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  EXPECT_EQ(WAITING, task_1->GetTaskStatus());\n  auto output_buf = task_1->CreateBufferList();\n  output_buf->Build({3 * sizeof(int)});\n  auto *data = (int *)output_buf->MutableData();\n  data[0] = 0;\n  data[1] = 30000;\n  data[2] = 3;\n  std::unordered_map<std::string, std::shared_ptr<BufferList>> datas;\n  datas.emplace(\"input1\", output_buf);\n  task_1->FillData(datas);\n  task_1->RegisterStatusCallback(TaskStopped);\n  task_1->Start();\n  task_1->Stop();\n  EXPECT_EQ(STOPPED, task_1->GetTaskStatus());\n\n  auto task_2 =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  EXPECT_EQ(WAITING, task_2->GetTaskStatus());\n  task_2->RegisterStatusCallback(TaskStopped);\n  task_2->Stop();\n  EXPECT_EQ(STOPPED, task_2->GetTaskStatus());\n\n  auto task_3 =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  EXPECT_EQ(WAITING, task_3->GetTaskStatus());\n  task_3->Start();\n  task_3->Stop();\n  EXPECT_EQ(STOPPED, task_3->GetTaskStatus());\n\n  EXPECT_EQ(tm->GetAvaiableTaskCount(), 0);\n}\n\nTEST_F(TaskManagerTest, DeleteTaskById) {\n  std::unique_lock<std::mutex> lck(mtx);\n  auto tm = std::make_shared<TaskManager>(mockflow_->GetFlow(), 10);\n  auto status = tm->Start();\n  EXPECT_EQ(status, STATUS_SUCCESS);\n\n  auto task =\n      std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n  auto uuid = task->GetTaskId();\n  EXPECT_EQ(WAITING, task->GetTaskStatus());\n  auto output_buf = task->CreateBufferList();\n  output_buf->Build({3 * sizeof(int)});\n  auto *data = (int *)output_buf->MutableData();\n  data[0] = 0;\n  data[1] = 30000;\n  data[2] = 3;\n  std::unordered_map<std::string, std::shared_ptr<BufferList>> datas;\n  datas.emplace(\"input1\", output_buf);\n  
task->FillData(datas);\n  task->RegisterStatusCallback(TaskEnd);\n  task->Start();\n  sleep(1);\n  auto get_task = tm->GetTaskById(uuid);\n  EXPECT_TRUE(get_task != nullptr);\n  sleep(1);\n  tm->DeleteTaskById(uuid);\n  auto del_task = tm->GetTaskById(uuid);\n  EXPECT_TRUE(del_task == nullptr);\n}\n\nTEST_F(TaskManagerTest, TaskInQueue) {\n  count = 0;\n  std::unique_lock<std::mutex> lck(mtx);\n  auto tm = std::make_shared<TaskManager>(mockflow_->GetFlow(), 3);\n  auto status = tm->Start();\n  EXPECT_EQ(status, STATUS_SUCCESS);\n\n  for (uint32_t i = 0; i < 4; i++) {\n    auto task =\n        std::dynamic_pointer_cast<OneShotTask>(tm->CreateTask(TASK_ONESHOT));\n    auto uuid = task->GetTaskId();\n    EXPECT_EQ(WAITING, task->GetTaskStatus());\n    auto output_buf = task->CreateBufferList();\n    output_buf->Build({3 * sizeof(int)});\n    auto *data = (int *)output_buf->MutableData();\n    data[0] = 0;\n    data[1] = 40000;\n    data[2] = 3;\n    std::unordered_map<std::string, std::shared_ptr<BufferList>> datas;\n    datas.emplace(\"input1\", output_buf);\n    task->FillData(datas);\n    task->Start();\n  }\n  sleep(1);\n  int running_tasks = 0;\n  int waitting_tasks = 0;\n  int stopped_tasks = 0;\n  int finish_tasks = 0;\n\n  auto task_list = tm->GetAllTasks();\n\n  std::shared_ptr<OneShotTask> running_task;\n\n  for (const auto &task : task_list) {\n    auto one_shot_task = std::dynamic_pointer_cast<OneShotTask>(task);\n    one_shot_task->RegisterStatusCallback(TaskFinished);\n    if (task->GetTaskStatus() == WORKING) {\n      running_task = one_shot_task;\n      running_tasks++;\n    }\n    if (task->GetTaskStatus() == WAITING) {\n      waitting_tasks++;\n    }\n    if (task->GetTaskStatus() == STOPPED) {\n      stopped_tasks++;\n    }\n    if (task->GetTaskStatus() == FINISHED) {\n      finish_tasks++;\n    }\n  }\n  EXPECT_EQ(running_tasks, 3);\n  EXPECT_EQ(waitting_tasks, 1);\n  EXPECT_EQ(stopped_tasks, 0);\n  EXPECT_EQ(finish_tasks, 0);\n\n  
running_task->RegisterStatusCallback(TaskStopped);\n  running_task->Stop();\n\n  cv.wait(lck, []() { return count >= 3; });\n  running_tasks = 0;\n  waitting_tasks = 0;\n  stopped_tasks = 0;\n  finish_tasks = 0;\n  for (const auto &task : task_list) {\n    if (task->GetTaskStatus() == WORKING) {\n      running_tasks++;\n    }\n    if (task->GetTaskStatus() == WAITING) {\n      waitting_tasks++;\n    }\n    if (task->GetTaskStatus() == STOPPED) {\n      stopped_tasks++;\n    }\n    if (task->GetTaskStatus() == FINISHED) {\n      finish_tasks++;\n    }\n  }\n  EXPECT_EQ(running_tasks, 0);\n  EXPECT_EQ(waitting_tasks, 0);\n  EXPECT_EQ(stopped_tasks, 1);\n  EXPECT_EQ(finish_tasks, 3);\n}\n\n}  // namespace modelbox"
  },
  {
    "path": "thirdparty/CMake/APIGW_CPP_CMakeList.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(APIGW_CPP)\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\naux_source_directory(. DIR_SRCS)\n\nFILE(GLOB SRC_LIST_CPP \n    hasher.cpp\n    RequestParams.cpp\n    signer.cpp\n    utils.cpp\n)\n\nfile(GLOB_RECURSE APIGW_CPP_CPP .)\nadd_library(APIGW_CPP SHARED  ${SRC_LIST_CPP})\nadd_library(APIGW_CPP-static STATIC  ${SRC_LIST_CPP})\ntarget_include_directories(APIGW_CPP PRIVATE .)\ntarget_include_directories(APIGW_CPP-static PRIVATE .)\nset_target_properties(APIGW_CPP-static PROPERTIES OUTPUT_NAME APIGW_CPP)\nset_target_properties(APIGW_CPP PROPERTIES LINK_FLAGS \"-s\")\nset_target_properties(APIGW_CPP-static PROPERTIES LINK_FLAGS \"-s\")\n\nset(APIGW_CPP_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/include)\n\nfile(MAKE_DIRECTORY ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/constants.h DESTINATION ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/header.h DESTINATION ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/hasher.h DESTINATION ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/RequestParams.h DESTINATION ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/signer.h DESTINATION ${APIGW_CPP_INCLUDE_DIR})\nfile(COPY ${CMAKE_CURRENT_SOURCE_DIR}/utils.h 
DESTINATION ${APIGW_CPP_INCLUDE_DIR})\n\nset(APIGW_CPP_INCLUDE_DIR ${APIGW_CPP_INCLUDE_DIR} CACHE INTERNAL \"\")\n\nset(APIGW_CPP_LIBRARIES APIGW_CPP CACHE INTERNAL \"\")\nset(APIGW_CPP_STATIC_LIBRARIES APIGW_CPP-static CACHE INTERNAL \"\")"
  },
  {
    "path": "thirdparty/CMake/Demo_Files_CMakeList.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(demo_files)\n\n# emotion files\nset(EMOTION_DIR ${CMAKE_CURRENT_SOURCE_DIR}/emotion_demo_files)\n\nset(EMOTION_MODEL_FILE ${EMOTION_DIR}/emotion.pt CACHE INTERNAL \"\")\nset(FACE_DETECTION_MODEL_FILE ${EMOTION_DIR}/face_detector.pt CACHE INTERNAL \"\")\nset(EMOTION_TEST_VIDEO_FILE ${EMOTION_DIR}/emotion_test_video.mp4 CACHE INTERNAL \"\")\n"
  },
  {
    "path": "thirdparty/CMake/Huawei_Secure_C_CMakeList.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(Huawei_Secure_c)\n\nset(FLAGS \"-Wall  -DNDEBUG -fstack-protector-all -Wformat=2 -Wfloat-equal -Wshadow -Wconversion -Wformat-security -Wextra --param ssp-buffer-size=4 -Warray-bounds -Wpointer-arith -Wcast-qual -Wstrict-prototypes -Wmissing-prototypes -Wstrict-overflow=1 -Wstrict-aliasing=2 -Wswitch -Wswitch-default\")\nfile(GLOB_RECURSE SECURE_C_SOURCE src/*.cpp src/*.cc src/*.c)\nadd_library(securec SHARED EXCLUDE_FROM_ALL ${SECURE_C_SOURCE})\nadd_library(securec-static STATIC EXCLUDE_FROM_ALL ${SECURE_C_SOURCE})\ntarget_include_directories(securec PRIVATE include src)\ntarget_include_directories(securec-static PRIVATE include src)\nset_target_properties(securec-static PROPERTIES OUTPUT_NAME securec)\nset_target_properties(securec PROPERTIES COMPILE_FLAGS ${FLAGS})\nset_target_properties(securec-static PROPERTIES COMPILE_FLAGS ${FLAGS})\nset_target_properties(securec PROPERTIES LINK_FLAGS \"-s\")\nset_target_properties(securec-static PROPERTIES LINK_FLAGS \"-s\")\nset(HUAWEI_SECURE_C_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/include CACHE INTERNAL \"\")\nset(HUAWEI_SECURE_C_LIBRARIES  securec CACHE INTERNAL \"\")\nset(HUAWEI_SECURE_C_STATIC_LIBRARIES securec-static CACHE INTERNAL \"\")\n"
  },
  {
    "path": "thirdparty/CMake/cpp_httplib_cmakelist.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(cpp-httplib)\n\nfile(GLOB CPP_HTTPLIB_SOURCE *.c *.cpp *.cc)\nadd_library(cpp-httplib SHARED EXCLUDE_FROM_ALL ${CPP_HTTPLIB_SOURCE})\nadd_library(cpp-httplib-static STATIC EXCLUDE_FROM_ALL ${CPP_HTTPLIB_SOURCE})\nset_property(TARGET cpp-httplib-static PROPERTY POSITION_INDEPENDENT_CODE ON)\nset_target_properties(cpp-httplib  PROPERTIES COMPILE_FLAGS \"-DCPPHTTPLIB_USE_POLL -DCPPHTTPLIB_OPENSSL_SUPPORT -DCPPHTTPLIB_HEADER_MAX_LENGTH=65535\")\nset_target_properties(cpp-httplib-static PROPERTIES COMPILE_FLAGS \"-DCPPHTTPLIB_USE_POLL -DCPPHTTPLIB_OPENSSL_SUPPORT -DCPPHTTPLIB_HEADER_MAX_LENGTH=65535\")\n\nset(CPP_HTTPLIB_INCLUDE ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL \"\")\nset(CPP_HTTPLIB_LIBRARIES cpp-httplib CACHE INTERNAL \"\")\nset(CPP_HTTPLIB_STATIC_LIBRARIES cpp-httplib-static CACHE INTERNAL \"\")\n"
  },
  {
    "path": "thirdparty/CMake/local-package.in",
    "content": "\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-localpackage)\n\n# 替换的环境变量\nset(THIRDPARTY_DOWNLOAD_DIR @THIRDPARTY_DOWNLOAD_DIR@)\nset(WITH_ALL_DEMO @WITH_ALL_DEMO@)\n\n# 预先下载代码库列表，此处仅包含需要使用ADD_SUBDIRECTORY添加的外部项目。\ninclude(ExternalProject)\nfind_package(Git)\n\n# 下载googletest\nExternalProject_Add(\n  GoogleTest\n  URL               @LOCAL_PACKAGE_PATH@/googletest-release-1.11.0.tar.gz\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/googletest\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# 下载安全C库\nExternalProject_Add(\n  Huawei_Secure_C_download\n  URL               @LOCAL_PACKAGE_PATH@/libboundscheck-v1.1.11.zip\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/Huawei_Secure_C\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# 下载安全tinylog\nExternalProject_Add(\n  tinylog\n  URL               @LOCAL_PACKAGE_PATH@/tinylog-1.8.zip\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/tinylog\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# pybind11\nExternalProject_Add(\n  pybind11\n  URL               @LOCAL_PACKAGE_PATH@/pybind11-2.10.4.zip\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/pybind11\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND 
    \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# toml11\nExternalProject_Add(\n  toml11\n  URL               @LOCAL_PACKAGE_PATH@/toml11-3.7.1.zip\n  SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/toml11\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# nlohmann json\nExternalProject_Add(\n  nlohmann\n  URL               @LOCAL_PACKAGE_PATH@/json-v3.11.2.tar.gz\n  SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/nlohmann\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# cpp-httplib\nExternalProject_Add(\n  cpp-httplib\n  URL               @LOCAL_PACKAGE_PATH@/cpp-httplib-0.12.6.tar.gz\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib\n  CONFIGURE_COMMAND ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib/split.py -o ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib/build\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# APIGW_Cpp\nExternalProject_Add(\n  APIGW_Cpp\n  URL               @LOCAL_PACKAGE_PATH@/APIGW-cpp-sdk.zip\n  SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/APIGW-cpp-sdk-1.0.2\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# demo files\nif (${WITH_ALL_DEMO})\n  ExternalProject_Add(\n    emotion_demo_files\n    URL                @LOCAL_PACKAGE_PATH@/emotion_demo_files.zip\n    SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/demo/emotion_demo_files\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n  )\nendif()\n"
  },
  {
    "path": "thirdparty/CMake/pre-download.in",
    "content": "\n#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(modelbox-downloadpackage)\n\n# 替换的环境变量\nset(THIRDPARTY_DOWNLOAD_DIR @THIRDPARTY_DOWNLOAD_DIR@)\nset(WITH_ALL_DEMO @WITH_ALL_DEMO@)\n\n# 预先下载代码库列表，此处仅包含需要使用ADD_SUBDIRECTORY添加的外部项目。\ninclude(ExternalProject)\nfind_package(Git)\n\nif (NOT @USE_CN_MIRROR@) \n  set(GOOGLETEST_DOWNLOAD_URL \"https://github.com/google/googletest/archive/refs/tags/release-1.11.0.zip\")\n  set(HUAWEI_SECURE_C_DOWNLOAD_URL \"https://github.com/openeuler-mirror/libboundscheck/archive/refs/heads/master.zip\")\n  set(TINYLOG_DOWNLOAD_URL \"https://github.com/pymumu/tinylog/archive/refs/tags/v1.8.zip\")\n  set(PYBIND11_DOWNLOAD_URL \"https://github.com/pybind/pybind11/archive/refs/tags/v2.10.4.zip\")\n  set(TOML11_DOWNLOAD_URL \"https://github.com/ToruNiina/toml11/archive/refs/tags/v3.7.1.zip\")\n  set(NLOHMANN_DOWNLOAD_URL \"https://github.com/nlohmann/json/releases/download/v3.11.2/include.zip\")\n  set(CPP_HTTPLIB_DOWNLOAD_URL \"https://github.com/yhirose/cpp-httplib/archive/refs/tags/v0.12.6.zip\")\n  set(APIGW_CPP_SDK_DOWNLOAD_URL \"https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/APIGW-cpp-sdk.zip\")\n  set(MODELBOX_WEBUI_DOWNLOAD_URL \"https://github.com/modelbox-ai/modelbox-webui/archive/refs/tags/0.1.3.zip\")\n  set(EMOTION_DEMO_FILES_DOWNLOAD_URL 
\"https://github.com/modelbox-ai/modelbox-binary/releases/download/BinaryArchive/emotion_demo_files.zip\")\nelse()\n  set(GOOGLETEST_DOWNLOAD_URL \"https://ghproxy.com/github.com/google/googletest/archive/refs/tags/release-1.11.0.zip\")\n  set(HUAWEI_SECURE_C_DOWNLOAD_URL \"https://gitee.com/openeuler/libboundscheck/repository/archive/master.zip\")\n  set(TINYLOG_DOWNLOAD_URL \"https://ghproxy.com/github.com/pymumu/tinylog/archive/refs/tags/v1.8.zip\")\n  set(PYBIND11_DOWNLOAD_URL \"https://ghproxy.com/github.com/pybind/pybind11/archive/refs/tags/v2.10.4.zip\")\n  set(TOML11_DOWNLOAD_URL \"https://ghproxy.com/github.com/ToruNiina/toml11/archive/refs/tags/v3.7.1.zip\")\n  set(NLOHMANN_DOWNLOAD_URL \"https://ghproxy.com/github.com/nlohmann/json/releases/download/v3.11.2/include.zip\")\n  set(CPP_HTTPLIB_DOWNLOAD_URL \"https://ghproxy.com/github.com/yhirose/cpp-httplib/archive/refs/tags/v0.12.6.zip\")\n  set(APIGW_CPP_SDK_DOWNLOAD_URL \"https://obs.cn-north-1.myhuaweicloud.com/apig-sdk/APIGW-cpp-sdk.zip\")\n  set(MODELBOX_WEBUI_DOWNLOAD_URL \"https://gitee.com/modelbox/modelbox-webui/repository/archive/tags/0.1.3.zip\")\n  set(EMOTION_DEMO_FILES_DOWNLOAD_URL \"https://gitee.com/modelbox/modelbox-binary/attach_files/1010735/download/emotion_demo_files.zip\")\nendif()\n\n# 下载googletest\nExternalProject_Add(\n  GoogleTest\n  URL               ${GOOGLETEST_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/googletest\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# 下载安全C库\nExternalProject_Add(\n  Huawei_Secure_C_download\n  URL               ${HUAWEI_SECURE_C_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/Huawei_Secure_C\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# 下载安全tinylog\nExternalProject_Add(\n  tinylog\n  URL               ${TINYLOG_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/tinylog\n  
CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# pybind11\nExternalProject_Add(\n  pybind11\n  URL               ${PYBIND11_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/pybind11\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# toml11\nExternalProject_Add(\n  toml11\n  URL               ${TOML11_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/toml11\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# nlohmann json\nExternalProject_Add(\n  nlohmann\n  URL               ${NLOHMANN_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/nlohmann\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# cpp-httplib\nExternalProject_Add(\n  cpp-httplib\n  URL               ${CPP_HTTPLIB_DOWNLOAD_URL}\n  SOURCE_DIR        ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib\n  CONFIGURE_COMMAND ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib/split.py -o ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib/build\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# APIGW-cpp-sdk\nExternalProject_Add(\n  APIGW-cpp-sdk\n  URL                ${APIGW_CPP_SDK_DOWNLOAD_URL}\n  SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/APIGW-cpp-sdk-1.0.2\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# webui\nExternalProject_Add(\n  modelbox-webui\n  URL                ${MODELBOX_WEBUI_DOWNLOAD_URL}\n  SOURCE_DIR         ${THIRDPARTY_DOWNLOAD_DIR}/modelbox-webui\n  CONFIGURE_COMMAND \"\"\n  BUILD_COMMAND     \"\"\n  INSTALL_COMMAND   \"\"\n  TEST_COMMAND      \"\"\n)\n\n# demo files\nif (${WITH_ALL_DEMO})\n  ExternalProject_Add(\n    emotion_demo_files\n    URL                ${EMOTION_DEMO_FILES_DOWNLOAD_URL}\n    SOURCE_DIR         
${THIRDPARTY_DOWNLOAD_DIR}/demo/emotion_demo_files\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n  )\nendif()\n"
  },
  {
    "path": "thirdparty/CMake/tlog_cmakelist.in",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\nproject(tlog)\n\nfile(GLOB TLOG_SOURCE *.c *.cpp)\nadd_library(tlog SHARED EXCLUDE_FROM_ALL ${TLOG_SOURCE})\nadd_library(tlog-static STATIC EXCLUDE_FROM_ALL ${TLOG_SOURCE})\nset_target_properties(tlog PROPERTIES COMPILE_FLAGS \"-DTLOG_MAX_LINE_LEN=8192\")\ntarget_link_libraries(tlog pthread)\nset_target_properties(tlog-static PROPERTIES COMPILE_FLAGS \"-DTLOG_MAX_LINE_LEN=8192\")\ntarget_link_libraries(tlog-static pthread)\nset(TLOG_INCLUDE ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL \"\")\nset(TLOG_LIBRARIES tlog CACHE INTERNAL \"\")\nset(TLOG_STATIC_LIBRARIES tlog-static CACHE INTERNAL \"\")\n"
  },
  {
    "path": "thirdparty/CMakeLists.txt",
    "content": "#\n# Copyright 2021 The Modelbox Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ncmake_minimum_required(VERSION 3.10)\n\ninclude(ExternalProject)\n\nset(THIRDPARTY_DOWNLOAD_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\nset(THIRDPARTY_DOWNLOAD_WORKING_DIR ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/build)\n\n# disable clang lint for third party lib\nfile(WRITE \"${CMAKE_CURRENT_BINARY_DIR}/.clang-tidy\" \"\n---\nChecks: '-*,llvm-twine-local'\n...\n\")\n\n# 预先下载的三方组件，当使用ADD_SUBDIRECTORY包含子项目时，采用此方式。\nif (NOT LOCAL_PACKAGE_PATH) \n  set(THIRDPARTY_DOWNLOAD_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\n  configure_file(CMake/pre-download.in ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/CMakeLists.txt @ONLY)\nelse()\n  set(THIRDPARTY_DOWNLOAD_DIR ${CMAKE_CURRENT_BINARY_DIR}/download)\n  configure_file(CMake/local-package.in ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/CMakeLists.txt @ONLY)\nendif()\n\nfile(MAKE_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR})\nexecute_process(COMMAND ${CMAKE_COMMAND} -G \"${CMAKE_GENERATOR}\" ..\n  RESULT_VARIABLE COMMAND_RESULT\n  WORKING_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR} \n)\n\nif(COMMAND_RESULT)\n  message(FATAL_ERROR \"Download thirdparty failed: ${COMMAND_RESULT}\")\nendif()\n\nexecute_process(COMMAND ${CMAKE_COMMAND} --build .\n  RESULT_VARIABLE COMMAND_RESULT\n  WORKING_DIRECTORY ${THIRDPARTY_DOWNLOAD_WORKING_DIR} \n)\n\nif(COMMAND_RESULT)\n  message(FATAL_ERROR \"Download thirdparty failed: 
${COMMAND_RESULT}\")\nendif()\n\nset(CMAKE_CXX_FLAGS_OLD ${CMAKE_CXX_FLAGS})\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fPIC\")\nset(GOOGLETEST_SOURCE_DIR ${THIRDPARTY_DOWNLOAD_DIR}/googletest)\nadd_subdirectory(${GOOGLETEST_SOURCE_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/googletest EXCLUDE_FROM_ALL)\nset(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_OLD})\n\nset(PYTHON_VER 3.5)\nfind_package(PythonInterp ${PYTHON_VER})\nif(NOT ${PYTHONINTERP_FOUND})\n  set(PYTHON_EXECUTABLE /usr/bin/python3)\n  find_package(PythonInterp ${PYTHON_VER})\nendif()\n\nfind_package(PythonLibs ${PYTHON_VER})\nset(PYTHONLIBS_FOUND ${PYTHONLIBS_FOUND} CACHE INTERNAL \"\")\n\nif (NOT ${PYTHONE_DISABLED} AND ${PYTHONLIBS_FOUND})\n  set(PYBIND_SOURCE_DIR ${THIRDPARTY_DOWNLOAD_DIR}/pybind11)\n  add_subdirectory(${PYBIND_SOURCE_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/pybind11 EXCLUDE_FROM_ALL)\n  set(PYBIND11_PYTHON_VERSION ${PYBIND11_PYTHON_VERSION} CACHE INTERNAL \"\")\n  set(PYBIND11_INCLUDE_DIRS ${PYBIND_SOURCE_DIR}/include CACHE INTERNAL \"\")\n  set(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} CACHE INTERNAL \"\")\n  set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE INTERNAL \"\")\n  set(PYTHON_VERSION_STRING ${PYTHON_VERSION_STRING} CACHE INTERNAL \"\")\nelse()\n  message(STATUS \"Disable python\")\n  set(PYTHONLIBS_FOUND FALSE CACHE INTERNAL \"\")\nendif()\n\n# 安全C的CMake生成\nset(HUAWEI_SECURE_C_DIR ${THIRDPARTY_DOWNLOAD_DIR}/Huawei_Secure_C)\nconfigure_file(CMake/Huawei_Secure_C_CMakeList.in ${HUAWEI_SECURE_C_DIR}/CMakeLists.txt @ONLY)\nadd_subdirectory(${HUAWEI_SECURE_C_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/Huawei_Secure_C EXCLUDE_FROM_ALL)\n\nif (WITH_SECURE_C)\n  install(FILES \n      $<TARGET_FILE:${HUAWEI_SECURE_C_LIBRARIES}> DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}\n      COMPONENT libmodelbox\n      )\nendif()\n\nset(TLOG_DIR ${THIRDPARTY_DOWNLOAD_DIR}/tinylog)\nconfigure_file(CMake/tlog_cmakelist.in ${TLOG_DIR}/CMakeLists.txt @ONLY)\nadd_subdirectory(${TLOG_DIR} 
${THIRDPARTY_DOWNLOAD_WORKING_DIR}/tinylog EXCLUDE_FROM_ALL)\n\nset(TOML_INCLUDE_DIR ${THIRDPARTY_DOWNLOAD_DIR}/toml11 CACHE INTERNAL \"\")\n\nset(NLOHMANN_INCLUDE_DIR ${THIRDPARTY_DOWNLOAD_DIR}/nlohmann/include CACHE INTERNAL \"\")\n\nset(CPP_HTTPLIB_DIR ${THIRDPARTY_DOWNLOAD_DIR}/cpp-httplib/build)\nconfigure_file(CMake/cpp_httplib_cmakelist.in ${CPP_HTTPLIB_DIR}/CMakeLists.txt @ONLY)\nadd_subdirectory(${CPP_HTTPLIB_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/cpp_httplib  EXCLUDE_FROM_ALL)\n\nif (${WITH_ALL_DEMO})\n  set(DEMO_DIR ${THIRDPARTY_DOWNLOAD_DIR}/demo)\n  configure_file(CMake/Demo_Files_CMakeList.in ${DEMO_DIR}/CMakeLists.txt @ONLY)\n  add_subdirectory(${DEMO_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/demo EXCLUDE_FROM_ALL)\nendif()\n\n# APIGW_CPP的签名库\nset(APIGW_CPP_DIR ${THIRDPARTY_DOWNLOAD_BINARY_DIR}/APIGW-cpp-sdk-1.0.2)\nconfigure_file(CMake/APIGW_CPP_CMakeList.in ${APIGW_CPP_DIR}/CMakeLists.txt @ONLY)\nadd_subdirectory(${APIGW_CPP_DIR} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/APIGW-cpp-sdk-1.0.2 EXCLUDE_FROM_ALL)\n\nif (WITH_WEBUI)\n  set(MODELBOX_WEBUI ${THIRDPARTY_DOWNLOAD_DIR}/modelbox-webui)\n  if (IS_DIRECTORY ${MODELBOX_WEBUI}) \n    add_subdirectory(${MODELBOX_WEBUI} ${THIRDPARTY_DOWNLOAD_WORKING_DIR}/modelbox-webui)\n  else()\n    message(STATUS \"Skip build modelbox editor webui\")\n  endif()\nendif()\n"
  }
]